Squashed 'third_party/rawrtc/usrsctp/' content from commit bd1a92db3

Change-Id: If227cd6edd3243ac26044056b7427ae5bca71ef8
git-subtree-dir: third_party/rawrtc/usrsctp
git-subtree-split: bd1a92db338ba1e57453637959a127032bb566ff
diff --git a/usrsctplib/netinet/sctp.h b/usrsctplib/netinet/sctp.h
new file mode 100755
index 0000000..276cade
--- /dev/null
+++ b/usrsctplib/netinet/sctp.h
@@ -0,0 +1,667 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp.h 297662 2016-04-07 09:10:34Z rrs $");
+#endif
+
+#ifndef _NETINET_SCTP_H_
+#define _NETINET_SCTP_H_
+
+#if (defined(__APPLE__) || defined(__Userspace_os_Linux) || defined(__Userspace_os_Darwin))
+#include <stdint.h>
+#endif
+
+#include <sys/types.h>
+
+
+#if !defined(__Userspace_os_Windows)
+#define SCTP_PACKED __attribute__((packed))
+#else
+#pragma pack (push, 1)
+#define SCTP_PACKED
+#endif
+
+/*
+ * SCTP protocol - RFC4960.
+ */
+struct sctphdr {
+	uint16_t src_port;	/* source port */
+	uint16_t dest_port;	/* destination port */
+	uint32_t v_tag;		/* verification tag of packet */
+	uint32_t checksum;	/* CRC32C checksum */
+	/* chunks follow... */
+} SCTP_PACKED;
+
+/*
+ * SCTP Chunks
+ */
+struct sctp_chunkhdr {
+	uint8_t chunk_type;	/* chunk type */
+	uint8_t chunk_flags;	/* chunk flags */
+	uint16_t chunk_length;	/* chunk length */
+	/* optional params follow */
+} SCTP_PACKED;
+
+/*
+ * SCTP chunk parameters
+ */
+struct sctp_paramhdr {
+	uint16_t param_type;	/* parameter type */
+	uint16_t param_length;	/* parameter length */
+} SCTP_PACKED;
+
+/*
+ * user socket options: socket API defined
+ */
+/*
+ * read-write options
+ */
+#define SCTP_RTOINFO			0x00000001
+#define SCTP_ASSOCINFO			0x00000002
+#define SCTP_INITMSG			0x00000003
+#define SCTP_NODELAY			0x00000004
+#define SCTP_AUTOCLOSE			0x00000005
+#define SCTP_SET_PEER_PRIMARY_ADDR	0x00000006
+#define SCTP_PRIMARY_ADDR		0x00000007
+#define SCTP_ADAPTATION_LAYER		0x00000008
+/* same as above */
+#define SCTP_ADAPTION_LAYER		0x00000008
+#define SCTP_DISABLE_FRAGMENTS		0x00000009
+#define SCTP_PEER_ADDR_PARAMS 		0x0000000a
+#define SCTP_DEFAULT_SEND_PARAM		0x0000000b
+/* ancillary data/notification interest options */
+#define SCTP_EVENTS			0x0000000c /* deprecated */
+/* Without this applied we will give V4 and V6 addresses on a V6 socket */
+#define SCTP_I_WANT_MAPPED_V4_ADDR	0x0000000d
+#define SCTP_MAXSEG 			0x0000000e
+#define SCTP_DELAYED_SACK               0x0000000f
+#define SCTP_FRAGMENT_INTERLEAVE        0x00000010
+#define SCTP_PARTIAL_DELIVERY_POINT     0x00000011
+/* authentication support */
+#define SCTP_AUTH_CHUNK 		0x00000012
+#define SCTP_AUTH_KEY 			0x00000013
+#define SCTP_HMAC_IDENT 		0x00000014
+#define SCTP_AUTH_ACTIVE_KEY 		0x00000015
+#define SCTP_AUTH_DELETE_KEY 		0x00000016
+#define SCTP_USE_EXT_RCVINFO		0x00000017
+#define SCTP_AUTO_ASCONF		0x00000018 /* rw */
+#define SCTP_MAXBURST			0x00000019 /* rw */
+#define SCTP_MAX_BURST			0x00000019 /* rw */
+/* assoc level context */
+#define SCTP_CONTEXT                    0x0000001a /* rw */
+/* explicit EOR signalling */
+#define SCTP_EXPLICIT_EOR               0x0000001b
+#define SCTP_REUSE_PORT                 0x0000001c /* rw */
+#define SCTP_AUTH_DEACTIVATE_KEY	0x0000001d
+#define SCTP_EVENT                      0x0000001e
+#define SCTP_RECVRCVINFO                0x0000001f
+#define SCTP_RECVNXTINFO                0x00000020
+#define SCTP_DEFAULT_SNDINFO            0x00000021
+#define SCTP_DEFAULT_PRINFO             0x00000022
+#define SCTP_PEER_ADDR_THLDS            0x00000023
+#define SCTP_REMOTE_UDP_ENCAPS_PORT     0x00000024
+#define SCTP_ECN_SUPPORTED              0x00000025
+#define SCTP_PR_SUPPORTED               0x00000026
+#define SCTP_AUTH_SUPPORTED             0x00000027
+#define SCTP_ASCONF_SUPPORTED           0x00000028
+#define SCTP_RECONFIG_SUPPORTED         0x00000029
+#define SCTP_NRSACK_SUPPORTED           0x00000030
+#define SCTP_PKTDROP_SUPPORTED          0x00000031
+#define SCTP_MAX_CWND                   0x00000032
+
+/*
+ * read-only options
+ */
+#define SCTP_STATUS			0x00000100
+#define SCTP_GET_PEER_ADDR_INFO		0x00000101
+/* authentication support */
+#define SCTP_PEER_AUTH_CHUNKS 		0x00000102
+#define SCTP_LOCAL_AUTH_CHUNKS 		0x00000103
+#define SCTP_GET_ASSOC_NUMBER           0x00000104 /* ro */
+#define SCTP_GET_ASSOC_ID_LIST          0x00000105 /* ro */
+#define SCTP_TIMEOUTS                   0x00000106
+#define SCTP_PR_STREAM_STATUS           0x00000107
+#define SCTP_PR_ASSOC_STATUS            0x00000108
+
+/*
+ * user socket options: BSD implementation specific
+ */
+/*
+ * Blocking I/O is enabled on any TCP-type socket by default. For the UDP
+ * model, if blocking I/O is turned on, the socket send buffer is shared
+ * amongst all associations. The default for the UDP model is that SS_NBIO
+ * is set, which means each association has a separate send limit BUT will
+ * never block; instead you get EAGAIN back if you try to send too much.
+ * If you want blocking semantics, you enable it at the cost of sharing
+ * one socket send buffer amongst all associations. Peeled-off sockets turn
+ * this option off and block, but since both TCP and peeled-off sockets
+ * have only one assoc per socket this is fine. It probably does NOT make
+ * sense to change this on a TCP model OR peeled-off UDP model socket, but
+ * we do allow you to do so; just use the normal syscall to toggle SS_NBIO
+ * the way you want.
+ *
+ * Blocking I/O is controlled by the SS_NBIO flag on the socket state so_state
+ * field.
+ */
+
+#define SCTP_ENABLE_STREAM_RESET	0x00000900 /* struct sctp_assoc_value */
+#define SCTP_RESET_STREAMS		0x00000901 /* struct sctp_reset_streams */
+#define SCTP_RESET_ASSOC		0x00000902 /* sctp_assoc_t */
+#define SCTP_ADD_STREAMS		0x00000903 /* struct sctp_add_streams */
+
+/* For enable stream reset */
+#define SCTP_ENABLE_RESET_STREAM_REQ 	0x00000001
+#define SCTP_ENABLE_RESET_ASSOC_REQ 	0x00000002
+#define SCTP_ENABLE_CHANGE_ASSOC_REQ 	0x00000004
+#define SCTP_ENABLE_VALUE_MASK		0x00000007
+/* For reset streams */
+#define SCTP_STREAM_RESET_INCOMING	0x00000001
+#define SCTP_STREAM_RESET_OUTGOING	0x00000002
+
+
+/* here on down are more implementation specific */
+#define SCTP_SET_DEBUG_LEVEL		0x00001005
+#define SCTP_CLR_STAT_LOG               0x00001007
+/* CMT ON/OFF socket option */
+#define SCTP_CMT_ON_OFF                 0x00001200
+#define SCTP_CMT_USE_DAC                0x00001201
+/* JRS - Pluggable Congestion Control Socket option */
+#define SCTP_PLUGGABLE_CC               0x00001202
+/* RS - Pluggable Stream Scheduling Socket option */
+#define SCTP_PLUGGABLE_SS		0x00001203
+#define SCTP_SS_VALUE			0x00001204
+#define SCTP_CC_OPTION			0x00001205 /* Options for CC modules */
+/* For I-DATA */
+#define SCTP_INTERLEAVING_SUPPORTED	0x00001206
+
+/* read only */
+#define SCTP_GET_SNDBUF_USE		0x00001101
+#define SCTP_GET_STAT_LOG		0x00001103
+#define SCTP_PCB_STATUS			0x00001104
+#define SCTP_GET_NONCE_VALUES           0x00001105
+
+
+/* Special hook for dynamically setting primary for all assoc's,
+ * this is a write only option that requires root privilege.
+ */
+#define SCTP_SET_DYNAMIC_PRIMARY        0x00002001
+
+/* VRF (virtual router feature) and multi-VRF support
+ * options. VRF's provide splits within a router
+ * that give the views of multiple routers. A
+ * standard host, without VRF support, is just
+ * a single VRF. If VRF's are supported then
+ * the transport must be VRF aware. This means
+ * that every socket call coming in must be directed
+ * within the endpoint to one of the VRF's it belongs
+ * to. The endpoint, before binding, may select
+ * the "default" VRF it is in by using a set socket
+ * option with SCTP_VRF_ID. This will also
+ * get propagated to the default VRF. Once the
+ * endpoint binds an address then it CANNOT add
+ * additional VRF's to become a Multi-VRF endpoint.
+ *
+ * Before binding, additional VRF's can be added with
+ * the SCTP_ADD_VRF_ID call or deleted with
+ * SCTP_DEL_VRF_ID.
+ *
+ * Associations are ALWAYS contained inside a single
+ * VRF. They cannot reside in two (or more) VRF's. Incoming
+ * packets, assuming the router is VRF aware, can always
+ * tell us what VRF they arrived on. A host not supporting
+ * any VRF's will find that the packets always arrived on the
+ * single VRF that the host has.
+ *
+ */
+
+#define SCTP_VRF_ID			0x00003001
+#define SCTP_ADD_VRF_ID			0x00003002
+#define SCTP_GET_VRF_IDS		0x00003003
+#define SCTP_GET_ASOC_VRF               0x00003004
+#define SCTP_DEL_VRF_ID                 0x00003005
+
+/*
+ * If you enable packet logging you can get
+ * a poor man's ethereal output in binary
+ * form. Note this is a compile option to
+ * the kernel, SCTP_PACKET_LOGGING, and
+ * without it in your kernel you
+ * will get an EOPNOTSUPP error.
+ */
+#define SCTP_GET_PACKET_LOG             0x00004001
+
+/*
+ * hidden implementation specific options these are NOT user visible (should
+ * move out of sctp.h)
+ */
+/* sctp_bindx() flags as hidden socket options */
+#define SCTP_BINDX_ADD_ADDR		0x00008001
+#define SCTP_BINDX_REM_ADDR		0x00008002
+/* Hidden socket option that gets the addresses */
+#define SCTP_GET_PEER_ADDRESSES		0x00008003
+#define SCTP_GET_LOCAL_ADDRESSES	0x00008004
+/* return the total count in bytes needed to hold all local addresses bound */
+#define SCTP_GET_LOCAL_ADDR_SIZE	0x00008005
+/* Return the total count in bytes needed to hold the remote address */
+#define SCTP_GET_REMOTE_ADDR_SIZE	0x00008006
+/* hidden option for connectx */
+#define SCTP_CONNECT_X			0x00008007
+/* hidden option for connectx_delayed, part of sendx */
+#define SCTP_CONNECT_X_DELAYED		0x00008008
+#define SCTP_CONNECT_X_COMPLETE         0x00008009
+/* hidden socket option based sctp_peeloff */
+#define SCTP_PEELOFF                    0x0000800a
+/* the real worker for sctp_getaddrlen() */
+#define SCTP_GET_ADDR_LEN               0x0000800b
+#if defined(__APPLE__)
+/* temporary workaround for Apple listen() issue, no args used */
+#define SCTP_LISTEN_FIX			0x0000800c
+#endif
+#if defined(__Windows__)
+/* workaround for Cygwin on Windows: returns the SOCKET handle */
+#define SCTP_GET_HANDLE			0x0000800d
+#endif
+/* Debug things that need to be purged */
+#define SCTP_SET_INITIAL_DBG_SEQ	0x00009f00
+
+/* JRS - Supported congestion control modules for pluggable
+ * congestion control
+ */
+/* Standard TCP Congestion Control */
+#define SCTP_CC_RFC2581		0x00000000
+/* High Speed TCP Congestion Control (Floyd) */
+#define SCTP_CC_HSTCP		0x00000001
+/* HTCP Congestion Control */
+#define SCTP_CC_HTCP		0x00000002
+/* RTCC Congestion Control - RFC2581 plus */
+#define SCTP_CC_RTCC            0x00000003
+
+#define SCTP_CC_OPT_RTCC_SETMODE	0x00002000
+#define SCTP_CC_OPT_USE_DCCC_ECN	0x00002001
+#define SCTP_CC_OPT_STEADY_STEP         0x00002002
+
+#define SCTP_CMT_OFF            0
+#define SCTP_CMT_BASE           1
+#define SCTP_CMT_RPV1           2
+#define SCTP_CMT_RPV2           3
+#define SCTP_CMT_MPTCP          4
+#define SCTP_CMT_MAX            SCTP_CMT_MPTCP
+
+/* RS - Supported stream scheduling modules for pluggable
+ * stream scheduling
+ */
+/* Default simple round-robin */
+#define SCTP_SS_DEFAULT			0x00000000
+/* Real round-robin */
+#define SCTP_SS_ROUND_ROBIN		0x00000001
+/* Real round-robin per packet */
+#define SCTP_SS_ROUND_ROBIN_PACKET	0x00000002
+/* Priority */
+#define SCTP_SS_PRIORITY		0x00000003
+/* Fair Bandwidth */
+#define SCTP_SS_FAIR_BANDWITH		0x00000004
+/* First-come, first-serve */
+#define SCTP_SS_FIRST_COME		0x00000005
+
+
+/* fragment interleave constants
+ * setting must be one of these or
+ * EINVAL returned.
+ */
+#define SCTP_FRAG_LEVEL_0    0x00000000
+#define SCTP_FRAG_LEVEL_1    0x00000001
+#define SCTP_FRAG_LEVEL_2    0x00000002
+
+/*
+ * user state values
+ */
+#define SCTP_CLOSED			0x0000
+#define SCTP_BOUND			0x1000
+#define SCTP_LISTEN			0x2000
+#define SCTP_COOKIE_WAIT		0x0002
+#define SCTP_COOKIE_ECHOED		0x0004
+#define SCTP_ESTABLISHED		0x0008
+#define SCTP_SHUTDOWN_SENT		0x0010
+#define SCTP_SHUTDOWN_RECEIVED		0x0020
+#define SCTP_SHUTDOWN_ACK_SENT		0x0040
+#define SCTP_SHUTDOWN_PENDING		0x0080
+
+/*
+ * SCTP operational error codes (user visible)
+ */
+#define SCTP_CAUSE_NO_ERROR		0x0000
+#define SCTP_CAUSE_INVALID_STREAM	0x0001
+#define SCTP_CAUSE_MISSING_PARAM	0x0002
+#define SCTP_CAUSE_STALE_COOKIE		0x0003
+#define SCTP_CAUSE_OUT_OF_RESC		0x0004
+#define SCTP_CAUSE_UNRESOLVABLE_ADDR	0x0005
+#define SCTP_CAUSE_UNRECOG_CHUNK	0x0006
+#define SCTP_CAUSE_INVALID_PARAM	0x0007
+#define SCTP_CAUSE_UNRECOG_PARAM	0x0008
+#define SCTP_CAUSE_NO_USER_DATA		0x0009
+#define SCTP_CAUSE_COOKIE_IN_SHUTDOWN	0x000a
+#define SCTP_CAUSE_RESTART_W_NEWADDR	0x000b
+#define SCTP_CAUSE_USER_INITIATED_ABT	0x000c
+#define SCTP_CAUSE_PROTOCOL_VIOLATION	0x000d
+
+/* Error causes from RFC5061 */
+#define SCTP_CAUSE_DELETING_LAST_ADDR	0x00a0
+#define SCTP_CAUSE_RESOURCE_SHORTAGE	0x00a1
+#define SCTP_CAUSE_DELETING_SRC_ADDR	0x00a2
+#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK	0x00a3
+#define SCTP_CAUSE_REQUEST_REFUSED	0x00a4
+
+/* Error causes from nat-draft */
+#define SCTP_CAUSE_NAT_COLLIDING_STATE  0x00b0
+#define SCTP_CAUSE_NAT_MISSING_STATE    0x00b1
+
+/* Error causes from RFC4895 */
+#define SCTP_CAUSE_UNSUPPORTED_HMACID	0x0105
+
+/*
+ * error cause parameters (user visible)
+ */
+struct sctp_gen_error_cause {
+	uint16_t code;
+	uint16_t length;
+	uint8_t info[];
+} SCTP_PACKED;
+
+struct sctp_error_cause {
+	uint16_t code;
+	uint16_t length;
+	/* optional cause-specific info may follow */
+} SCTP_PACKED;
+
+struct sctp_error_invalid_stream {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_INVALID_STREAM */
+	uint16_t stream_id;	/* stream id of the DATA in error */
+	uint16_t reserved;
+} SCTP_PACKED;
+
+struct sctp_error_missing_param {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_MISSING_PARAM */
+	uint32_t num_missing_params;	/* number of missing parameters */
+	uint16_t type[];
+} SCTP_PACKED;
+
+struct sctp_error_stale_cookie {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_STALE_COOKIE */
+	uint32_t stale_time;	/* time in usec of staleness */
+} SCTP_PACKED;
+
+struct sctp_error_out_of_resource {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_OUT_OF_RESC */
+} SCTP_PACKED;
+
+struct sctp_error_unresolv_addr {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_UNRESOLVABLE_ADDR */
+} SCTP_PACKED;
+
+struct sctp_error_unrecognized_chunk {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_UNRECOG_CHUNK */
+	struct sctp_chunkhdr ch;/* header from chunk in error */
+} SCTP_PACKED;
+
+struct sctp_error_no_user_data {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_NO_USER_DATA */
+	uint32_t tsn;			/* TSN of the empty data chunk */
+} SCTP_PACKED;
+
+struct sctp_error_auth_invalid_hmac {
+	struct sctp_error_cause cause;	/* code=SCTP_CAUSE_UNSUPPORTED_HMACID */
+	uint16_t hmac_id;
+} SCTP_PACKED;
+
+/*
+ * Main SCTP chunk types. We place these here so natd and firewalls in
+ * userland can find them.
+ */
+/************0x00 series ***********/
+#define SCTP_DATA		0x00
+#define SCTP_INITIATION		0x01
+#define SCTP_INITIATION_ACK	0x02
+#define SCTP_SELECTIVE_ACK	0x03
+#define SCTP_HEARTBEAT_REQUEST	0x04
+#define SCTP_HEARTBEAT_ACK	0x05
+#define SCTP_ABORT_ASSOCIATION	0x06
+#define SCTP_SHUTDOWN		0x07
+#define SCTP_SHUTDOWN_ACK	0x08
+#define SCTP_OPERATION_ERROR	0x09
+#define SCTP_COOKIE_ECHO	0x0a
+#define SCTP_COOKIE_ACK		0x0b
+#define SCTP_ECN_ECHO		0x0c
+#define SCTP_ECN_CWR		0x0d
+#define SCTP_SHUTDOWN_COMPLETE	0x0e
+/* RFC4895 */
+#define SCTP_AUTHENTICATION     0x0f
+/* EY nr_sack chunk id*/
+#define SCTP_NR_SELECTIVE_ACK	0x10
+/************0x40 series ***********/
+#define SCTP_IDATA		0x40
+/************0x80 series ***********/
+/* RFC5061 */
+#define	SCTP_ASCONF_ACK		0x80
+/* draft-ietf-stewart-pktdrpsctp */
+#define SCTP_PACKET_DROPPED	0x81
+/* draft-ietf-stewart-strreset-xxx */
+#define SCTP_STREAM_RESET       0x82
+
+/* RFC4820                         */
+#define SCTP_PAD_CHUNK          0x84
+/************0xc0 series ***********/
+/* RFC3758 */
+#define SCTP_FORWARD_CUM_TSN	0xc0
+/* RFC5061 */
+#define SCTP_ASCONF		0xc1
+#define SCTP_IFORWARD_CUM_TSN	0xc2
+
+/* ABORT and SHUTDOWN COMPLETE FLAG */
+#define SCTP_HAD_NO_TCB		0x01
+
+/* Packet dropped flags */
+#define SCTP_FROM_MIDDLE_BOX	SCTP_HAD_NO_TCB
+#define SCTP_BADCRC		0x02
+#define SCTP_PACKET_TRUNCATED	0x04
+
+/* Flag for ECN -CWR */
+#define SCTP_CWR_REDUCE_OVERRIDE 0x01
+#define SCTP_CWR_IN_SAME_WINDOW  0x02
+
+#define SCTP_SAT_NETWORK_MIN	400	/* min ms for RTT to set satellite
+					 * time */
+#define SCTP_SAT_NETWORK_BURST_INCR  2	/* how many times to multiply maxburst
+					 * in sat */
+
+/* Data Chunk Specific Flags */
+#define SCTP_DATA_FRAG_MASK        0x03
+#define SCTP_DATA_MIDDLE_FRAG      0x00
+#define SCTP_DATA_LAST_FRAG        0x01
+#define SCTP_DATA_FIRST_FRAG       0x02
+#define SCTP_DATA_NOT_FRAG         0x03
+#define SCTP_DATA_UNORDERED        0x04
+#define SCTP_DATA_SACK_IMMEDIATELY 0x08
+/* ECN Nonce: SACK Chunk Specific Flags */
+#define SCTP_SACK_NONCE_SUM        0x01
+
+/* CMT DAC algorithm SACK flag */
+#define SCTP_SACK_CMT_DAC          0x80
+
+/*
+ * PCB flags (in sctp_flags bitmask).
+ * Note the features and flags are meant
+ * for use by netstat.
+ */
+#define SCTP_PCB_FLAGS_UDPTYPE		0x00000001
+#define SCTP_PCB_FLAGS_TCPTYPE		0x00000002
+#define SCTP_PCB_FLAGS_BOUNDALL		0x00000004
+#define SCTP_PCB_FLAGS_ACCEPTING	0x00000008
+#define SCTP_PCB_FLAGS_UNBOUND		0x00000010
+#define SCTP_PCB_FLAGS_CLOSE_IP         0x00040000
+#define SCTP_PCB_FLAGS_WAS_CONNECTED    0x00080000
+#define SCTP_PCB_FLAGS_WAS_ABORTED      0x00100000
+/* TCP model support */
+
+#define SCTP_PCB_FLAGS_CONNECTED	0x00200000
+#define SCTP_PCB_FLAGS_IN_TCPPOOL	0x00400000
+#define SCTP_PCB_FLAGS_DONT_WAKE	0x00800000
+#define SCTP_PCB_FLAGS_WAKEOUTPUT	0x01000000
+#define SCTP_PCB_FLAGS_WAKEINPUT	0x02000000
+#define SCTP_PCB_FLAGS_BOUND_V6		0x04000000
+#define SCTP_PCB_FLAGS_BLOCKING_IO	0x08000000
+#define SCTP_PCB_FLAGS_SOCKET_GONE	0x10000000
+#define SCTP_PCB_FLAGS_SOCKET_ALLGONE	0x20000000
+#define SCTP_PCB_FLAGS_SOCKET_CANT_READ	0x40000000
+#if defined(__Userspace__)
+#define SCTP_PCB_FLAGS_BOUND_CONN       0x80000000
+
+/* flags to copy to new PCB */
+#define SCTP_PCB_COPY_FLAGS		(SCTP_PCB_FLAGS_BOUNDALL|\
+					 SCTP_PCB_FLAGS_WAKEINPUT|\
+					 SCTP_PCB_FLAGS_BOUND_V6|\
+					 SCTP_PCB_FLAGS_BOUND_CONN)
+#else
+
+/* flags to copy to new PCB */
+#define SCTP_PCB_COPY_FLAGS		(SCTP_PCB_FLAGS_BOUNDALL|\
+					 SCTP_PCB_FLAGS_WAKEINPUT|\
+					 SCTP_PCB_FLAGS_BOUND_V6)
+#endif
+
+/*
+ * PCB Features (in sctp_features bitmask)
+ */
+#define SCTP_PCB_FLAGS_DO_NOT_PMTUD      0x0000000000000001
+#define SCTP_PCB_FLAGS_EXT_RCVINFO       0x0000000000000002 /* deprecated */
+#define SCTP_PCB_FLAGS_DONOT_HEARTBEAT   0x0000000000000004
+#define SCTP_PCB_FLAGS_FRAG_INTERLEAVE   0x0000000000000008
+#define SCTP_PCB_FLAGS_INTERLEAVE_STRMS  0x0000000000000010
+#define SCTP_PCB_FLAGS_DO_ASCONF         0x0000000000000020
+#define SCTP_PCB_FLAGS_AUTO_ASCONF       0x0000000000000040
+#define SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE  0x0000000000000080
+/* socket options */
+#define SCTP_PCB_FLAGS_NODELAY           0x0000000000000100
+#define SCTP_PCB_FLAGS_AUTOCLOSE         0x0000000000000200
+#define SCTP_PCB_FLAGS_RECVDATAIOEVNT    0x0000000000000400 /* deprecated */
+#define SCTP_PCB_FLAGS_RECVASSOCEVNT     0x0000000000000800
+#define SCTP_PCB_FLAGS_RECVPADDREVNT     0x0000000000001000
+#define SCTP_PCB_FLAGS_RECVPEERERR       0x0000000000002000
+#define SCTP_PCB_FLAGS_RECVSENDFAILEVNT  0x0000000000004000 /* deprecated */
+#define SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT  0x0000000000008000
+#define SCTP_PCB_FLAGS_ADAPTATIONEVNT    0x0000000000010000
+#define SCTP_PCB_FLAGS_PDAPIEVNT         0x0000000000020000
+#define SCTP_PCB_FLAGS_AUTHEVNT          0x0000000000040000
+#define SCTP_PCB_FLAGS_STREAM_RESETEVNT  0x0000000000080000
+#define SCTP_PCB_FLAGS_NO_FRAGMENT       0x0000000000100000
+#define SCTP_PCB_FLAGS_EXPLICIT_EOR      0x0000000000400000
+#define SCTP_PCB_FLAGS_NEEDS_MAPPED_V4   0x0000000000800000
+#define SCTP_PCB_FLAGS_MULTIPLE_ASCONFS  0x0000000001000000
+#define SCTP_PCB_FLAGS_PORTREUSE         0x0000000002000000
+#define SCTP_PCB_FLAGS_DRYEVNT           0x0000000004000000
+#define SCTP_PCB_FLAGS_RECVRCVINFO       0x0000000008000000
+#define SCTP_PCB_FLAGS_RECVNXTINFO       0x0000000010000000
+#define SCTP_PCB_FLAGS_ASSOC_RESETEVNT   0x0000000020000000
+#define SCTP_PCB_FLAGS_STREAM_CHANGEEVNT 0x0000000040000000
+#define SCTP_PCB_FLAGS_RECVNSENDFAILEVNT 0x0000000080000000
+
+/*-
+ * mobility_features parameters (by micchie). Note that
+ * these features are applied against the
+ * sctp_mobility_features flags, not the sctp_features
+ * flags.
+ */
+#define SCTP_MOBILITY_BASE               0x00000001
+#define SCTP_MOBILITY_FASTHANDOFF        0x00000002
+#define SCTP_MOBILITY_PRIM_DELETED       0x00000004
+
+
+#define SCTP_SMALLEST_PMTU 512	 /* smallest pmtu allowed when disabling PMTU discovery */
+
+#if defined(__Userspace_os_Windows)
+#pragma pack()
+#endif
+#undef SCTP_PACKED
+
+#include <netinet/sctp_uio.h>
+
+/* This dictates the size of the packet
+ * collection buffer. This only applies
+ * if SCTP_PACKET_LOGGING is enabled in
+ * your config.
+ */
+#define SCTP_PACKET_LOG_SIZE 65536
+
+/* Maximum delays and such a user can set for options that
+ * take ms.
+ */
+#define SCTP_MAX_SACK_DELAY 500 /* per RFC4960 */
+#define SCTP_MAX_HB_INTERVAL 14400000 /* 4 hours in ms */
+#define SCTP_MAX_COOKIE_LIFE  3600000 /* 1 hour in ms */
+
+
+/* Types of logging/KTR tracing  that can be enabled via the
+ * sysctl net.inet.sctp.sctp_logging. You must also enable
+ * SUBSYS tracing.
+ * Note that you must have the SCTP option in the kernel
+ * to enable these as well.
+ */
+#define SCTP_BLK_LOGGING_ENABLE				0x00000001
+#define SCTP_CWND_MONITOR_ENABLE			0x00000002
+#define SCTP_CWND_LOGGING_ENABLE			0x00000004
+#define SCTP_FLIGHT_LOGGING_ENABLE			0x00000020
+#define SCTP_FR_LOGGING_ENABLE				0x00000040
+#define SCTP_LOCK_LOGGING_ENABLE			0x00000080
+#define SCTP_MAP_LOGGING_ENABLE				0x00000100
+#define SCTP_MBCNT_LOGGING_ENABLE			0x00000200
+#define SCTP_MBUF_LOGGING_ENABLE			0x00000400
+#define SCTP_NAGLE_LOGGING_ENABLE			0x00000800
+#define SCTP_RECV_RWND_LOGGING_ENABLE			0x00001000
+#define SCTP_RTTVAR_LOGGING_ENABLE			0x00002000
+#define SCTP_SACK_LOGGING_ENABLE			0x00004000
+#define SCTP_SACK_RWND_LOGGING_ENABLE			0x00008000
+#define SCTP_SB_LOGGING_ENABLE				0x00010000
+#define SCTP_STR_LOGGING_ENABLE				0x00020000
+#define SCTP_WAKE_LOGGING_ENABLE			0x00040000
+#define SCTP_LOG_MAXBURST_ENABLE			0x00080000
+#define SCTP_LOG_RWND_ENABLE    			0x00100000
+#define SCTP_LOG_SACK_ARRIVALS_ENABLE			0x00200000
+#define SCTP_LTRACE_CHUNK_ENABLE			0x00400000
+#define SCTP_LTRACE_ERROR_ENABLE			0x00800000
+#define SCTP_LAST_PACKET_TRACING			0x01000000
+#define SCTP_THRESHOLD_LOGGING				0x02000000
+#define SCTP_LOG_AT_SEND_2_SCTP				0x04000000
+#define SCTP_LOG_AT_SEND_2_OUTQ				0x08000000
+#define SCTP_LOG_TRY_ADVANCE				0x10000000
+
+#endif				/* !_NETINET_SCTP_H_ */
diff --git a/usrsctplib/netinet/sctp_asconf.c b/usrsctplib/netinet/sctp_asconf.c
new file mode 100755
index 0000000..70122ac
--- /dev/null
+++ b/usrsctplib/netinet/sctp_asconf.c
@@ -0,0 +1,3550 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_timer.h>
+
+/*
+ * debug flags:
+ * SCTP_DEBUG_ASCONF1: protocol info, general info and errors
+ * SCTP_DEBUG_ASCONF2: detailed info
+ */
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 1
+#endif
+
+/*
+ * RFC 5061
+ *
+ * An ASCONF parameter queue exists per asoc which holds the pending address
+ * operations.  Lists are updated upon receipt of ASCONF-ACK.
+ *
+ * A restricted_addrs list exists per assoc to hold local addresses that are
+ * not (yet) usable by the assoc as a source address.  These addresses are
+ * either pending an ASCONF operation (and exist on the ASCONF parameter
+ * queue), or they are permanently restricted (the peer has returned an
+ * ERROR indication to an ASCONF(ADD), or the peer does not support ASCONF).
+ *
+ * Deleted addresses are always immediately removed from the lists as they will
+ * (shortly) no longer exist in the kernel.  We send ASCONFs as a courtesy,
+ * only if allowed.
+ */
+
+/*
+ * ASCONF parameter processing.
+ * response_required: set if a reply is required (e.g. SUCCESS_REPORT).
+ * returns an mbuf containing an "error" response parameter, or NULL ("success") if ok.
+ * FIX: allocating this many mbufs on the fly is pretty inefficient...
+ */
+static struct mbuf *
+sctp_asconf_success_response(uint32_t id)
+{
+	struct mbuf *m_reply = NULL;
+	struct sctp_asconf_paramhdr *aph;
+
+	m_reply = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_paramhdr),
+					0, M_NOWAIT, 1, MT_DATA);
+	if (m_reply == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_success_response: couldn't get mbuf!\n");
+		return (NULL);
+	}
+	aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+	aph->correlation_id = id;
+	aph->ph.param_type = htons(SCTP_SUCCESS_REPORT);
+	aph->ph.param_length = sizeof(struct sctp_asconf_paramhdr);
+	SCTP_BUF_LEN(m_reply) = aph->ph.param_length;
+	aph->ph.param_length = htons(aph->ph.param_length);
+
+	return (m_reply);
+}
+
+static struct mbuf *
+sctp_asconf_error_response(uint32_t id, uint16_t cause, uint8_t *error_tlv,
+			   uint16_t tlv_length)
+{
+	struct mbuf *m_reply = NULL;
+	struct sctp_asconf_paramhdr *aph;
+	struct sctp_error_cause *error;
+	uint8_t *tlv;
+
+	m_reply = sctp_get_mbuf_for_msg((sizeof(struct sctp_asconf_paramhdr) +
+					 tlv_length +
+					 sizeof(struct sctp_error_cause)),
+					0, M_NOWAIT, 1, MT_DATA);
+	if (m_reply == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_error_response: couldn't get mbuf!\n");
+		return (NULL);
+	}
+	aph = mtod(m_reply, struct sctp_asconf_paramhdr *);
+	error = (struct sctp_error_cause *)(aph + 1);
+
+	aph->correlation_id = id;
+	aph->ph.param_type = htons(SCTP_ERROR_CAUSE_IND);
+	error->code = htons(cause);
+	error->length = tlv_length + sizeof(struct sctp_error_cause);
+	aph->ph.param_length = error->length +
+	    sizeof(struct sctp_asconf_paramhdr);
+
+	if (aph->ph.param_length > MLEN) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_error_response: tlv_length (%xh) too big\n",
+			tlv_length);
+		sctp_m_freem(m_reply);	/* discard */
+		return (NULL);
+	}
+	if (error_tlv != NULL) {
+		tlv = (uint8_t *) (error + 1);
+		memcpy(tlv, error_tlv, tlv_length);
+	}
+	SCTP_BUF_LEN(m_reply) = aph->ph.param_length;
+	error->length = htons(error->length);
+	aph->ph.param_length = htons(aph->ph.param_length);
+
+	return (m_reply);
+}
+
+static struct mbuf *
+sctp_process_asconf_add_ip(struct sockaddr *src, struct sctp_asconf_paramhdr *aph,
+                           struct sctp_tcb *stcb, int send_hb, int response_required)
+{
+	struct sctp_nets *net;
+	struct mbuf *m_reply = NULL;
+	union sctp_sockstore store;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type, aparam_length;
+#if defined(INET) || defined(INET6)
+	uint16_t param_length;
+#endif
+	struct sockaddr *sa;
+	int zero_address = 0;
+	int bad_address = 0;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *v6addr;
+#endif
+
+	aparam_length = ntohs(aph->ph.param_length);
+	ph = (struct sctp_paramhdr *)(aph + 1);
+	param_type = ntohs(ph->param_type);
+#if defined(INET) || defined(INET6)
+	param_length = ntohs(ph->param_length);
+#endif
+	sa = &store.sa;
+	switch (param_type) {
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v4addr = (struct sctp_ipv4addr_param *)ph;
+		sin = &store.sin;
+		bzero(sin, sizeof(*sin));
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+		sin->sin_port = stcb->rport;
+		sin->sin_addr.s_addr = v4addr->addr;
+		if ((sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+		    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+			bad_address = 1;
+		}
+		if (sin->sin_addr.s_addr == INADDR_ANY)
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v6addr = (struct sctp_ipv6addr_param *)ph;
+		sin6 = &store.sin6;
+		bzero(sin6, sizeof(*sin6));
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		sin6->sin6_port = stcb->rport;
+		memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+		    sizeof(struct in6_addr));
+		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+			bad_address = 1;
+		}
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_add_ip: adding ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+	default:
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}			/* end switch */
+
+	/* if 0.0.0.0/::0, add the source address instead */
+	if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+		sa = src;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "process_asconf_add_ip: using source addr ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+	}
+	/* add the address */
+	if (bad_address) {
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_INVALID_PARAM, (uint8_t *) aph,
+		    aparam_length);
+	} else if (sctp_add_remote_addr(stcb, sa, &net, stcb->asoc.port,
+	                                SCTP_DONOT_SETSCOPE,
+	                                SCTP_ADDR_DYNAMIC_ADDED) != 0) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_add_ip: error adding address\n");
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_RESOURCE_SHORTAGE, (uint8_t *) aph,
+		    aparam_length);
+	} else {
+		/* notify upper layer */
+		sctp_ulp_notify(SCTP_NOTIFY_ASCONF_ADD_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+		if (response_required) {
+			m_reply =
+			    sctp_asconf_success_response(aph->correlation_id);
+		}
+		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
+		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+		                 stcb, net);
+		if (send_hb) {
+			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+		}
+	}
+	return (m_reply);
+}
+
+static int
+sctp_asconf_del_remote_addrs_except(struct sctp_tcb *stcb, struct sockaddr *src)
+{
+	struct sctp_nets *src_net, *net;
+
+	/* make sure the source address exists as a destination net */
+	src_net = sctp_findnet(stcb, src);
+	if (src_net == NULL) {
+		/* not found */
+		return (-1);
+	}
+
+	/* delete all destination addresses except the source */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		if (net != src_net) {
+			/* delete this address */
+			sctp_remove_net(stcb, net);
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"asconf_del_remote_addrs_except: deleting ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1,
+				     (struct sockaddr *)&net->ro._l_addr);
+			/* notify upper layer */
+			sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0,
+			    (struct sockaddr *)&net->ro._l_addr, SCTP_SO_NOT_LOCKED);
+		}
+	}
+	return (0);
+}
+
+static struct mbuf *
+sctp_process_asconf_delete_ip(struct sockaddr *src,
+                              struct sctp_asconf_paramhdr *aph,
+			      struct sctp_tcb *stcb, int response_required)
+{
+	struct mbuf *m_reply = NULL;
+	union sctp_sockstore store;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type, aparam_length;
+#if defined(INET) || defined(INET6)
+	uint16_t param_length;
+#endif
+	struct sockaddr *sa;
+	int zero_address = 0;
+	int result;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *v6addr;
+#endif
+
+	aparam_length = ntohs(aph->ph.param_length);
+	ph = (struct sctp_paramhdr *)(aph + 1);
+	param_type = ntohs(ph->param_type);
+#if defined(INET) || defined(INET6)
+	param_length = ntohs(ph->param_length);
+#endif
+	sa = &store.sa;
+	switch (param_type) {
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v4addr = (struct sctp_ipv4addr_param *)ph;
+		sin = &store.sin;
+		bzero(sin, sizeof(*sin));
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+		sin->sin_port = stcb->rport;
+		sin->sin_addr.s_addr = v4addr->addr;
+		if (sin->sin_addr.s_addr == INADDR_ANY)
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_delete_ip: deleting ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v6addr = (struct sctp_ipv6addr_param *)ph;
+		sin6 = &store.sin6;
+		bzero(sin6, sizeof(*sin6));
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		sin6->sin6_port = stcb->rport;
+		memcpy(&sin6->sin6_addr, v6addr->addr,
+		    sizeof(struct in6_addr));
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_delete_ip: deleting ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+	default:
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}
+
+	/* make sure the source address is not being deleted */
+	if (sctp_cmpaddr(sa, src)) {
+		/* trying to delete the source address! */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete source addr\n");
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_DELETING_SRC_ADDR, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}
+
+	/* if deleting 0.0.0.0/::0, delete all addresses except src addr */
+	if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+		result = sctp_asconf_del_remote_addrs_except(stcb, src);
+
+		if (result) {
+			/* src address did not exist? */
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: src addr does not exist?\n");
+			/* what error to reply with?? */
+			m_reply =
+			    sctp_asconf_error_response(aph->correlation_id,
+			    SCTP_CAUSE_REQUEST_REFUSED, (uint8_t *) aph,
+			    aparam_length);
+		} else if (response_required) {
+			m_reply =
+			    sctp_asconf_success_response(aph->correlation_id);
+		}
+		return (m_reply);
+	}
+
+	/* delete the address */
+	result = sctp_del_remote_addr(stcb, sa);
+	/*
+	 * note if result == -2, the address doesn't exist in the asoc but
+	 * since it's being deleted anyways, we just ack the delete -- but
+	 * this probably means something has already gone awry
+	 */
+	if (result == -1) {
+		/* only one address in the asoc */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_delete_ip: tried to delete last IP addr!\n");
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_DELETING_LAST_ADDR, (uint8_t *) aph,
+		    aparam_length);
+	} else {
+		if (response_required) {
+			m_reply = sctp_asconf_success_response(aph->correlation_id);
+		}
+		/* notify upper layer */
+		sctp_ulp_notify(SCTP_NOTIFY_ASCONF_DELETE_IP, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+	}
+	return (m_reply);
+}
+
+static struct mbuf *
+sctp_process_asconf_set_primary(struct sockaddr *src,
+				struct sctp_asconf_paramhdr *aph,
+				struct sctp_tcb *stcb, int response_required)
+{
+	struct mbuf *m_reply = NULL;
+	union sctp_sockstore store;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type, aparam_length;
+#if defined(INET) || defined(INET6)
+	uint16_t param_length;
+#endif
+	struct sockaddr *sa;
+	int zero_address = 0;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *v4addr;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *v6addr;
+#endif
+
+	aparam_length = ntohs(aph->ph.param_length);
+	ph = (struct sctp_paramhdr *)(aph + 1);
+	param_type = ntohs(ph->param_type);
+#if defined(INET) || defined(INET6)
+	param_length = ntohs(ph->param_length);
+#endif
+	sa = &store.sa;
+	switch (param_type) {
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv4addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v4addr = (struct sctp_ipv4addr_param *)ph;
+		sin = &store.sin;
+		bzero(sin, sizeof(*sin));
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+		sin->sin_addr.s_addr = v4addr->addr;
+		if (sin->sin_addr.s_addr == INADDR_ANY)
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		if (param_length != sizeof(struct sctp_ipv6addr_param)) {
+			/* invalid param size */
+			return (NULL);
+		}
+		v6addr = (struct sctp_ipv6addr_param *)ph;
+		sin6 = &store.sin6;
+		bzero(sin6, sizeof(*sin6));
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		memcpy((caddr_t)&sin6->sin6_addr, v6addr->addr,
+		    sizeof(struct in6_addr));
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			zero_address = 1;
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "process_asconf_set_primary: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		break;
+#endif
+	default:
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+		    aparam_length);
+		return (m_reply);
+	}
+
+	/* if 0.0.0.0/::0, use the source address instead */
+	if (zero_address && SCTP_BASE_SYSCTL(sctp_nat_friendly)) {
+		sa = src;
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_set_primary: using source addr ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+	}
+	/* set the primary address */
+	if (sctp_set_primary_addr(stcb, sa, NULL) == 0) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_set_primary: primary address set\n");
+		/* notify upper layer */
+		sctp_ulp_notify(SCTP_NOTIFY_ASCONF_SET_PRIMARY, stcb, 0, sa, SCTP_SO_NOT_LOCKED);
+		if ((stcb->asoc.primary_destination->dest_state & SCTP_ADDR_REACHABLE) &&
+		    (!(stcb->asoc.primary_destination->dest_state & SCTP_ADDR_PF)) &&
+		    (stcb->asoc.alternate)) {
+			sctp_free_remote_addr(stcb->asoc.alternate);
+			stcb->asoc.alternate = NULL;
+		}
+		if (response_required) {
+			m_reply = sctp_asconf_success_response(aph->correlation_id);
+		}
+		/* Mobility adaptation.
+		   Ideally, on reception of a SET PRIMARY together with a DELETE IP
+		   ADDRESS of the previous primary destination, unacknowledged
+		   DATA is retransmitted immediately to the new primary
+		   destination for a seamless handover.
+		   If the destination is UNCONFIRMED and marked REQ_PRIM,
+		   the retransmission occurs on reception of the
+		   HEARTBEAT-ACK.  (See sctp_handle_heartbeat_ack in
+		   sctp_input.c)
+		   Also, when the primary destination changes, it is better
+		   that all subsequent new DATA, as well as already queued DATA,
+		   are transmitted to the new primary destination. (by micchie)
+		 */
+		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                 SCTP_MOBILITY_BASE) ||
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_FASTHANDOFF)) &&
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_PRIM_DELETED) &&
+		    (stcb->asoc.primary_destination->dest_state &
+		     SCTP_ADDR_UNCONFIRMED) == 0) {
+
+			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
+			                stcb->sctp_ep, stcb, NULL,
+			                SCTP_FROM_SCTP_ASCONF + SCTP_LOC_1);
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+					SCTP_MOBILITY_FASTHANDOFF)) {
+				sctp_assoc_immediate_retrans(stcb,
+						stcb->asoc.primary_destination);
+			}
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+					SCTP_MOBILITY_BASE)) {
+				sctp_move_chunks_from_net(stcb,
+						stcb->asoc.deleted_primary);
+			}
+			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
+						stcb->asoc.deleted_primary);
+		}
+	} else {
+		/* couldn't set the requested primary address! */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_asconf_set_primary: set primary failed!\n");
+		/* must have been an invalid address, so report */
+		m_reply = sctp_asconf_error_response(aph->correlation_id,
+		    SCTP_CAUSE_UNRESOLVABLE_ADDR, (uint8_t *) aph,
+		    aparam_length);
+	}
+
+	return (m_reply);
+}
+
+/*
+ * handles an ASCONF chunk.
+ * if all parameters are processed ok, send a plain (empty) ASCONF-ACK
+ */
+void
+sctp_handle_asconf(struct mbuf *m, unsigned int offset,
+                   struct sockaddr *src,
+		   struct sctp_asconf_chunk *cp, struct sctp_tcb *stcb,
+		   int first)
+{
+	struct sctp_association *asoc;
+	uint32_t serial_num;
+	struct mbuf *n, *m_ack, *m_result, *m_tail;
+	struct sctp_asconf_ack_chunk *ack_cp;
+	struct sctp_asconf_paramhdr *aph;
+	struct sctp_ipv6addr_param *p_addr;
+	unsigned int asconf_limit, cnt;
+	int error = 0;		/* did an error occur? */
+
+	/* asconf param buffer */
+	uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_asconf_ack *ack, *ack_next;
+
+	/* verify minimum length */
+	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_chunk)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: chunk too small = %xh\n",
+			ntohs(cp->ch.chunk_length));
+		return;
+	}
+	asoc = &stcb->asoc;
+	serial_num = ntohl(cp->serial_number);
+
+	if (SCTP_TSN_GE(asoc->asconf_seq_in, serial_num)) {
+		/* got a duplicate ASCONF */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: got duplicate serial number = %xh\n",
+			serial_num);
+		return;
+	} else if (serial_num != (asoc->asconf_seq_in + 1)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: incorrect serial number = %xh (expected next = %xh)\n",
+			serial_num, asoc->asconf_seq_in + 1);
+		return;
+	}
+
+	/* it's the expected "next" sequence number, so process it */
+	asoc->asconf_seq_in = serial_num;	/* update sequence */
+	/* get length of all the param's in the ASCONF */
+	asconf_limit = offset + ntohs(cp->ch.chunk_length);
+	SCTPDBG(SCTP_DEBUG_ASCONF1,
+		"handle_asconf: asconf_limit=%u, sequence=%xh\n",
+		asconf_limit, serial_num);
+
+	if (first) {
+		/* delete old cache */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,"handle_asconf: Now processing first ASCONF. Try to delete old cache\n");
+
+		TAILQ_FOREACH_SAFE(ack, &asoc->asconf_ack_sent, next, ack_next) {
+			if (ack->serial_number == serial_num)
+				break;
+			SCTPDBG(SCTP_DEBUG_ASCONF1,"handle_asconf: delete old(%u) < first(%u)\n",
+			    ack->serial_number, serial_num);
+			TAILQ_REMOVE(&asoc->asconf_ack_sent, ack, next);
+			if (ack->data != NULL) {
+				sctp_m_freem(ack->data);
+			}
+			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), ack);
+		}
+	}
+
+	m_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_ack_chunk), 0,
+				      M_NOWAIT, 1, MT_DATA);
+	if (m_ack == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: couldn't get mbuf!\n");
+		return;
+	}
+	m_tail = m_ack;		/* current reply chain's tail */
+
+	/* fill in ASCONF-ACK header */
+	ack_cp = mtod(m_ack, struct sctp_asconf_ack_chunk *);
+	ack_cp->ch.chunk_type = SCTP_ASCONF_ACK;
+	ack_cp->ch.chunk_flags = 0;
+	ack_cp->serial_number = htonl(serial_num);
+	/* set initial lengths (eg. just an ASCONF-ACK), ntohx at the end! */
+	SCTP_BUF_LEN(m_ack) = sizeof(struct sctp_asconf_ack_chunk);
+	ack_cp->ch.chunk_length = sizeof(struct sctp_asconf_ack_chunk);
+
+	/* skip the lookup address parameter */
+	offset += sizeof(struct sctp_asconf_chunk);
+	p_addr = (struct sctp_ipv6addr_param *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr), (uint8_t *)&aparam_buf);
+	if (p_addr == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf: couldn't get lookup addr!\n");
+		/* respond with a missing/invalid mandatory parameter error */
+		return;
+	}
+	/* param_length is already validated in process_control... */
+	offset += ntohs(p_addr->ph.param_length);	/* skip lookup addr */
+	/* get pointer to first asconf param in ASCONF */
+	aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_asconf_paramhdr), (uint8_t *)&aparam_buf);
+	if (aph == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "Empty ASCONF received?\n");
+		goto send_reply;
+	}
+	/* process through all parameters */
+	cnt = 0;
+	while (aph != NULL) {
+		unsigned int param_length, param_type;
+
+		param_type = ntohs(aph->ph.param_type);
+		param_length = ntohs(aph->ph.param_length);
+		if (offset + param_length > asconf_limit) {
+			/* parameter goes beyond end of chunk! */
+			sctp_m_freem(m_ack);
+			return;
+		}
+		m_result = NULL;
+
+		if (param_length > sizeof(aparam_buf)) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) larger than buffer size!\n", param_length);
+			sctp_m_freem(m_ack);
+			return;
+		}
+		if (param_length <= sizeof(struct sctp_paramhdr)) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: param length (%u) too short\n", param_length);
+			/* free the partially built ACK and stop, as the other error paths do */
+			sctp_m_freem(m_ack);
+			return;
+		}
+		/* get the entire parameter */
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+		if (aph == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: couldn't get entire param\n");
+			sctp_m_freem(m_ack);
+			return;
+		}
+		switch (param_type) {
+		case SCTP_ADD_IP_ADDRESS:
+			m_result = sctp_process_asconf_add_ip(src, aph, stcb,
+			    (cnt < SCTP_BASE_SYSCTL(sctp_hb_maxburst)), error);
+			cnt++;
+			break;
+		case SCTP_DEL_IP_ADDRESS:
+			m_result = sctp_process_asconf_delete_ip(src, aph, stcb,
+			    error);
+			break;
+		case SCTP_ERROR_CAUSE_IND:
+			/* not valid in an ASCONF chunk */
+			break;
+		case SCTP_SET_PRIM_ADDR:
+			m_result = sctp_process_asconf_set_primary(src, aph,
+			    stcb, error);
+			break;
+		case SCTP_NAT_VTAGS:
+		        SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: sees a NAT VTAG state parameter\n");
+		        break;
+		case SCTP_SUCCESS_REPORT:
+			/* not valid in an ASCONF chunk */
+			break;
+		case SCTP_ULP_ADAPTATION:
+			/* FIX */
+			break;
+		default:
+			if ((param_type & 0x8000) == 0) {
+				/* Been told to STOP at this param */
+				asconf_limit = offset;
+				/*
+				 * FIX FIX - We need to call
+				 * sctp_arethere_unrecognized_parameters()
+				 * to get an operr and send it for any
+				 * param's with the 0x4000 bit set OR do it
+				 * here ourselves... note we still must STOP
+				 * if the 0x8000 bit is clear.
+				 */
+			}
+			/* unknown/invalid param type */
+			break;
+		} /* switch */
+
+		/* add any (error) result to the reply mbuf chain */
+		if (m_result != NULL) {
+			SCTP_BUF_NEXT(m_tail) = m_result;
+			m_tail = m_result;
+			/* update lengths, make sure it's aligned too */
+			SCTP_BUF_LEN(m_result) = SCTP_SIZE32(SCTP_BUF_LEN(m_result));
+			ack_cp->ch.chunk_length += SCTP_BUF_LEN(m_result);
+			/* set flag to force success reports */
+			error = 1;
+		}
+		offset += SCTP_SIZE32(param_length);
+		/* update remaining ASCONF message length to process */
+		if (offset >= asconf_limit) {
+			/* no more data in the mbuf chain */
+			break;
+		}
+		/* get pointer to next asconf param */
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+		    sizeof(struct sctp_asconf_paramhdr),
+		    (uint8_t *)&aparam_buf);
+		if (aph == NULL) {
+			/* can't get an asconf paramhdr */
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: can't get asconf param hdr!\n");
+			/* FIX ME - add error here... */
+		}
+	}
+
+ send_reply:
+	ack_cp->ch.chunk_length = htons(ack_cp->ch.chunk_length);
+	/* save the ASCONF-ACK reply */
+	ack = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asconf_ack),
+	    struct sctp_asconf_ack);
+	if (ack == NULL) {
+		sctp_m_freem(m_ack);
+		return;
+	}
+	ack->serial_number = serial_num;
+	ack->last_sent_to = NULL;
+	ack->data = m_ack;
+	ack->len = 0;
+	for (n = m_ack; n != NULL; n = SCTP_BUF_NEXT(n)) {
+		ack->len += SCTP_BUF_LEN(n);
+	}
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_ack_sent, ack, next);
+
+	/* see if last_control_chunk_from is set properly (use IP src addr) */
+	if (stcb->asoc.last_control_chunk_from == NULL) {
+		/*
+		 * this could happen if the source address was just newly
+		 * added
+		 */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: looking up net for IP source address\n");
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "Looking for IP source: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, src);
+		/* look up the from address */
+		stcb->asoc.last_control_chunk_from = sctp_findnet(stcb, src);
+#ifdef SCTP_DEBUG
+		if (stcb->asoc.last_control_chunk_from == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf: IP source address not found?!\n");
+		}
+#endif
+	}
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_asconf_addr_match(struct sctp_asconf_addr *aa, struct sockaddr *sa)
+{
+	switch (sa->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* XXX scopeid */
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+
+		if ((aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) &&
+		    (memcmp(&aa->ap.addrp.addr, &sin6->sin6_addr,
+		    sizeof(struct in6_addr)) == 0)) {
+			return (1);
+		}
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+		if ((aa->ap.addrp.ph.param_type == SCTP_IPV4_ADDRESS) &&
+		    (memcmp(&aa->ap.addrp.addr, &sin->sin_addr,
+		    sizeof(struct in_addr)) == 0)) {
+			return (1);
+		}
+		break;
+	}
+#endif
+	default:
+		break;
+	}
+	return (0);
+}
+
+/*
+ * does the address match? returns 0 if not, 1 if so
+ */
+static uint32_t
+sctp_addr_match(struct sctp_paramhdr *ph, struct sockaddr *sa)
+{
+#if defined(INET) || defined(INET6)
+	uint16_t param_type, param_length;
+
+	param_type = ntohs(ph->param_type);
+	param_length = ntohs(ph->param_length);
+#endif
+	switch (sa->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* XXX scopeid */
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
+		struct sctp_ipv6addr_param *v6addr;
+
+		v6addr = (struct sctp_ipv6addr_param *)ph;
+		if ((param_type == SCTP_IPV6_ADDRESS) &&
+		    (param_length == sizeof(struct sctp_ipv6addr_param)) &&
+		    (memcmp(&v6addr->addr, &sin6->sin6_addr,
+		    sizeof(struct in6_addr)) == 0)) {
+			return (1);
+		}
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+		struct sctp_ipv4addr_param *v4addr;
+
+		v4addr = (struct sctp_ipv4addr_param *)ph;
+		if ((param_type == SCTP_IPV4_ADDRESS) &&
+		    (param_length == sizeof(struct sctp_ipv4addr_param)) &&
+		    (memcmp(&v4addr->addr, &sin->sin_addr,
+		    sizeof(struct in_addr)) == 0)) {
+			return (1);
+		}
+		break;
+	}
+#endif
+	default:
+		break;
+	}
+	return (0);
+}
+/*
+ * Cleanup for non-responded/OP ERR'd ASCONF
+ */
+void
+sctp_asconf_cleanup(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/*
+	 * clear out any existing asconfs going out
+	 */
+	sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
+			SCTP_FROM_SCTP_ASCONF + SCTP_LOC_2);
+	stcb->asoc.asconf_seq_out_acked = stcb->asoc.asconf_seq_out;
+	/* remove the old ASCONF on our outbound queue */
+	sctp_toss_old_asconf(stcb);
+}
+
+/*
+ * cleanup any cached source addresses that may be topologically
+ * incorrect after a new address has been added to this interface.
+ */
+static void
+sctp_asconf_nets_cleanup(struct sctp_tcb *stcb, struct sctp_ifn *ifn)
+{
+	struct sctp_nets *net;
+
+	/*
+	 * Ideally, we want to only clear cached routes and source addresses
+	 * that are topologically incorrect.  But since there is no easy way
+	 * to know whether the newly added address on the ifn would cause a
+	 * routing change (i.e. a new egress interface would be chosen)
+	 * without doing a new routing lookup and source address selection,
+	 * we will (for now) just flush any cached route using a different
+	 * ifn (and cached source addrs) and let output re-choose them during
+	 * the next send on that net.
+	 */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		/*
+		 * clear any cached route (and cached source address) if the
+		 * route's interface is NOT the same as the address change.
+		 * If it's the same interface, just clear the cached source
+		 * address.
+		 */
+		if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro) &&
+		    ((ifn == NULL) ||
+		     (SCTP_GET_IF_INDEX_FROM_ROUTE(&net->ro) != ifn->ifn_index))) {
+			/* clear any cached route */
+			RTFREE(net->ro.ro_rt);
+			net->ro.ro_rt = NULL;
+		}
+		/* clear any cached source address */
+		if (net->src_addr_selected) {
+			sctp_free_ifa(net->ro._s_addr);
+			net->ro._s_addr = NULL;
+			net->src_addr_selected = 0;
+		}
+	}
+}
+
+
+void
+sctp_assoc_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *dstnet)
+{
+	int error;
+
+	if (dstnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
+		return;
+	}
+	if (stcb->asoc.deleted_primary == NULL) {
+		return;
+	}
+
+	if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "assoc_immediate_retrans: Deleted primary is ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "Current Primary is ");
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.primary_destination->ro._l_addr.sa);
+		sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb,
+				stcb->asoc.deleted_primary,
+				SCTP_FROM_SCTP_ASCONF + SCTP_LOC_3);
+		stcb->asoc.num_send_timers_up--;
+		if (stcb->asoc.num_send_timers_up < 0) {
+			stcb->asoc.num_send_timers_up = 0;
+		}
+		SCTP_TCB_LOCK_ASSERT(stcb);
+		error = sctp_t3rxt_timer(stcb->sctp_ep, stcb,
+					stcb->asoc.deleted_primary);
+		if (error) {
+			SCTP_INP_DECR_REF(stcb->sctp_ep);
+			return;
+		}
+		SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, stcb->sctp_ep, stcb, stcb->asoc.deleted_primary);
+#endif
+		sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+		if ((stcb->asoc.num_send_timers_up == 0) &&
+		    (stcb->asoc.sent_queue_cnt > 0)) {
+			struct sctp_tmit_chunk *chk;
+
+			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+					 stcb, chk->whoTo);
+		}
+	}
+	return;
+}
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
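+/*
+ * forward declaration: sctp_asconf_queue_mgmt() is used by
+ * sctp_path_check_and_react() below and defined later in this file.
+ */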
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *, struct sctp_ifa *, uint16_t);
+
+void
+sctp_net_immediate_retrans(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	struct sctp_tmit_chunk *chk;
+
+	SCTPDBG(SCTP_DEBUG_ASCONF1, "net_immediate_retrans: RTO is %d\n", net->RTO);
+	sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net,
+	                SCTP_FROM_SCTP_ASCONF + SCTP_LOC_4);
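+	/* reset congestion control state and the error count on this path
+	 * before marking its outstanding DATA for retransmission */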
+	stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+	net->error_count = 0;
+	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+		if (chk->whoTo == net) {
+			if (chk->sent < SCTP_DATAGRAM_RESEND) {
+				chk->sent = SCTP_DATAGRAM_RESEND;
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+				sctp_flight_size_decrease(chk);
+				sctp_total_flight_decrease(stcb, chk);
+				net->marked_retrans++;
+				stcb->asoc.marked_retrans++;
+			}
+		}
+	}
+	if (net->marked_retrans) {
+		sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+	}
+}
+
+static void
+sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa)
+{
+	struct sctp_nets *net;
+	int addrnum, changed;
+
+	/*   If the number of local valid addresses is 1, the valid address
+	     is probably the newly added address.
+	     If several valid addresses exist in this association, a source
+	     address may not be changed.  Additionally, they can be
+	     configured on the same interface as "alias" addresses.
+	     (by micchie)
+	 */
+	addrnum = sctp_local_addr_count(stcb);
+	SCTPDBG(SCTP_DEBUG_ASCONF1, "p_check_react(): %d local addresses\n",
+		addrnum);
+	if (addrnum == 1) {
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+			/* clear any cached route and source address */
+			if (net->ro.ro_rt) {
+				RTFREE(net->ro.ro_rt);
+				net->ro.ro_rt = NULL;
+			}
+			if (net->src_addr_selected) {
+				sctp_free_ifa(net->ro._s_addr);
+				net->ro._s_addr = NULL;
+				net->src_addr_selected = 0;
+			}
+			/* Retransmit unacknowledged DATA chunks immediately */
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+			                                SCTP_MOBILITY_FASTHANDOFF)) {
+				sctp_net_immediate_retrans(stcb, net);
+			}
+			/* also, SET PRIMARY is maybe already sent */
+		}
+		return;
+	}
+
+	/* Multiple local addresses exist in the association.  */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		/* clear any cached route and source address */
+		if (net->ro.ro_rt) {
+			RTFREE(net->ro.ro_rt);
+			net->ro.ro_rt = NULL;
+		}
+		if (net->src_addr_selected) {
+			sctp_free_ifa(net->ro._s_addr);
+			net->ro._s_addr = NULL;
+			net->src_addr_selected = 0;
+		}
+		/* Check whether the nexthop corresponds to the new address.
+		   If the new address corresponds to the current nexthop,
+		   the path will be changed.
+		   If the new address does NOT correspond to the current
+		   nexthop, the path will not be changed.
+		 */
+		SCTP_RTALLOC((sctp_route_t *)&net->ro,
+			     stcb->sctp_ep->def_vrf_id,
+			     stcb->sctp_ep->fibnum);
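+		/* if no route could be allocated, there is no nexthop to compare against */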
+		if (net->ro.ro_rt == NULL)
+			continue;
+
+		changed = 0;
+		switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+		case AF_INET:
+			if (sctp_v4src_match_nexthop(newifa, (sctp_route_t *)&net->ro)) {
+				changed = 1;
+			}
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			if (sctp_v6src_match_nexthop(
+			    &newifa->address.sin6, (sctp_route_t *)&net->ro)) {
+				changed = 1;
+			}
+			break;
+#endif
+		default:
+			break;
+		}
+		/* if the newly added address is not related to the routing
+		   information, we skip it.
+		 */
+		if (changed == 0)
+			continue;
+		/* Retransmit unacknowledged DATA chunks immediately */
+		if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_FASTHANDOFF)) {
+			sctp_net_immediate_retrans(stcb, net);
+		}
+		/* Send SET PRIMARY for this new address */
+		if (net == stcb->asoc.primary_destination) {
+			(void)sctp_asconf_queue_mgmt(stcb, newifa,
+						     SCTP_SET_PRIM_ADDR);
+		}
+	}
+}
+#endif /* __FreeBSD__  __APPLE__  __Userspace__ */
+
+/*
+ * process an ADD/DELETE IP ack from peer.
+ * addr: corresponding sctp_ifa to the address being added/deleted.
+ * type: SCTP_ADD_IP_ADDRESS or SCTP_DEL_IP_ADDRESS.
+ * flag: 1=success, 0=failure.
+ */
+static void
+sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr, uint32_t flag)
+{
+	/*
+	 * do the necessary asoc list work- if we get a failure indication,
+	 * leave the address on the assoc's restricted list.  If we get a
+	 * success indication, remove the address from the restricted list.
+	 */
+	/*
+	 * Note: this will only occur for ADD_IP_ADDRESS, since
+	 * DEL_IP_ADDRESS is never actually added to the list...
+	 */
+	if (flag) {
+		/* success case, so remove from the restricted list */
+		sctp_del_local_addr_restricted(stcb, addr);
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+		if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_BASE) ||
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_FASTHANDOFF)) {
+			sctp_path_check_and_react(stcb, addr);
+			return;
+		}
+#endif /* __FreeBSD__ __APPLE__ __Userspace__ */
+		/* clear any cached/topologically incorrect source addresses */
+		sctp_asconf_nets_cleanup(stcb, addr->ifn_p);
+	}
+	/* else, leave it on the list */
+}
+
+/*
+ * add an asconf add/delete/set primary IP address parameter to the queue.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if queued, -1 if not queued/removed.
+ * NOTE: if adding, but a delete for the same address is already scheduled
+ * (and not yet sent out), simply remove it from queue.  Same for deleting
+ * an address already scheduled for add.  If a duplicate operation is found,
+ * ignore the new one.
+ */
+static int
+sctp_asconf_queue_mgmt(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+		       uint16_t type)
+{
+	struct sctp_asconf_addr *aa, *aa_next;
+
+	/* make sure the request isn't already in the queue */
+	TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+		/* address match? */
+		if (sctp_asconf_addr_match(aa, &ifa->address.sa) == 0)
+			continue;
+		/* is the request already in queue but not sent?
+		 * skip over requests that have already been sent, in order to resolve the following case:
+		 *  1. arrival of ADD, then sent
+		 *  2. arrival of DEL. we can't remove the ADD request already sent
+		 *  3. arrival of ADD
+		 */
+		if (aa->ap.aph.ph.param_type == type && aa->sent == 0) {
+			return (-1);
+		}
+		/* is the negative request already in queue, and not sent */
+		if ((aa->sent == 0) && (type == SCTP_ADD_IP_ADDRESS) &&
+		    (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS)) {
+			/* add requested, delete already queued */
+			TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+			/* remove the ifa from the restricted list */
+			sctp_del_local_addr_restricted(stcb, ifa);
+			/* free the asconf param */
+			SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+			SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: add removes queued entry\n");
+			return (-1);
+		}
+		if ((aa->sent == 0) && (type == SCTP_DEL_IP_ADDRESS) &&
+		    (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS)) {
+			/* delete requested, add already queued */
+			TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+			/* remove the aa->ifa from the restricted list */
+			sctp_del_local_addr_restricted(stcb, aa->ifa);
+			/* free the asconf param */
+			SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+			SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_mgmt: delete removes queued entry\n");
+			return (-1);
+		}
+	} /* for each aa */
+
+	/* adding new request to the queue */
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+		    SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "asconf_queue_mgmt: failed to get memory!\n");
+		return (-1);
+	}
+	aa->special_del = 0;
+	/* fill in asconf address parameter fields */
+	/* top level elements are "networked" during send */
+	aa->ap.aph.ph.param_type = type;
+	aa->ifa = ifa;
+	atomic_add_int(&ifa->refcount, 1);
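+	/* hold a reference on the ifa while this entry sits on the asconf queue */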
+	/* correlation_id filled in during send routine later... */
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+		sin6 = &ifa->address.sin6;
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+		    sizeof(struct sctp_ipv6addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+		       sizeof(struct in6_addr));
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+		sin = &ifa->address.sin;
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) +
+		    sizeof(struct sctp_ipv4addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+		       sizeof(struct in_addr));
+		break;
+	}
+#endif
+	default:
+		/* invalid family! */
+		SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+		sctp_free_ifa(ifa);
+		return (-1);
+	}
+	aa->sent = 0;		/* clear sent flag */
+
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+#ifdef SCTP_DEBUG
+	if (SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_ASCONF2) {
+		if (type == SCTP_ADD_IP_ADDRESS) {
+			SCTP_PRINTF("asconf_queue_mgmt: inserted asconf ADD_IP_ADDRESS: ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa);
+		} else if (type == SCTP_DEL_IP_ADDRESS) {
+			SCTP_PRINTF("asconf_queue_mgmt: appended asconf DEL_IP_ADDRESS: ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa);
+		} else {
+			SCTP_PRINTF("asconf_queue_mgmt: appended asconf SET_PRIM_ADDR: ");
+			SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, &ifa->address.sa);
+		}
+	}
+#endif
+
+	return (0);
+}
+
+
+/*
+ * add an asconf operation for the given ifa and type.
+ * type = SCTP_ADD_IP_ADDRESS, SCTP_DEL_IP_ADDRESS, SCTP_SET_PRIM_ADDR.
+ * returns 0 if completed, -1 if not completed, 1 if immediate send is
+ * advisable.
+ */
+static int
+sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
+		      uint16_t type)
+{
+	uint32_t status;
+	int pending_delete_queued = 0;
+	int last;
+
+	/* see if peer supports ASCONF */
+	if (stcb->asoc.asconf_supported == 0) {
+		return (-1);
+	}
+
+	/*
+	 * if this is deleting the last address from the assoc, mark it as
+	 * pending.
+	 */
+	if ((type == SCTP_DEL_IP_ADDRESS) && !stcb->asoc.asconf_del_pending) {
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+			last = (sctp_local_addr_count(stcb) == 0);
+		} else {
+			last = (sctp_local_addr_count(stcb) == 1);
+		}
+		if (last) {
+			/* set the pending delete info only */
+			stcb->asoc.asconf_del_pending = 1;
+			stcb->asoc.asconf_addr_del_pending = ifa;
+			atomic_add_int(&ifa->refcount, 1);
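+			/* hold a reference on the ifa while it is recorded as the pending delete */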
+			SCTPDBG(SCTP_DEBUG_ASCONF2,
+				"asconf_queue_add: mark delete last address pending\n");
+			return (-1);
+		}
+	}
+
+	/* queue an asconf parameter */
+	status = sctp_asconf_queue_mgmt(stcb, ifa, type);
+
+	/*
+	 * if this is an add, and there is a delete also pending (i.e. the
+	 * last local address is being changed), queue the pending delete too.
+	 */
+	if ((type == SCTP_ADD_IP_ADDRESS) && stcb->asoc.asconf_del_pending && (status == 0)) {
+		/* queue in the pending delete */
+		if (sctp_asconf_queue_mgmt(stcb,
+					   stcb->asoc.asconf_addr_del_pending,
+					   SCTP_DEL_IP_ADDRESS) == 0) {
+			SCTPDBG(SCTP_DEBUG_ASCONF2, "asconf_queue_add: queuing pending delete\n");
+			pending_delete_queued = 1;
+			/* clear out the pending delete info */
+			stcb->asoc.asconf_del_pending = 0;
+			sctp_free_ifa(stcb->asoc.asconf_addr_del_pending);
+			stcb->asoc.asconf_addr_del_pending = NULL;
+		}
+	}
+
+	if (pending_delete_queued) {
+		struct sctp_nets *net;
+		/*
+		 * since we know that the only/last address is now being
+		 * changed in this case, reset the cwnd/rto on all nets to
+		 * start as a new address and path.  Also clear the error
+		 * counts to give the assoc the best chance to complete the
+		 * address change.
+		 */
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+			stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb,
+									  net);
+			net->RTO = 0;
+			net->error_count = 0;
+		}
+		stcb->asoc.overall_error_count = 0;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+				       stcb->asoc.overall_error_count,
+				       0,
+				       SCTP_FROM_SCTP_ASCONF,
+				       __LINE__);
+		}
+
+		/* queue in an advisory set primary too */
+		(void)sctp_asconf_queue_mgmt(stcb, ifa, SCTP_SET_PRIM_ADDR);
+		/* let caller know we should send this out immediately */
+		status = 1;
+	}
+	return (status);
+}
+
+/*-
+ * add an asconf delete IP address parameter to the queue by sockaddr and
+ * possibly with no sctp_ifa available.  This is only called by the routine
+ * that checks the addresses in an INIT-ACK against the current address list.
+ * returns 0 if completed, non-zero if not completed.
+ * NOTE: if an add is already scheduled (and not yet sent out), simply
+ * remove it from queue.  If a duplicate operation is found, ignore the
+ * new one.
+ */
+static int
+sctp_asconf_queue_sa_delete(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+	struct sctp_ifa *ifa;
+	struct sctp_asconf_addr *aa, *aa_next;
+
+	if (stcb == NULL) {
+		return (-1);
+	}
+	/* see if peer supports ASCONF */
+	if (stcb->asoc.asconf_supported == 0) {
+		return (-1);
+	}
+	/* make sure the request isn't already in the queue */
+	TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+		/* address match? */
+		if (sctp_asconf_addr_match(aa, sa) == 0)
+			continue;
+		/* is the request already in queue (sent or not) */
+		if (aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+			return (-1);
+		}
+		/* is the negative request already in queue, and not sent */
+		if (aa->sent == 1)
+			continue;
+		if (aa->ap.aph.ph.param_type == SCTP_ADD_IP_ADDRESS) {
+			/* add already queued, so remove existing entry */
+			TAILQ_REMOVE(&stcb->asoc.asconf_queue, aa, next);
+			sctp_del_local_addr_restricted(stcb, aa->ifa);
+			/* free the entry */
+			SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+			return (-1);
+		}
+	} /* for each aa */
+
+	/* find any existing ifa-- NOTE ifa CAN be allowed to be NULL */
+	ifa = sctp_find_ifa_by_addr(sa, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
+
+	/* adding new request to the queue */
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+		    SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"sctp_asconf_queue_sa_delete: failed to get memory!\n");
+		return (-1);
+	}
+	aa->special_del = 0;
+	/* fill in asconf address parameter fields */
+	/* top level elements are "networked" during send */
+	aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+	aa->ifa = ifa;
+	if (ifa)
+		atomic_add_int(&ifa->refcount, 1);
+	/* correlation_id filled in during send routine later... */
+	switch (sa->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* IPv6 address */
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)sa;
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv6addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv6addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin6->sin6_addr,
+		    sizeof(struct in6_addr));
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		/* IPv4 address */
+		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = (sizeof(struct sctp_ipv4addr_param));
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_paramhdr) + sizeof(struct sctp_ipv4addr_param);
+		memcpy(&aa->ap.addrp.addr, &sin->sin_addr,
+		    sizeof(struct in_addr));
+		break;
+	}
+#endif
+	default:
+		/* invalid family! */
+		SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+		if (ifa)
+			sctp_free_ifa(ifa);
+		return (-1);
+	}
+	aa->sent = 0;		/* clear sent flag */
+
+	/* delete goes to the back of the queue */
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+	/* sa_ignore MEMLEAK {memory is put on the tailq} */
+	return (0);
+}
+
+/*
+ * find a specific asconf param on our "sent" queue
+ */
+static struct sctp_asconf_addr *
+sctp_asconf_find_param(struct sctp_tcb *stcb, uint32_t correlation_id)
+{
+	struct sctp_asconf_addr *aa;
+
+	TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+		if (aa->ap.aph.correlation_id == correlation_id &&
+		    aa->sent == 1) {
+			/* found it */
+			return (aa);
+		}
+	}
+	/* didn't find it */
+	return (NULL);
+}
+
+/*
+ * process an SCTP_ERROR_CAUSE_IND for a ASCONF-ACK parameter and do
+ * notifications based on the error response
+ */
+static void
+sctp_asconf_process_error(struct sctp_tcb *stcb SCTP_UNUSED,
+			  struct sctp_asconf_paramhdr *aph)
+{
+	struct sctp_error_cause *eh;
+	struct sctp_paramhdr *ph;
+	uint16_t param_type;
+	uint16_t error_code;
+
+	eh = (struct sctp_error_cause *)(aph + 1);
+	ph = (struct sctp_paramhdr *)(eh + 1);
+	/* validate lengths */
+	if (ntohs(eh->length) + sizeof(struct sctp_error_cause) >
+	    ntohs(aph->ph.param_length)) {
+		/* invalid error cause length */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_process_error: cause element too long\n");
+		return;
+	}
+	if (ntohs(ph->param_length) + sizeof(struct sctp_paramhdr) >
+	    ntohs(eh->length)) {
+		/* invalid included TLV length */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"asconf_process_error: included TLV too long\n");
+		return;
+	}
+	/* which error code ? */
+	error_code = ntohs(eh->code);
+	param_type = ntohs(aph->ph.param_type);
+	/* FIX: this should go back up the REMOTE_ERROR ULP notify */
+	switch (error_code) {
+	case SCTP_CAUSE_RESOURCE_SHORTAGE:
+		/* we allow ourselves to "try again" for this error */
+		break;
+	default:
+		/* peer can't handle it... */
+		switch (param_type) {
+		case SCTP_ADD_IP_ADDRESS:
+		case SCTP_DEL_IP_ADDRESS:
+		case SCTP_SET_PRIM_ADDR:
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * process an asconf queue param.
+ * aparam: parameter to process, will be removed from the queue.
+ * flag: 1=success case, 0=failure case
+ */
+static void
+sctp_asconf_process_param_ack(struct sctp_tcb *stcb,
+			      struct sctp_asconf_addr *aparam, uint32_t flag)
+{
+	uint16_t param_type;
+
+	/* process this param */
+	param_type = aparam->ap.aph.ph.param_type;
+	switch (param_type) {
+	case SCTP_ADD_IP_ADDRESS:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_param_ack: added IP address\n");
+		sctp_asconf_addr_mgmt_ack(stcb, aparam->ifa, flag);
+		break;
+	case SCTP_DEL_IP_ADDRESS:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_param_ack: deleted IP address\n");
+		/* nothing really to do... lists already updated */
+		break;
+	case SCTP_SET_PRIM_ADDR:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"process_param_ack: set primary IP address\n");
+		/* nothing to do... peer may start using this addr */
+		break;
+	default:
+		/* should NEVER happen */
+		break;
+	}
+
+	/* remove the param and free it */
+	TAILQ_REMOVE(&stcb->asoc.asconf_queue, aparam, next);
+	if (aparam->ifa)
+		sctp_free_ifa(aparam->ifa);
+	SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
+}
+
+/*
+ * cleanup from a bad asconf ack parameter
+ */
+static void
+sctp_asconf_ack_clear(struct sctp_tcb *stcb SCTP_UNUSED)
+{
+	/* assume peer doesn't really know how to do asconfs */
+	/* XXX we could free the pending queue here */
+
+}
+
+void
+sctp_handle_asconf_ack(struct mbuf *m, int offset,
+		       struct sctp_asconf_ack_chunk *cp, struct sctp_tcb *stcb,
+		       struct sctp_nets *net, int *abort_no_unlock)
+{
+	struct sctp_association *asoc;
+	uint32_t serial_num;
+	uint16_t ack_length;
+	struct sctp_asconf_paramhdr *aph;
+	struct sctp_asconf_addr *aa, *aa_next;
+	uint32_t last_error_id = 0;	/* last error correlation id */
+	uint32_t id;
+	struct sctp_asconf_addr *ap;
+
+	/* asconf param buffer */
+	uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+
+	/* verify minimum length */
+	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_asconf_ack_chunk)) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"handle_asconf_ack: chunk too small = %xh\n",
+			ntohs(cp->ch.chunk_length));
+		return;
+	}
+	asoc = &stcb->asoc;
+	serial_num = ntohl(cp->serial_number);
+
+	/*
+	 * NOTE: we may want to handle this differently- currently, we will
+	 * abort when we get an ack for the expected serial number + 1 (eg.
+	 * we didn't send it), process an ack normally if it is the expected
+	 * serial number, and re-send the previous ack for *ALL* other
+	 * serial numbers
+	 */
+
+	/*
+	 * if the serial number is the next expected, but I didn't send it,
+	 * abort the asoc, since someone probably just hijacked us...
+	 */
+	if (serial_num == (asoc->asconf_seq_out + 1)) {
+		struct mbuf *op_err;
+		char msg[SCTP_DIAG_INFO_LEN];
+
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got unexpected next serial number! Aborting asoc!\n");
+		snprintf(msg, sizeof(msg), "Never sent serial number %8.8x",
+			 serial_num);
+		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_no_unlock = 1;
+		return;
+	}
+	if (serial_num != asoc->asconf_seq_out_acked + 1) {
+		/* got a duplicate/unexpected ASCONF-ACK */
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "handle_asconf_ack: got duplicate/unexpected serial number = %xh (expected = %xh)\n",
+			serial_num, asoc->asconf_seq_out_acked + 1);
+		return;
+	}
+
+	if (serial_num == asoc->asconf_seq_out - 1) {
+		/* stop our timer */
+		sctp_timer_stop(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep, stcb, net,
+				SCTP_FROM_SCTP_ASCONF + SCTP_LOC_5);
+	}
+
+	/* process the ASCONF-ACK contents */
+	ack_length = ntohs(cp->ch.chunk_length) -
+	    sizeof(struct sctp_asconf_ack_chunk);
+	offset += sizeof(struct sctp_asconf_ack_chunk);
+	/* process through all parameters */
+	while (ack_length >= sizeof(struct sctp_asconf_paramhdr)) {
+		unsigned int param_length, param_type;
+
+		/* get pointer to next asconf parameter */
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset,
+		    sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+		if (aph == NULL) {
+			/* can't get an asconf paramhdr */
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		param_type = ntohs(aph->ph.param_type);
+		param_length = ntohs(aph->ph.param_length);
+		if (param_length > ack_length) {
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		if (param_length < sizeof(struct sctp_paramhdr)) {
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		/* get the complete parameter... */
+		if (param_length > sizeof(aparam_buf)) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"param length (%u) larger than buffer size!\n", param_length);
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(m, offset, param_length, aparam_buf);
+		if (aph == NULL) {
+			sctp_asconf_ack_clear(stcb);
+			return;
+		}
+		/* correlation_id is transparent to peer, no ntohl needed */
+		id = aph->correlation_id;
+
+		switch (param_type) {
+		case SCTP_ERROR_CAUSE_IND:
+			last_error_id = id;
+			/* find the corresponding asconf param in our queue */
+			ap = sctp_asconf_find_param(stcb, id);
+			if (ap == NULL) {
+				/* hmm... can't find this in our queue! */
+				break;
+			}
+			/* process the parameter, failed flag */
+			sctp_asconf_process_param_ack(stcb, ap, 0);
+			/* process the error response */
+			sctp_asconf_process_error(stcb, aph);
+			break;
+		case SCTP_SUCCESS_REPORT:
+			/* find the corresponding asconf param in our queue */
+			ap = sctp_asconf_find_param(stcb, id);
+			if (ap == NULL) {
+				/* hmm... can't find this in our queue! */
+				break;
+			}
+			/* process the parameter, success flag */
+			sctp_asconf_process_param_ack(stcb, ap, 1);
+			break;
+		default:
+			break;
+		}		/* switch */
+
+		/* update remaining ASCONF-ACK message length to process */
+		ack_length -= SCTP_SIZE32(param_length);
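+		/* (SCTP_SIZE32 accounts for the 32-bit padding of each TLV) */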
+		if (ack_length <= 0) {
+			/* no more data in the mbuf chain */
+			break;
+		}
+		offset += SCTP_SIZE32(param_length);
+	} /* while */
+
+	/*
+	 * if there are any "sent" params still on the queue, these are
+	 * implicitly "success", or "failed" (if we got an error back) ...
+	 * so process these appropriately
+	 *
+	 * we assume that the correlation_id's are monotonically increasing
+	 * beginning from 1 and that we don't have *that* many outstanding
+	 * at any given time
+	 */
+	if (last_error_id == 0)
+		last_error_id--;	/* set to "max" value */
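+	/* (unsigned wrap: 0 - 1 yields UINT32_MAX, so with no reported
+	 * errors every sent param below counts as implicitly successful) */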
+	TAILQ_FOREACH_SAFE(aa, &stcb->asoc.asconf_queue, next, aa_next) {
+		if (aa->sent == 1) {
+			/*
+			 * implicitly successful or failed: if correlation_id
+			 * < last_error_id, then success; else, failure
+			 */
+			if (aa->ap.aph.correlation_id < last_error_id)
+				sctp_asconf_process_param_ack(stcb, aa, 1);
+			else
+				sctp_asconf_process_param_ack(stcb, aa, 0);
+		} else {
+			/*
+			 * since we always process in order (FIFO queue) if
+			 * we reach one that hasn't been sent, the rest
+			 * should not have been sent either. so, we're
+			 * done...
+			 */
+			break;
+		}
+	}
+
+	/* advance the acknowledged ASCONF serial number */
+	asoc->asconf_seq_out_acked++;
+	/* remove the old ASCONF on our outbound queue */
+	sctp_toss_old_asconf(stcb);
+	if (!TAILQ_EMPTY(&stcb->asoc.asconf_queue)) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+		/* we have more params, so restart our timer */
+		sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, stcb->sctp_ep,
+				 stcb, net);
+#else
+		/* we have more params, so send out more */
+		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+#endif
+	}
+}
+
+#ifdef INET6
+static uint32_t
+sctp_is_scopeid_in_nets(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+	struct sockaddr_in6 *sin6, *net6;
+	struct sctp_nets *net;
+
+	if (sa->sa_family != AF_INET6) {
+		/* wrong family */
+		return (0);
+	}
+	sin6 = (struct sockaddr_in6 *)sa;
+	if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) == 0) {
+		/* not link local address */
+		return (0);
+	}
+	/* hunt through our destination nets list for this scope_id */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		if (((struct sockaddr *)(&net->ro._l_addr))->sa_family !=
+		    AF_INET6)
+			continue;
+		net6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+		if (IN6_IS_ADDR_LINKLOCAL(&net6->sin6_addr) == 0)
+			continue;
+		if (sctp_is_same_scope(sin6, net6)) {
+			/* found one */
+			return (1);
+		}
+	}
+	/* didn't find one */
+	return (0);
+}
+#endif
+
+/*
+ * address management functions
+ */
+static void
+sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+		     struct sctp_ifa *ifa, uint16_t type, int addr_locked)
+{
+	int status;
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0 ||
+	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+		/* subset bound, no ASCONF allowed case, so ignore */
+		return;
+	}
+	/*
+	 * note: we know this is not the subset-bound, no-ASCONF case;
+	 * i.e. this is bound-all, or subset-bound with ASCONF allowed
+	 */
+
+	/* first, make sure that the address is IPv4 or IPv6 and not jailed */
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+	case AF_INET6:
+#if defined(__FreeBSD__)
+		if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+		                     &ifa->address.sin6.sin6_addr) != 0) {
+			return;
+		}
+#endif
+		break;
+#endif
+#ifdef INET
+	case AF_INET:
+#if defined(__FreeBSD__)
+		if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+		                     &ifa->address.sin.sin_addr) != 0) {
+			return;
+		}
+#endif
+		break;
+#endif
+	default:
+		return;
+	}
+#ifdef INET6
+	/* make sure we're "allowed" to add this type of addr */
+	if (ifa->address.sa.sa_family == AF_INET6) {
+		/* invalid if we're not a v6 endpoint */
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0)
+			return;
+		/* is the v6 addr really valid ? */
+		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+			return;
+		}
+	}
+#endif
+	/* put this address on the "pending/do not use yet" list */
+	sctp_add_local_addr_restricted(stcb, ifa);
+	/*
+	 * check the address scope; if the address is out of scope, don't
+	 * queue anything... note: this would leave the address on both the
+	 * inp and asoc lists
+	 */
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+		sin6 = &ifa->address.sin6;
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+			/* we skip unspecified addresses */
+			return;
+		}
+		if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+			if (stcb->asoc.scope.local_scope == 0) {
+				return;
+			}
+			/* is it the right link local scope? */
+			if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+				return;
+			}
+		}
+		if (stcb->asoc.scope.site_scope == 0 &&
+		    IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+			return;
+		}
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+		struct in6pcb *inp6;
+
+		inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+		/* invalid if we are a v6 only endpoint */
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+		    SCTP_IPV6_V6ONLY(inp6))
+			return;
+
+		sin = &ifa->address.sin;
+		if (sin->sin_addr.s_addr == 0) {
+			/* we skip unspecified addresses */
+			return;
+		}
+		if (stcb->asoc.scope.ipv4_local_scope == 0 &&
+		    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+			return;
+		}
+		break;
+	}
+#endif
+	default:
+		/* else, not AF_INET or AF_INET6, so skip */
+		return;
+	}
+
+	/* queue an asconf for this address add/delete */
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+		/* does the peer do asconf? */
+		if (stcb->asoc.asconf_supported) {
+			/* queue an asconf for this addr */
+			status = sctp_asconf_queue_add(stcb, ifa, type);
+
+			/*
+			 * if queued ok, and in the open state, send out the
+			 * ASCONF.  If in the non-open state, these will be
+			 * sent when the state goes open.
+			 */
+			if (status == 0 &&
+			    ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+			     (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED))) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+				    stcb, stcb->asoc.primary_destination);
+#else
+				sctp_send_asconf(stcb, NULL, addr_locked);
+#endif
+			}
+		}
+	}
+}
+
+
+int
+sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_asconf_iterator *asc;
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *l;
+	int cnt_invalid = 0;
+
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+		ifa = l->ifa;
+		switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+		case AF_INET6:
+			/* invalid if we're not a v6 endpoint */
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return (1);
+			}
+			break;
+#endif
+#ifdef INET
+		case AF_INET:
+		{
+			/* invalid if we are a v6 only endpoint */
+			struct in6pcb *inp6;
+			inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp6)) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return (1);
+			}
+			break;
+		}
+#endif
+		default:
+			/* invalid address family */
+			cnt_invalid++;
+			if (asc->cnt == cnt_invalid)
+				return (1);
+		}
+	}
+	return (0);
+}
+
+static int
+sctp_asconf_iterator_ep_end(struct sctp_inpcb *inp, void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_ifa *ifa;
+	struct sctp_asconf_iterator *asc;
+	struct sctp_laddr *laddr, *nladdr, *l;
+
+	/* Only for the specific case of not being bound-all */
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+		ifa = l->ifa;
+		if (l->action == SCTP_ADD_IP_ADDRESS) {
+			LIST_FOREACH(laddr, &inp->sctp_addr_list,
+				     sctp_nxt_addr) {
+				if (laddr->ifa == ifa) {
+					laddr->action = 0;
+					break;
+				}
+
+			}
+		} else if (l->action == SCTP_DEL_IP_ADDRESS) {
+			LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
+				/* remove only after all guys are done */
+				if (laddr->ifa == ifa) {
+					sctp_del_local_addr_ep(inp, ifa);
+				}
+			}
+		}
+	}
+	return (0);
+}
+
+void
+sctp_asconf_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+			  void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_asconf_iterator *asc;
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *l;
+	int cnt_invalid = 0;
+	int type, status;
+	int num_queued = 0;
+
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH(l, &asc->list_of_work, sctp_nxt_addr) {
+		ifa = l->ifa;
+		type = l->action;
+
+		/* address's vrf_id must be the vrf_id of the assoc */
+		if (ifa->vrf_id != stcb->asoc.vrf_id) {
+			continue;
+		}
+
+		/* Same checks again for assoc */
+		switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+		case AF_INET6:
+		{
+			/* invalid if we're not a v6 endpoint */
+			struct sockaddr_in6 *sin6;
+
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return;
+				else
+					continue;
+			}
+			sin6 = &ifa->address.sin6;
+			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* we skip unspecified addresses */
+				continue;
+			}
+#if defined(__FreeBSD__)
+			if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+			                     &sin6->sin6_addr) != 0) {
+				continue;
+			}
+#endif
+			if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+				if (stcb->asoc.scope.local_scope == 0) {
+					continue;
+				}
+				/* is it the right link local scope? */
+				if (sctp_is_scopeid_in_nets(stcb, &ifa->address.sa) == 0) {
+					continue;
+				}
+			}
+			break;
+		}
+#endif
+#ifdef INET
+		case AF_INET:
+		{
+			struct in6pcb *inp6;
+			struct sockaddr_in *sin;
+
+			inp6 = (struct in6pcb *)&inp->ip_inp.inp;
+			/* invalid if we are a v6 only endpoint */
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp6))
+				continue;
+
+			sin = &ifa->address.sin;
+			if (sin->sin_addr.s_addr == 0) {
+				/* we skip unspecified addresses */
+				continue;
+			}
+#if defined(__FreeBSD__)
+			if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+			                     &sin->sin_addr) != 0) {
+				continue;
+			}
+#endif
+			if (stcb->asoc.scope.ipv4_local_scope == 0 &&
+			    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+				continue;
+			}
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp6)) {
+				cnt_invalid++;
+				if (asc->cnt == cnt_invalid)
+					return;
+				else
+					continue;
+			}
+			break;
+		}
+#endif
+		default:
+			/* invalid address family */
+			cnt_invalid++;
+			if (asc->cnt == cnt_invalid)
+				return;
+			else
+				continue;
+			break;
+		}
+
+		if (type == SCTP_ADD_IP_ADDRESS) {
+			/* prevent this address from being used as a source */
+			sctp_add_local_addr_restricted(stcb, ifa);
+		} else if (type == SCTP_DEL_IP_ADDRESS) {
+			struct sctp_nets *net;
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				sctp_rtentry_t *rt;
+
+				/* delete this address if cached */
+				if (net->ro._s_addr == ifa) {
+					sctp_free_ifa(net->ro._s_addr);
+					net->ro._s_addr = NULL;
+					net->src_addr_selected = 0;
+					rt = net->ro.ro_rt;
+					if (rt) {
+						RTFREE(rt);
+						net->ro.ro_rt = NULL;
+					}
+					/*
+					 * Now that we have deleted our src
+					 * address, should we not also reset
+					 * the cwnd/rto to start as if it's
+					 * a new address?
+					 */
+					stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+					net->RTO = 0;
+
+				}
+			}
+		} else if (type == SCTP_SET_PRIM_ADDR) {
+			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+				/* must validate the ifa is in the ep */
+				if (sctp_is_addr_in_ep(stcb->sctp_ep, ifa) == 0) {
+					continue;
+				}
+			} else {
+				/* Need to check scopes for this guy */
+				if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
+					continue;
+				}
+			}
+		}
+		/* queue an asconf for this address add/delete */
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF) &&
+		    stcb->asoc.asconf_supported == 1) {
+			/* queue an asconf for this addr */
+			status = sctp_asconf_queue_add(stcb, ifa, type);
+			/*
+			 * if queued ok, and in the open state, update the
+			 * count of queued params.  If in the non-open state,
+			 * these get sent when the assoc goes open.
+			 */
+			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+				if (status >= 0) {
+					num_queued++;
+				}
+			}
+		}
+	}
+	/*
+	 * If we have queued params in the open state, send out an ASCONF.
+	 */
+	if (num_queued > 0) {
+		sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+	}
+}
+
+void
+sctp_asconf_iterator_end(void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_asconf_iterator *asc;
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *l, *nl;
+
+	asc = (struct sctp_asconf_iterator *)ptr;
+	LIST_FOREACH_SAFE(l, &asc->list_of_work, sctp_nxt_addr, nl) {
+		ifa = l->ifa;
+		if (l->action == SCTP_ADD_IP_ADDRESS) {
+			/* Clear the defer use flag */
+			ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+		}
+		sctp_free_ifa(ifa);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), l);
+		SCTP_DECR_LADDR_COUNT();
+	}
+	SCTP_FREE(asc, SCTP_M_ASC_IT);
+}
+
+/*
+ * sa is the sockaddr to ask the peer to set primary to.
+ * returns: 0 = completed, -1 = error
+ */
+int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
+{
+	uint32_t vrf_id;
+	struct sctp_ifa *ifa;
+
+	/* find the ifa for the desired set primary */
+	vrf_id = stcb->asoc.vrf_id;
+	ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+	if (ifa == NULL) {
+		/* Invalid address */
+		return (-1);
+	}
+
+	/* queue an ASCONF:SET_PRIM_ADDR to be sent */
+	if (!sctp_asconf_queue_add(stcb, ifa, SCTP_SET_PRIM_ADDR)) {
+		/* set primary queuing succeeded */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"set_primary_ip_address_sa: queued on tcb=%p, ",
+			(void *)stcb);
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+					 stcb->sctp_ep, stcb,
+					 stcb->asoc.primary_destination);
+#else
+			sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+		}
+	} else {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address_sa: failed to add to queue on tcb=%p, ",
+			(void *)stcb);
+		SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, sa);
+		return (-1);
+	}
+	return (0);
+}
+
+void
+sctp_set_primary_ip_address(struct sctp_ifa *ifa)
+{
+	struct sctp_inpcb *inp;
+
+	/* go through all our PCB's */
+	LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+		struct sctp_tcb *stcb;
+
+		/* process for all associations for this endpoint */
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			/* queue an ASCONF:SET_PRIM_ADDR to be sent */
+			if (!sctp_asconf_queue_add(stcb, ifa,
+						   SCTP_SET_PRIM_ADDR)) {
+				/* set primary queuing succeeded */
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "set_primary_ip_address: queued on stcb=%p, ",
+					(void *)stcb);
+				SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &ifa->address.sa);
+				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+							 stcb->sctp_ep, stcb,
+							 stcb->asoc.primary_destination);
+#else
+					sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+				}
+			}
+		} /* for each stcb */
+	} /* for each inp */
+}
+
+int
+sctp_is_addr_pending(struct sctp_tcb *stcb, struct sctp_ifa *sctp_ifa)
+{
+	struct sctp_tmit_chunk *chk, *nchk;
+	unsigned int offset, asconf_limit;
+	struct sctp_asconf_chunk *acp;
+	struct sctp_asconf_paramhdr *aph;
+	uint8_t aparam_buf[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_paramhdr *ph;
+	int add_cnt, del_cnt;
+	uint16_t last_param_type;
+
+	add_cnt = del_cnt = 0;
+	last_param_type = 0;
+	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.asconf_send_queue, sctp_next, nchk) {
+		if (chk->data == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: No mbuf data?\n");
+			continue;
+		}
+		offset = 0;
+		acp = mtod(chk->data, struct sctp_asconf_chunk *);
+		offset += sizeof(struct sctp_asconf_chunk);
+		asconf_limit = ntohs(acp->ch.chunk_length);
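+		/* the chunk length bounds how far the parameter walk below may go */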
+		ph = (struct sctp_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_paramhdr), aparam_buf);
+		if (ph == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get lookup addr!\n");
+			continue;
+		}
+		offset += ntohs(ph->param_length);
+
+		aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+		if (aph == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: Empty ASCONF will be sent?\n");
+			continue;
+		}
+		while (aph != NULL) {
+			unsigned int param_length, param_type;
+
+			param_type = ntohs(aph->ph.param_type);
+			param_length = ntohs(aph->ph.param_length);
+			if (offset + param_length > asconf_limit) {
+				/* parameter goes beyond end of chunk! */
+				break;
+			}
+			if (param_length > sizeof(aparam_buf)) {
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length (%u) larger than buffer size!\n", param_length);
+				break;
+			}
+			if (param_length <= sizeof(struct sctp_paramhdr)) {
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: param length(%u) too short\n", param_length);
+				break;
+			}
+
+			aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, param_length, aparam_buf);
+			if (aph == NULL) {
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "is_addr_pending: couldn't get entire param\n");
+				break;
+			}
+
+			ph = (struct sctp_paramhdr *)(aph + 1);
+			if (sctp_addr_match(ph, &sctp_ifa->address.sa) != 0) {
+				switch (param_type) {
+				case SCTP_ADD_IP_ADDRESS:
+					add_cnt++;
+					break;
+				case SCTP_DEL_IP_ADDRESS:
+					del_cnt++;
+					break;
+				default:
+					break;
+				}
+				last_param_type = param_type;
+			}
+
+			offset += SCTP_SIZE32(param_length);
+			if (offset >= asconf_limit) {
+				/* no more data in the mbuf chain */
+				break;
+			}
+			/* get pointer to next asconf param */
+			aph = (struct sctp_asconf_paramhdr *)sctp_m_getptr(chk->data, offset, sizeof(struct sctp_asconf_paramhdr), aparam_buf);
+		}
+	}
+
+	/* we want to find the sequences which consist of ADD -> DEL -> ADD or DEL -> ADD */
+	if (add_cnt > del_cnt ||
+	    (add_cnt == del_cnt && last_param_type == SCTP_ADD_IP_ADDRESS)) {
+		return (1);
+	}
+	return (0);
+}
+
+static struct sockaddr *
+sctp_find_valid_localaddr(struct sctp_tcb *stcb, int addr_locked)
+{
+	struct sctp_vrf *vrf = NULL;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+
+	if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+		SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+	if (vrf == NULL) {
+		if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+			SCTP_IPI_ADDR_RUNLOCK();
+		return (NULL);
+	}
+	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+		if (stcb->asoc.scope.loopback_scope == 0 &&
+		    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+			/* Skip if loopback_scope not set */
+			continue;
+		}
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+			switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				if (stcb->asoc.scope.ipv4_addr_legal) {
+					struct sockaddr_in *sin;
+
+					sin = &sctp_ifa->address.sin;
+					if (sin->sin_addr.s_addr == 0) {
+						/* skip unspecified addresses */
+						continue;
+					}
+#if defined(__FreeBSD__)
+					if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+					                     &sin->sin_addr) != 0) {
+						continue;
+					}
+#endif
+					if (stcb->asoc.scope.ipv4_local_scope == 0 &&
+					    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))
+						continue;
+
+					if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+					    (!sctp_is_addr_pending(stcb, sctp_ifa)))
+						continue;
+					/* found a valid local v4 address to use */
+					if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+						SCTP_IPI_ADDR_RUNLOCK();
+					return (&sctp_ifa->address.sa);
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				if (stcb->asoc.scope.ipv6_addr_legal) {
+					struct sockaddr_in6 *sin6;
+
+					if (sctp_ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+						continue;
+					}
+
+					sin6 = &sctp_ifa->address.sin6;
+					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+						/* we skip unspecified addresses */
+						continue;
+					}
+#if defined(__FreeBSD__)
+					if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+					                     &sin6->sin6_addr) != 0) {
+						continue;
+					}
+#endif
+					if (stcb->asoc.scope.local_scope == 0 &&
+					    IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
+						continue;
+					if (stcb->asoc.scope.site_scope == 0 &&
+					    IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))
+						continue;
+
+					if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+					    (!sctp_is_addr_pending(stcb, sctp_ifa)))
+						continue;
+					/* found a valid local v6 address to use */
+					if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+						SCTP_IPI_ADDR_RUNLOCK();
+					return (&sctp_ifa->address.sa);
+				}
+				break;
+#endif
+			default:
+				break;
+			}
+		}
+	}
+	/* no valid addresses found */
+	if (addr_locked == SCTP_ADDR_NOT_LOCKED)
+		SCTP_IPI_ADDR_RUNLOCK();
+	return (NULL);
+}
+
+static struct sockaddr *
+sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb)
+{
+	struct sctp_laddr *laddr;
+
+	LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+		if (laddr->ifa == NULL) {
+			continue;
+		}
+		/* is the address restricted ? */
+		if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
+		    (!sctp_is_addr_pending(stcb, laddr->ifa)))
+			continue;
+
+		/* found a valid local address to use */
+		return (&laddr->ifa->address.sa);
+	}
+	/* no valid addresses found */
+	return (NULL);
+}
+
+/*
+ * builds an ASCONF chunk from queued ASCONF params.
+ * returns NULL on error (no mbuf, no ASCONF params queued, etc).
+ */
+struct mbuf *
+sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen, int addr_locked)
+{
+	struct mbuf *m_asconf, *m_asconf_chk;
+	struct sctp_asconf_addr *aa;
+	struct sctp_asconf_chunk *acp;
+	struct sctp_asconf_paramhdr *aph;
+	struct sctp_asconf_addr_param *aap;
+	uint32_t p_length;
+	uint32_t correlation_id = 1;	/* 0 is reserved... */
+	caddr_t ptr, lookup_ptr;
+	uint8_t lookup_used = 0;
+
+	/* are there any asconf params to send? */
+	TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+		if (aa->sent == 0)
+			break;
+	}
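+	/* aa is NULL here only if the loop above completed without
+	 * finding an unsent parameter */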
+	if (aa == NULL)
+		return (NULL);
+
+	/*
+	 * get a chunk header mbuf and a cluster for the asconf params since
+	 * it's simpler to fill in the asconf chunk header lookup address on
+	 * the fly
+	 */
+	m_asconf_chk = sctp_get_mbuf_for_msg(sizeof(struct sctp_asconf_chunk), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_asconf_chk == NULL) {
+		/* no mbuf's */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"compose_asconf: couldn't get chunk mbuf!\n");
+		return (NULL);
+	}
+	m_asconf = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (m_asconf == NULL) {
+		/* no mbuf's */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"compose_asconf: couldn't get mbuf!\n");
+		sctp_m_freem(m_asconf_chk);
+		return (NULL);
+	}
+	SCTP_BUF_LEN(m_asconf_chk) = sizeof(struct sctp_asconf_chunk);
+	SCTP_BUF_LEN(m_asconf) = 0;
+	acp = mtod(m_asconf_chk, struct sctp_asconf_chunk *);
+	bzero(acp, sizeof(struct sctp_asconf_chunk));
+	/* save pointers to lookup address and asconf params */
+	lookup_ptr = (caddr_t)(acp + 1);	/* after the header */
+	ptr = mtod(m_asconf, caddr_t);	/* beginning of cluster */
+
+	/* fill in chunk header info */
+	acp->ch.chunk_type = SCTP_ASCONF;
+	acp->ch.chunk_flags = 0;
+	acp->serial_number = htonl(stcb->asoc.asconf_seq_out);
+	stcb->asoc.asconf_seq_out++;
+
+	/* add parameters... up to smallest MTU allowed */
+	TAILQ_FOREACH(aa, &stcb->asoc.asconf_queue, next) {
+		if (aa->sent)
+			continue;
+		/* get the parameter length */
+		p_length = SCTP_SIZE32(aa->ap.aph.ph.param_length);
+		/* will it fit in current chunk? */
+		if ((SCTP_BUF_LEN(m_asconf) + p_length > stcb->asoc.smallest_mtu) ||
+		    (SCTP_BUF_LEN(m_asconf) + p_length > MCLBYTES)) {
+			/* won't fit, so we're done with this chunk */
+			break;
+		}
+		/* assign (and store) a correlation id */
+		aa->ap.aph.correlation_id = correlation_id++;
+
+		/*
+		 * fill in the address if we're doing a delete; this is a simple
+		 * way for us to fill in the correlation address, which
+		 * should only be used by the peer if we're deleting our
+		 * source address and adding a new address (e.g. renumbering
+		 * case)
+		 */
+		if (lookup_used == 0 &&
+		    (aa->special_del == 0) &&
+		    aa->ap.aph.ph.param_type == SCTP_DEL_IP_ADDRESS) {
+			struct sctp_ipv6addr_param *lookup;
+			uint16_t p_size, addr_size;
+
+			lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+			lookup->ph.param_type =
+			    htons(aa->ap.addrp.ph.param_type);
+			if (aa->ap.addrp.ph.param_type == SCTP_IPV6_ADDRESS) {
+				/* copy IPv6 address */
+				p_size = sizeof(struct sctp_ipv6addr_param);
+				addr_size = sizeof(struct in6_addr);
+			} else {
+				/* copy IPv4 address */
+				p_size = sizeof(struct sctp_ipv4addr_param);
+				addr_size = sizeof(struct in_addr);
+			}
+			lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+			memcpy(lookup->addr, &aa->ap.addrp.addr, addr_size);
+			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+			lookup_used = 1;
+		}
+		/* copy into current space */
+		memcpy(ptr, &aa->ap, p_length);
+
+		/* network elements and update lengths */
+		aph = (struct sctp_asconf_paramhdr *)ptr;
+		aap = (struct sctp_asconf_addr_param *)ptr;
+		/* correlation_id is transparent to peer, no htonl needed */
+		aph->ph.param_type = htons(aph->ph.param_type);
+		aph->ph.param_length = htons(aph->ph.param_length);
+		aap->addrp.ph.param_type = htons(aap->addrp.ph.param_type);
+		aap->addrp.ph.param_length = htons(aap->addrp.ph.param_length);
+
+		SCTP_BUF_LEN(m_asconf) += SCTP_SIZE32(p_length);
+		ptr += SCTP_SIZE32(p_length);
+
+		/*
+		 * these params are removed off the pending list upon
+		 * getting an ASCONF-ACK back from the peer, just set flag
+		 */
+		aa->sent = 1;
+	}
+	/* check to see if the lookup addr has been populated yet */
+	if (lookup_used == 0) {
+		/* NOTE: if the address param is optional, can skip this... */
+		/* add any valid (existing) address... */
+		struct sctp_ipv6addr_param *lookup;
+		uint16_t p_size, addr_size;
+		struct sockaddr *found_addr;
+		caddr_t addr_ptr;
+
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)
+			found_addr = sctp_find_valid_localaddr(stcb,
+							       addr_locked);
+		else
+			found_addr = sctp_find_valid_localaddr_ep(stcb);
+
+		lookup = (struct sctp_ipv6addr_param *)lookup_ptr;
+		if (found_addr != NULL) {
+			switch (found_addr->sa_family) {
+#ifdef INET6
+			case AF_INET6:
+				/* copy IPv6 address */
+				lookup->ph.param_type =
+				    htons(SCTP_IPV6_ADDRESS);
+				p_size = sizeof(struct sctp_ipv6addr_param);
+				addr_size = sizeof(struct in6_addr);
+				addr_ptr = (caddr_t)&((struct sockaddr_in6 *)
+				    found_addr)->sin6_addr;
+				break;
+#endif
+#ifdef INET
+			case AF_INET:
+				/* copy IPv4 address */
+				lookup->ph.param_type =
+				    htons(SCTP_IPV4_ADDRESS);
+				p_size = sizeof(struct sctp_ipv4addr_param);
+				addr_size = sizeof(struct in_addr);
+				addr_ptr = (caddr_t)&((struct sockaddr_in *)
+				    found_addr)->sin_addr;
+				break;
+#endif
+			default:
+				p_size = 0;
+				addr_size = 0;
+				addr_ptr = NULL;
+				break;
+			}
+			lookup->ph.param_length = htons(SCTP_SIZE32(p_size));
+			memcpy(lookup->addr, addr_ptr, addr_size);
+			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(p_size);
+		} else {
+			/* uh oh... don't have any address?? */
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"compose_asconf: no lookup addr!\n");
+			/* XXX for now, we send an IPv4 address of 0.0.0.0 */
+			lookup->ph.param_type = htons(SCTP_IPV4_ADDRESS);
+			lookup->ph.param_length = htons(SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param)));
+			bzero(lookup->addr, sizeof(struct in_addr));
+			SCTP_BUF_LEN(m_asconf_chk) += SCTP_SIZE32(sizeof(struct sctp_ipv4addr_param));
+		}
+	}
+	/* chain it all together */
+	SCTP_BUF_NEXT(m_asconf_chk) = m_asconf;
+	*retlen = SCTP_BUF_LEN(m_asconf_chk) + SCTP_BUF_LEN(m_asconf);
+	acp->ch.chunk_length = htons(*retlen);
+
+	return (m_asconf_chk);
+}
+
+/*
+ * section to handle address changes before an association is up eg. changes
+ * during INIT/INIT-ACK/COOKIE-ECHO handshake
+ */
+
+/*
+ * processes the (local) addresses in the INIT-ACK chunk
+ */
+static void
+sctp_process_initack_addresses(struct sctp_tcb *stcb, struct mbuf *m,
+    unsigned int offset, unsigned int length)
+{
+	struct sctp_paramhdr tmp_param, *ph;
+	uint16_t plen, ptype;
+	struct sctp_ifa *sctp_ifa;
+	union sctp_sockstore store;
+#ifdef INET6
+	struct sctp_ipv6addr_param addr6_store;
+#endif
+#ifdef INET
+	struct sctp_ipv4addr_param addr4_store;
+#endif
+
+	SCTPDBG(SCTP_DEBUG_ASCONF2, "processing init-ack addresses\n");
+	if (stcb == NULL) /* Un-needed check for SA */
+		return;
+
+	/* convert to upper bound */
+	length += offset;
+
+	if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+		return;
+	}
+	/* go through the addresses in the init-ack */
+	ph = (struct sctp_paramhdr *)
+	     sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+	                   (uint8_t *)&tmp_param);
+	while (ph != NULL) {
+		ptype = ntohs(ph->param_type);
+		plen = ntohs(ph->param_length);
+		switch (ptype) {
+#ifdef INET6
+		case SCTP_IPV6_ADDRESS:
+		{
+			struct sctp_ipv6addr_param *a6p;
+
+			/* get the entire IPv6 address param */
+			a6p = (struct sctp_ipv6addr_param *)
+			    sctp_m_getptr(m, offset,
+			    sizeof(struct sctp_ipv6addr_param),
+			    (uint8_t *)&addr6_store);
+			if (plen != sizeof(struct sctp_ipv6addr_param) ||
+			    a6p == NULL) {
+				return;
+			}
+			memset(&store, 0, sizeof(union sctp_sockstore));
+			store.sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+			store.sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+			store.sin6.sin6_port = stcb->rport;
+			memcpy(&store.sin6.sin6_addr, a6p->addr, sizeof(struct in6_addr));
+			break;
+		}
+#endif
+#ifdef INET
+		case SCTP_IPV4_ADDRESS:
+		{
+			struct sctp_ipv4addr_param *a4p;
+
+			/* get the entire IPv4 address param */
+			a4p = (struct sctp_ipv4addr_param *)sctp_m_getptr(m, offset,
+									  sizeof(struct sctp_ipv4addr_param),
+									  (uint8_t *)&addr4_store);
+			if (plen != sizeof(struct sctp_ipv4addr_param) ||
+			    a4p == NULL) {
+				return;
+			}
+			memset(&store, 0, sizeof(union sctp_sockstore));
+			store.sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+			store.sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+			store.sin.sin_port = stcb->rport;
+			store.sin.sin_addr.s_addr = a4p->addr;
+			break;
+		}
+#endif
+		default:
+			goto next_addr;
+		}
+
+		/* see if this address really (still) exists */
+		sctp_ifa = sctp_find_ifa_by_addr(&store.sa, stcb->asoc.vrf_id,
+						 SCTP_ADDR_NOT_LOCKED);
+		if (sctp_ifa == NULL) {
+			/* address doesn't exist anymore */
+			int status;
+
+			/* are ASCONFs allowed ? */
+			if ((sctp_is_feature_on(stcb->sctp_ep,
+			    SCTP_PCB_FLAGS_DO_ASCONF)) &&
+			    stcb->asoc.asconf_supported) {
+				/* queue an ASCONF DEL_IP_ADDRESS */
+				status = sctp_asconf_queue_sa_delete(stcb, &store.sa);
+				/*
+				 * if queued ok, and in correct state, send
+				 * out the ASCONF.
+				 */
+				if (status == 0 &&
+				    SCTP_GET_STATE(&stcb->asoc) ==
+				    SCTP_STATE_OPEN) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+							 stcb->sctp_ep, stcb,
+							 stcb->asoc.primary_destination);
+#else
+					sctp_send_asconf(stcb, NULL, SCTP_ADDR_NOT_LOCKED);
+#endif
+				}
+			}
+		}
+
+next_addr:
+		/*
+		 * Sanity check:  Make sure the length isn't 0, otherwise
+		 * we'll be stuck in this loop for a long time...
+		 */
+		if (SCTP_SIZE32(plen) == 0) {
+			SCTP_PRINTF("process_initack_addrs: bad len (%d) type=%xh\n",
+				    plen, ptype);
+			return;
+		}
+		/* get next parameter */
+		offset += SCTP_SIZE32(plen);
+		if ((offset + sizeof(struct sctp_paramhdr)) > length)
+			return;
+		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+		    sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param);
+	} /* while */
+}
+
+/* FIX ME: need to verify return result for v6 address type if v6 disabled */
+/*
+ * checks to see if a specific address is in the initack address list;
+ * returns 1 if found, 0 if not
+ */
+static uint32_t
+sctp_addr_in_initack(struct mbuf *m, uint32_t offset, uint32_t length, struct sockaddr *sa)
+{
+	struct sctp_paramhdr tmp_param, *ph;
+	uint16_t plen, ptype;
+#ifdef INET
+	struct sockaddr_in *sin;
+	struct sctp_ipv4addr_param *a4p;
+	struct sctp_ipv6addr_param addr4_store;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sctp_ipv6addr_param *a6p;
+	struct sctp_ipv6addr_param addr6_store;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+	struct sockaddr_in6 sin6_tmp;
+#endif
+#endif
+
+	switch (sa->sa_family) {
+#ifdef INET
+	case AF_INET:
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		break;
+#endif
+	default:
+		return (0);
+	}
+
+	SCTPDBG(SCTP_DEBUG_ASCONF2, "find_initack_addr: starting search for ");
+	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF2, sa);
+	/* convert to upper bound */
+	length += offset;
+
+	if ((offset + sizeof(struct sctp_paramhdr)) > length) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+			"find_initack_addr: invalid offset?\n");
+		return (0);
+	}
+	/* go through the addresses in the init-ack */
+	ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+	    sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param);
+	while (ph != NULL) {
+		ptype = ntohs(ph->param_type);
+		plen = ntohs(ph->param_length);
+		switch (ptype) {
+#ifdef INET6
+		case SCTP_IPV6_ADDRESS:
+			if (sa->sa_family == AF_INET6) {
+				if (plen != sizeof(struct sctp_ipv6addr_param)) {
+					break;
+				}
+				/* get the entire IPv6 address param */
+				a6p = (struct sctp_ipv6addr_param *)
+				      sctp_m_getptr(m, offset,
+				                    sizeof(struct sctp_ipv6addr_param),
+				                    (uint8_t *)&addr6_store);
+				if (a6p == NULL) {
+					return (0);
+				}
+				sin6 = (struct sockaddr_in6 *)sa;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+				if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+					/* create a copy and clear scope */
+					memcpy(&sin6_tmp, sin6,
+					       sizeof(struct sockaddr_in6));
+					sin6 = &sin6_tmp;
+					in6_clearscope(&sin6->sin6_addr);
+				}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+				if (memcmp(&sin6->sin6_addr, a6p->addr,
+				           sizeof(struct in6_addr)) == 0) {
+					/* found it */
+					return (1);
+				}
+			}
+			break;
+#endif /* INET6 */
+#ifdef INET
+		case SCTP_IPV4_ADDRESS:
+			if (sa->sa_family == AF_INET) {
+				if (plen != sizeof(struct sctp_ipv4addr_param)) {
+					break;
+				}
+				/* get the entire IPv4 address param */
+				a4p = (struct sctp_ipv4addr_param *)
+				      sctp_m_getptr(m, offset,
+				                    sizeof(struct sctp_ipv4addr_param),
+				                    (uint8_t *)&addr4_store);
+				if (a4p == NULL) {
+					return (0);
+				}
+				sin = (struct sockaddr_in *)sa;
+				if (sin->sin_addr.s_addr == a4p->addr) {
+					/* found it */
+					return (1);
+				}
+			}
+			break;
+#endif
+		default:
+			break;
+		}
+		/* get next parameter */
+		offset += SCTP_SIZE32(plen);
+		if (offset + sizeof(struct sctp_paramhdr) > length) {
+			return (0);
+		}
+		ph = (struct sctp_paramhdr *)
+		    sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+	    (uint8_t *)&tmp_param);
+	} /* while */
+	/* not found! */
+	return (0);
+}
+
+/*
+ * makes sure that the current endpoint local addr list is consistent with
+ * the new association (e.g. subset bound, asconf allowed); adds addresses
+ * as necessary
+ */
+static void
+sctp_check_address_list_ep(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+    int length, struct sockaddr *init_addr)
+{
+	struct sctp_laddr *laddr;
+
+	/* go through the endpoint list */
+	LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+		/* be paranoid and validate the laddr */
+		if (laddr->ifa == NULL) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1,
+				"check_addr_list_ep: laddr->ifa is NULL");
+			continue;
+		}
+		/* do i have it implicitly? */
+		if (sctp_cmpaddr(&laddr->ifa->address.sa, init_addr)) {
+			continue;
+		}
+		/* check to see if in the init-ack */
+		if (!sctp_addr_in_initack(m, offset, length, &laddr->ifa->address.sa)) {
+			/* try to add it */
+			sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb, laddr->ifa,
+			    SCTP_ADD_IP_ADDRESS, SCTP_ADDR_NOT_LOCKED);
+		}
+	}
+}
+
+/*
+ * makes sure that the current kernel address list is consistent with the new
+ * association (with all addrs bound); adds addresses as necessary
+ */
+static void
+sctp_check_address_list_all(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+    int length, struct sockaddr *init_addr,
+    uint16_t local_scope, uint16_t site_scope,
+    uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+	struct sctp_vrf *vrf = NULL;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t vrf_id;
+#ifdef INET
+	struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+#endif
+
+	if (stcb) {
+		vrf_id = stcb->asoc.vrf_id;
+	} else {
+		return;
+	}
+	SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		SCTP_IPI_ADDR_RUNLOCK();
+		return;
+	}
+	/* go through all our known interfaces */
+	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+		if (loopback_scope == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+			/* skip loopback interface */
+			continue;
+		}
+		/* go through each interface address */
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+			/* do i have it implicitly? */
+			if (sctp_cmpaddr(&sctp_ifa->address.sa, init_addr)) {
+				continue;
+			}
+			switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				sin = &sctp_ifa->address.sin;
+#if defined(__FreeBSD__)
+				if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+				                     &sin->sin_addr) != 0) {
+					continue;
+				}
+#endif
+				if ((ipv4_scope == 0) &&
+				    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+					/* private address not in scope */
+					continue;
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				sin6 = &sctp_ifa->address.sin6;
+#if defined(__FreeBSD__)
+				if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+				                     &sin6->sin6_addr) != 0) {
+					continue;
+				}
+#endif
+				if ((local_scope == 0) &&
+				    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
+					continue;
+				}
+				if ((site_scope == 0) &&
+				    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+					continue;
+				}
+				break;
+#endif
+			default:
+				break;
+			}
+			/* check to see if in the init-ack */
+			if (!sctp_addr_in_initack(m, offset, length, &sctp_ifa->address.sa)) {
+				/* try to add it */
+				sctp_addr_mgmt_assoc(stcb->sctp_ep, stcb,
+				    sctp_ifa, SCTP_ADD_IP_ADDRESS,
+				    SCTP_ADDR_LOCKED);
+			}
+		} /* end foreach ifa */
+	} /* end foreach ifn */
+	SCTP_IPI_ADDR_RUNLOCK();
+}
+
+/*
+ * validates an init-ack chunk (from a cookie-echo) against current addresses:
+ * adds addresses from the init-ack into our local address list if needed,
+ * queues asconf adds/deletes as needed, and makes the appropriate list
+ * changes for source address selection.
+ * m, offset: point to the start of the address list in an init-ack chunk
+ * length: total length of the address params only
+ * init_addr: address where my INIT-ACK was sent from
+ */
+void
+sctp_check_address_list(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+    int length, struct sockaddr *init_addr,
+    uint16_t local_scope, uint16_t site_scope,
+    uint16_t ipv4_scope, uint16_t loopback_scope)
+{
+	/* process the local addresses in the initack */
+	sctp_process_initack_addresses(stcb, m, offset, length);
+
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		/* bound all case */
+		sctp_check_address_list_all(stcb, m, offset, length, init_addr,
+		    local_scope, site_scope, ipv4_scope, loopback_scope);
+	} else {
+		/* subset bound case */
+		if (sctp_is_feature_on(stcb->sctp_ep,
+		    SCTP_PCB_FLAGS_DO_ASCONF)) {
+			/* asconf's allowed */
+			sctp_check_address_list_ep(stcb, m, offset, length,
+			    init_addr);
+		}
+		/* else, no asconfs allowed, so what we sent is what we get */
+	}
+}
+
+/*
+ * sctp_bindx() support
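+ * adds or deletes a bound address on the endpoint; when associations exist,
+ * the change is applied to each of them via the ASCONF iterator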
+ */
+uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa,
+    uint32_t type, uint32_t vrf_id, struct sctp_ifa *sctp_ifap)
+{
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *laddr, *nladdr;
+
+#ifdef HAVE_SA_LEN
+	if (sa->sa_len == 0) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
+		return (EINVAL);
+	}
+#endif
+	if (sctp_ifap) {
+		ifa = sctp_ifap;
+	} else if (type == SCTP_ADD_IP_ADDRESS) {
+		/* For an add the address MUST be on the system */
+		ifa = sctp_find_ifa_by_addr(sa, vrf_id, SCTP_ADDR_NOT_LOCKED);
+	} else if (type == SCTP_DEL_IP_ADDRESS) {
+		/* For a delete we need to find it in the inp */
+		ifa = sctp_find_ifa_in_ep(inp, sa, SCTP_ADDR_NOT_LOCKED);
+	} else {
+		ifa = NULL;
+	}
+	if (ifa != NULL) {
+		if (type == SCTP_ADD_IP_ADDRESS) {
+			sctp_add_local_addr_ep(inp, ifa, type);
+		} else if (type == SCTP_DEL_IP_ADDRESS) {
+			if (inp->laddr_count < 2) {
+				/* can't delete the last local address */
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EINVAL);
+				return (EINVAL);
+			}
+			LIST_FOREACH(laddr, &inp->sctp_addr_list,
+				     sctp_nxt_addr) {
+				if (ifa == laddr->ifa) {
+					/* Mark in the delete */
+					laddr->action = type;
+				}
+			}
+		}
+		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+			/*
+			 * There is no need to start the iterator if
+			 * the inp has no associations.
+			 */
+			if (type == SCTP_DEL_IP_ADDRESS) {
+				LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
+					if (laddr->ifa == ifa) {
+						sctp_del_local_addr_ep(inp, ifa);
+					}
+				}
+			}
+		} else {
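+			/*
+			 * the endpoint has associations, so hand the work off
+			 * to the ASCONF iterator, which walks each assoc and
+			 * queues the corresponding add/delete
+			 */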
+			struct sctp_asconf_iterator *asc;
+			struct sctp_laddr *wi;
+			int ret;
+
+			SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
+			            sizeof(struct sctp_asconf_iterator),
+			            SCTP_M_ASC_IT);
+			if (asc == NULL) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
+				return (ENOMEM);
+			}
+			wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+			if (wi == NULL) {
+				SCTP_FREE(asc, SCTP_M_ASC_IT);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, ENOMEM);
+				return (ENOMEM);
+			}
+			LIST_INIT(&asc->list_of_work);
+			asc->cnt = 1;
+			SCTP_INCR_LADDR_COUNT();
+			wi->ifa = ifa;
+			wi->action = type;
+			atomic_add_int(&ifa->refcount, 1);
+			LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
+			ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
+			                             sctp_asconf_iterator_stcb,
+			                             sctp_asconf_iterator_ep_end,
+			                             SCTP_PCB_ANY_FLAGS,
+			                             SCTP_PCB_ANY_FEATURES,
+			                             SCTP_ASOC_ANY_STATE,
+			                             (void *)asc, 0,
+			                             sctp_asconf_iterator_end, inp, 0);
+			if (ret) {
+				SCTP_PRINTF("Failed to initiate iterator for addr_mgmt_ep_sa\n");
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EFAULT);
+				sctp_asconf_iterator_end(asc, 0);
+				return (EFAULT);
+			}
+		}
+		return (0);
+	} else {
+		/* invalid address! */
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_ASCONF, EADDRNOTAVAIL);
+		return (EADDRNOTAVAIL);
+	}
+}
+
+void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+				  struct sctp_nets *net)
+{
+	struct sctp_asconf_addr *aa;
+	struct sctp_ifa *sctp_ifap;
+	struct sctp_asconf_tag_param *vtag;
+#ifdef INET
+	struct sockaddr_in *to;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *to6;
+#endif
+	if (net == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing net\n");
+		return;
+	}
+	if (stcb == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "sctp_asconf_send_nat_state_update: Missing stcb\n");
+		return;
+	}
+	/* Need to have in the asconf:
+	 * - vtagparam(my_vtag/peer_vtag)
+	 * - add(0.0.0.0)
+	 * - del(0.0.0.0)
+	 * - Any global addresses add(addr)
+	 */
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+	            SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+		return;
+	}
+	aa->special_del = 0;
+	/* fill in asconf address parameter fields */
+	/* top level elements are "networked" during send */
+	aa->ifa = NULL;
+	aa->sent = 0;		/* clear sent flag */
+	vtag = (struct sctp_asconf_tag_param *)&aa->ap.aph;
+	vtag->aph.ph.param_type = SCTP_NAT_VTAGS;
+	vtag->aph.ph.param_length = sizeof(struct sctp_asconf_tag_param);
+	vtag->local_vtag = htonl(stcb->asoc.my_vtag);
+	vtag->remote_vtag = htonl(stcb->asoc.peer_vtag);
+	TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+	            SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+		return;
+	}
+	memset(aa, 0, sizeof(struct sctp_asconf_addr));
+	/* fill in asconf address parameter fields */
+	/* ADD(0.0.0.0) */
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		aa->ap.aph.ph.param_type = SCTP_ADD_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+	default:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: unknown address family\n");
+		SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+		return;
+	}
+	SCTP_MALLOC(aa, struct sctp_asconf_addr *, sizeof(*aa),
+	            SCTP_M_ASC_ADDR);
+	if (aa == NULL) {
+		/* didn't get memory */
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: failed to get memory!\n");
+		return;
+	}
+	memset(aa, 0, sizeof(struct sctp_asconf_addr));
+	/* fill in asconf address parameter fields */
+	/* DEL(0.0.0.0) */
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addrv4_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV4_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv4addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		aa->ap.aph.ph.param_type = SCTP_DEL_IP_ADDRESS;
+		aa->ap.aph.ph.param_length = sizeof(struct sctp_asconf_addr_param);
+		aa->ap.addrp.ph.param_type = SCTP_IPV6_ADDRESS;
+		aa->ap.addrp.ph.param_length = sizeof (struct sctp_ipv6addr_param);
+		/* No need to add an address, we are using 0.0.0.0 */
+		TAILQ_INSERT_TAIL(&stcb->asoc.asconf_queue, aa, next);
+		break;
+#endif
+	default:
+		SCTPDBG(SCTP_DEBUG_ASCONF1,
+		        "sctp_asconf_send_nat_state_update: unknown address family\n");
+		SCTP_FREE(aa, SCTP_M_ASC_ADDR);
+		return;
+	}
+	/* Now we must hunt the addresses and add all global addresses */
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		struct sctp_vrf *vrf = NULL;
+		struct sctp_ifn *sctp_ifnp;
+		uint32_t vrf_id;
+
+		vrf_id = stcb->sctp_ep->def_vrf_id;
+		vrf = sctp_find_vrf(vrf_id);
+		if (vrf == NULL) {
+			goto skip_rest;
+		}
+
+		SCTP_IPI_ADDR_RLOCK();
+		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+				switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					to = &sctp_ifap->address.sin;
+#if defined(__FreeBSD__)
+					if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+					                     &to->sin_addr) != 0) {
+						continue;
+					}
+#endif
+					if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+						continue;
+					}
+					if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+						continue;
+					}
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					to6 = &sctp_ifap->address.sin6;
+#if defined(__FreeBSD__)
+					if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+					                     &to6->sin6_addr) != 0) {
+						continue;
+					}
+#endif
+					if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+						continue;
+					}
+					if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+						continue;
+					}
+					break;
+#endif
+				default:
+					continue;
+				}
+				sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+			}
+		}
+		SCTP_IPI_ADDR_RUNLOCK();
+	} else {
+		struct sctp_laddr *laddr;
+
+		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+			if (laddr->ifa == NULL) {
+				continue;
+			}
+			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+				/* Address being deleted by the system; don't
+				 * list.
+				 */
+				continue;
+			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+				/* Address being deleted on this ep;
+				 * don't list.
+				 */
+				continue;
+			}
+			sctp_ifap = laddr->ifa;
+			switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				to = &sctp_ifap->address.sin;
+				if (IN4_ISPRIVATE_ADDRESS(&to->sin_addr)) {
+					continue;
+				}
+				if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+					continue;
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				to6 = &sctp_ifap->address.sin6;
+				if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+					continue;
+				}
+				if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+					continue;
+				}
+				break;
+#endif
+			default:
+				continue;
+			}
+			sctp_asconf_queue_mgmt(stcb, sctp_ifap, SCTP_ADD_IP_ADDRESS);
+		}
+	}
+ skip_rest:
+	/* Now send out the ASCONF we queued above */
+	sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+}
diff --git a/usrsctplib/netinet/sctp_asconf.h b/usrsctplib/netinet/sctp_asconf.h
new file mode 100755
index 0000000..69accd2
--- /dev/null
+++ b/usrsctplib/netinet/sctp_asconf.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_asconf.h 309607 2016-12-06 10:21:25Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_ASCONF_H_
+#define _NETINET_SCTP_ASCONF_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+/*
+ * function prototypes
+ */
+extern void sctp_asconf_cleanup(struct sctp_tcb *, struct sctp_nets *);
+
+extern struct mbuf *sctp_compose_asconf(struct sctp_tcb *, int *, int);
+
+extern void
+sctp_handle_asconf(struct mbuf *, unsigned int, struct sockaddr *,
+                   struct sctp_asconf_chunk *, struct sctp_tcb *, int);
+
+extern void
+sctp_handle_asconf_ack(struct mbuf *, int, struct sctp_asconf_ack_chunk *,
+     struct sctp_tcb *, struct sctp_nets *, int *);
+
+extern uint32_t
+sctp_addr_mgmt_ep_sa(struct sctp_inpcb *, struct sockaddr *,
+		     uint32_t, uint32_t, struct sctp_ifa *);
+
+
+extern int sctp_asconf_iterator_ep(struct sctp_inpcb *inp, void *ptr,
+				   uint32_t val);
+extern void sctp_asconf_iterator_stcb(struct sctp_inpcb *inp,
+				      struct sctp_tcb *stcb,
+				      void *ptr, uint32_t type);
+extern void sctp_asconf_iterator_end(void *ptr, uint32_t val);
+
+
+extern int32_t
+sctp_set_primary_ip_address_sa(struct sctp_tcb *,
+    struct sockaddr *);
+
+extern void
+sctp_set_primary_ip_address(struct sctp_ifa *ifa);
+
+extern void
+sctp_check_address_list(struct sctp_tcb *, struct mbuf *, int, int,
+    struct sockaddr *, uint16_t, uint16_t, uint16_t, uint16_t);
+
+extern void
+sctp_assoc_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+extern void
+sctp_net_immediate_retrans(struct sctp_tcb *, struct sctp_nets *);
+#endif
+
+extern void
+sctp_asconf_send_nat_state_update(struct sctp_tcb *stcb,
+				  struct sctp_nets *net);
+
+extern int
+sctp_is_addr_pending(struct sctp_tcb *, struct sctp_ifa *);
+#endif				/* _KERNEL */
+
+#endif				/* !_NETINET_SCTP_ASCONF_H_ */
diff --git a/usrsctplib/netinet/sctp_auth.c b/usrsctplib/netinet/sctp_auth.c
new file mode 100755
index 0000000..de81ffd
--- /dev/null
+++ b/usrsctplib/netinet/sctp_auth.c
@@ -0,0 +1,2338 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_auth.h>
+
+#ifdef SCTP_DEBUG
+#define SCTP_AUTH_DEBUG		(SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH1)
+#define SCTP_AUTH_DEBUG2	(SCTP_BASE_SYSCTL(sctp_debug_on) & SCTP_DEBUG_AUTH2)
+#endif /* SCTP_DEBUG */
+
+
+void
+sctp_clear_chunklist(sctp_auth_chklist_t *chklist)
+{
+	bzero(chklist, sizeof(*chklist));
+	/* chklist->num_chunks = 0; */
+}
+
+sctp_auth_chklist_t *
+sctp_alloc_chunklist(void)
+{
+	sctp_auth_chklist_t *chklist;
+
+	SCTP_MALLOC(chklist, sctp_auth_chklist_t *, sizeof(*chklist),
+		    SCTP_M_AUTH_CL);
+	if (chklist == NULL) {
+		SCTPDBG(SCTP_DEBUG_AUTH1, "sctp_alloc_chunklist: failed to get memory!\n");
+	} else {
+		sctp_clear_chunklist(chklist);
+	}
+	return (chklist);
+}
+
+void
+sctp_free_chunklist(sctp_auth_chklist_t *list)
+{
+	if (list != NULL)
+		SCTP_FREE(list, SCTP_M_AUTH_CL);
+}
+
+sctp_auth_chklist_t *
+sctp_copy_chunklist(sctp_auth_chklist_t *list)
+{
+	sctp_auth_chklist_t *new_list;
+
+	if (list == NULL)
+		return (NULL);
+
+	/* get a new list */
+	new_list = sctp_alloc_chunklist();
+	if (new_list == NULL)
+		return (NULL);
+	/* copy it */
+	bcopy(list, new_list, sizeof(*new_list));
+
+	return (new_list);
+}
+
+
+/*
+ * add a chunk to the required chunks list
+ */
+int
+sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t *list)
+{
+	if (list == NULL)
+		return (-1);
+
+	/* is chunk restricted? */
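+	/* (RFC 4895: INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH itself must
+	 *  never be listed as chunks that require authentication) */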
+	if ((chunk == SCTP_INITIATION) ||
+	    (chunk == SCTP_INITIATION_ACK) ||
+	    (chunk == SCTP_SHUTDOWN_COMPLETE) ||
+	    (chunk == SCTP_AUTHENTICATION)) {
+		return (-1);
+	}
+	if (list->chunks[chunk] == 0) {
+		list->chunks[chunk] = 1;
+		list->num_chunks++;
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: added chunk %u (0x%02x) to Auth list\n",
+			chunk, chunk);
+	}
+	return (0);
+}
+
+/*
+ * delete a chunk from the required chunks list
+ */
+int
+sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t *list)
+{
+	if (list == NULL)
+		return (-1);
+
+	if (list->chunks[chunk] == 1) {
+		list->chunks[chunk] = 0;
+		list->num_chunks--;
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: deleted chunk %u (0x%02x) from Auth list\n",
+			chunk, chunk);
+	}
+	return (0);
+}
+
+size_t
+sctp_auth_get_chklist_size(const sctp_auth_chklist_t *list)
+{
+	if (list == NULL)
+		return (0);
+	else
+		return (list->num_chunks);
+}
+
+/*
+ * return the current number and list of required chunks; the caller must
+ * guarantee ptr has space for up to 256 bytes
+ */
+int
+sctp_serialize_auth_chunks(const sctp_auth_chklist_t *list, uint8_t *ptr)
+{
+	int i, count = 0;
+
+	if (list == NULL)
+		return (0);
+
+	for (i = 0; i < 256; i++) {
+		if (list->chunks[i] != 0) {
+			*ptr++ = i;
+			count++;
+		}
+	}
+	return (count);
+}
+
+int
+sctp_pack_auth_chunks(const sctp_auth_chklist_t *list, uint8_t *ptr)
+{
+	int i, size = 0;
+
+	if (list == NULL)
+		return (0);
+
+	if (list->num_chunks <= 32) {
+		/* just list them, one byte each */
+		for (i = 0; i < 256; i++) {
+			if (list->chunks[i] != 0) {
+				*ptr++ = i;
+				size++;
+			}
+		}
+	} else {
+		int index, offset;
+
+		/* pack into a 32 byte bitfield */
+		for (i = 0; i < 256; i++) {
+			if (list->chunks[i] != 0) {
+				index = i / 8;
+				offset = i % 8;
+				ptr[index] |= (1 << offset);
+			}
+		}
+		size = 32;
+	}
+	return (size);
+}
+
+int
+sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks,
+    sctp_auth_chklist_t *list)
+{
+	int i;
+	int size;
+
+	if (list == NULL)
+		return (0);
+
+	if (num_chunks <= 32) {
+		/* just pull them, one byte each */
+		for (i = 0; i < num_chunks; i++) {
+			(void)sctp_auth_add_chunk(*ptr++, list);
+		}
+		size = num_chunks;
+	} else {
+		int index, offset;
+
+		/* unpack from a 32 byte bitfield */
+		for (index = 0; index < 32; index++) {
+			for (offset = 0; offset < 8; offset++) {
+				if (ptr[index] & (1 << offset)) {
+					(void)sctp_auth_add_chunk((index * 8) + offset, list);
+				}
+			}
+		}
+		size = 32;
+	}
+	return (size);
+}
+
+
+/*
+ * allocate structure space for a key of length keylen
+ */
+sctp_key_t *
+sctp_alloc_key(uint32_t keylen)
+{
+	sctp_key_t *new_key;
+
+	SCTP_MALLOC(new_key, sctp_key_t *, sizeof(*new_key) + keylen,
+		    SCTP_M_AUTH_KY);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	new_key->keylen = keylen;
+	return (new_key);
+}
+
+void
+sctp_free_key(sctp_key_t *key)
+{
+	if (key != NULL)
+		SCTP_FREE(key,SCTP_M_AUTH_KY);
+}
+
+void
+sctp_print_key(sctp_key_t *key, const char *str)
+{
+	uint32_t i;
+
+	if (key == NULL) {
+		SCTP_PRINTF("%s: [Null key]\n", str);
+		return;
+	}
+	SCTP_PRINTF("%s: len %u, ", str, key->keylen);
+	if (key->keylen) {
+		for (i = 0; i < key->keylen; i++)
+			SCTP_PRINTF("%02x", key->key[i]);
+		SCTP_PRINTF("\n");
+	} else {
+		SCTP_PRINTF("[Null key]\n");
+	}
+}
+
+void
+sctp_show_key(sctp_key_t *key, const char *str)
+{
+	uint32_t i;
+
+	if (key == NULL) {
+		SCTP_PRINTF("%s: [Null key]\n", str);
+		return;
+	}
+	SCTP_PRINTF("%s: len %u, ", str, key->keylen);
+	if (key->keylen) {
+		for (i = 0; i < key->keylen; i++)
+			SCTP_PRINTF("%02x", key->key[i]);
+		SCTP_PRINTF("\n");
+	} else {
+		SCTP_PRINTF("[Null key]\n");
+	}
+}
+
+static uint32_t
+sctp_get_keylen(sctp_key_t *key)
+{
+	if (key != NULL)
+		return (key->keylen);
+	else
+		return (0);
+}
+
+/*
+ * generate a new random key of length 'keylen'
+ */
+sctp_key_t *
+sctp_generate_random_key(uint32_t keylen)
+{
+	sctp_key_t *new_key;
+
+	new_key = sctp_alloc_key(keylen);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	SCTP_READ_RANDOM(new_key->key, keylen);
+	new_key->keylen = keylen;
+	return (new_key);
+}
+
+sctp_key_t *
+sctp_set_key(uint8_t *key, uint32_t keylen)
+{
+	sctp_key_t *new_key;
+
+	new_key = sctp_alloc_key(keylen);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	bcopy(key, new_key->key, keylen);
+	return (new_key);
+}
+
+/*-
+ * given two keys of variable size, compute which key is "larger/smaller"
+ * returns:  1 if key1 > key2
+ *          -1 if key1 < key2
+ *           0 if key1 = key2
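+ * keys are compared as left-zero-padded big-endian values; when the values
+ * are equal, the shorter key is the smaller one (e.g. {0x01} < {0x00, 0x01})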
+ */
+static int
+sctp_compare_key(sctp_key_t *key1, sctp_key_t *key2)
+{
+	uint32_t maxlen;
+	uint32_t i;
+	uint32_t key1len, key2len;
+	uint8_t *key_1, *key_2;
+	uint8_t val1, val2;
+
+	/* sanity/length check */
+	key1len = sctp_get_keylen(key1);
+	key2len = sctp_get_keylen(key2);
+	if ((key1len == 0) && (key2len == 0))
+		return (0);
+	else if (key1len == 0)
+		return (-1);
+	else if (key2len == 0)
+		return (1);
+
+	if (key1len < key2len) {
+		maxlen = key2len;
+	} else {
+		maxlen = key1len;
+	}
+	key_1 = key1->key;
+	key_2 = key2->key;
+	/* check for numeric equality */
+	for (i = 0; i < maxlen; i++) {
+		/* left-pad with zeros */
+		val1 = (i < (maxlen - key1len)) ? 0 : *(key_1++);
+		val2 = (i < (maxlen - key2len)) ? 0 : *(key_2++);
+		if (val1 > val2) {
+			return (1);
+		} else if (val1 < val2) {
+			return (-1);
+		}
+	}
+	/* keys are equal value, so check lengths */
+	if (key1len == key2len)
+		return (0);
+	else if (key1len < key2len)
+		return (-1);
+	else
+		return (1);
+}
+
+/*
+ * generate the concatenated keying material based on the two keys and the
+ * shared key (if available). draft-ietf-tsvwg-auth (now RFC 4895) specifies
+ * the order for concatenation; here the shared key comes first, followed by
+ * the numerically smaller of the two keys, then the larger one.
+ */
+sctp_key_t *
+sctp_compute_hashkey(sctp_key_t *key1, sctp_key_t *key2, sctp_key_t *shared)
+{
+	uint32_t keylen;
+	sctp_key_t *new_key;
+	uint8_t *key_ptr;
+
+	keylen = sctp_get_keylen(key1) + sctp_get_keylen(key2) +
+	    sctp_get_keylen(shared);
+
+	if (keylen > 0) {
+		/* get space for the new key */
+		new_key = sctp_alloc_key(keylen);
+		if (new_key == NULL) {
+			/* out of memory */
+			return (NULL);
+		}
+		new_key->keylen = keylen;
+		key_ptr = new_key->key;
+	} else {
+		/* all keys empty/null?! */
+		return (NULL);
+	}
+
+	/* concatenate the keys */
+	if (sctp_compare_key(key1, key2) <= 0) {
+		/* key is shared + key1 + key2 */
+		if (sctp_get_keylen(shared)) {
+			bcopy(shared->key, key_ptr, shared->keylen);
+			key_ptr += shared->keylen;
+		}
+		if (sctp_get_keylen(key1)) {
+			bcopy(key1->key, key_ptr, key1->keylen);
+			key_ptr += key1->keylen;
+		}
+		if (sctp_get_keylen(key2)) {
+			bcopy(key2->key, key_ptr, key2->keylen);
+		}
+	} else {
+		/* key is shared + key2 + key1 */
+		if (sctp_get_keylen(shared)) {
+			bcopy(shared->key, key_ptr, shared->keylen);
+			key_ptr += shared->keylen;
+		}
+		if (sctp_get_keylen(key2)) {
+			bcopy(key2->key, key_ptr, key2->keylen);
+			key_ptr += key2->keylen;
+		}
+		if (sctp_get_keylen(key1)) {
+			bcopy(key1->key, key_ptr, key1->keylen);
+		}
+	}
+	return (new_key);
+}
+
+
+sctp_sharedkey_t *
+sctp_alloc_sharedkey(void)
+{
+	sctp_sharedkey_t *new_key;
+
+	SCTP_MALLOC(new_key, sctp_sharedkey_t *, sizeof(*new_key),
+		    SCTP_M_AUTH_KY);
+	if (new_key == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	new_key->keyid = 0;
+	new_key->key = NULL;
+	new_key->refcount = 1;
+	new_key->deactivated = 0;
+	return (new_key);
+}
+
+void
+sctp_free_sharedkey(sctp_sharedkey_t *skey)
+{
+	if (skey == NULL)
+		return;
+
+	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&skey->refcount)) {
+		if (skey->key != NULL)
+			sctp_free_key(skey->key);
+		SCTP_FREE(skey, SCTP_M_AUTH_KY);
+	}
+}
+
+sctp_sharedkey_t *
+sctp_find_sharedkey(struct sctp_keyhead *shared_keys, uint16_t key_id)
+{
+	sctp_sharedkey_t *skey;
+
+	LIST_FOREACH(skey, shared_keys, next) {
+		if (skey->keyid == key_id)
+			return (skey);
+	}
+	return (NULL);
+}
+
+int
+sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+		      sctp_sharedkey_t *new_skey)
+{
+	sctp_sharedkey_t *skey;
+
+	if ((shared_keys == NULL) || (new_skey == NULL))
+		return (EINVAL);
+
+	/* insert into an empty list? */
+	if (LIST_EMPTY(shared_keys)) {
+		LIST_INSERT_HEAD(shared_keys, new_skey, next);
+		return (0);
+	}
+	/* insert into the existing list, ordered by key id */
+	LIST_FOREACH(skey, shared_keys, next) {
+		if (new_skey->keyid < skey->keyid) {
+			/* insert it before here */
+			LIST_INSERT_BEFORE(skey, new_skey, next);
+			return (0);
+		} else if (new_skey->keyid == skey->keyid) {
+			/* replace the existing key */
+			/* verify this key *can* be replaced */
+			if ((skey->deactivated) && (skey->refcount > 1)) {
+				SCTPDBG(SCTP_DEBUG_AUTH1,
+					"can't replace shared key id %u\n",
+					new_skey->keyid);
+				return (EBUSY);
+			}
+			SCTPDBG(SCTP_DEBUG_AUTH1,
+				"replacing shared key id %u\n",
+				new_skey->keyid);
+			LIST_INSERT_BEFORE(skey, new_skey, next);
+			LIST_REMOVE(skey, next);
+			sctp_free_sharedkey(skey);
+			return (0);
+		}
+		if (LIST_NEXT(skey, next) == NULL) {
+			/* belongs at the end of the list */
+			LIST_INSERT_AFTER(skey, new_skey, next);
+			return (0);
+		}
+	}
+	/* shouldn't reach here */
+	return (EINVAL);
+}
+
+void
+sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t key_id)
+{
+	sctp_sharedkey_t *skey;
+
+	/* find the shared key */
+	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+	/* bump the ref count */
+	if (skey) {
+		atomic_add_int(&skey->refcount, 1);
+		SCTPDBG(SCTP_DEBUG_AUTH2,
+			"%s: stcb %p key %u refcount acquire to %d\n",
+			__func__, (void *)stcb, key_id, skey->refcount);
+	}
+}
+
+void
+sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t key_id, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+	SCTP_UNUSED
+#endif
+)
+{
+	sctp_sharedkey_t *skey;
+
+	/* find the shared key */
+	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, key_id);
+
+	/* decrement the ref count */
+	if (skey) {
+		SCTPDBG(SCTP_DEBUG_AUTH2,
+			"%s: stcb %p key %u refcount release to %d\n",
+			__func__, (void *)stcb, key_id, skey->refcount);
+
+		/* see if a notification should be generated */
+		if ((skey->refcount <= 2) && (skey->deactivated)) {
+			/* notify ULP that key is no longer used */
+			sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb,
+					key_id, 0, so_locked);
+			SCTPDBG(SCTP_DEBUG_AUTH2,
+				"%s: stcb %p key %u no longer used, %d\n",
+				__func__, (void *)stcb, key_id, skey->refcount);
+		}
+		sctp_free_sharedkey(skey);
+	}
+}
+
+static sctp_sharedkey_t *
+sctp_copy_sharedkey(const sctp_sharedkey_t *skey)
+{
+	sctp_sharedkey_t *new_skey;
+
+	if (skey == NULL)
+		return (NULL);
+	new_skey = sctp_alloc_sharedkey();
+	if (new_skey == NULL)
+		return (NULL);
+	if (skey->key != NULL)
+		new_skey->key = sctp_set_key(skey->key->key, skey->key->keylen);
+	else
+		new_skey->key = NULL;
+	new_skey->keyid = skey->keyid;
+	return (new_skey);
+}
+
+int
+sctp_copy_skeylist(const struct sctp_keyhead *src, struct sctp_keyhead *dest)
+{
+	sctp_sharedkey_t *skey, *new_skey;
+	int count = 0;
+
+	if ((src == NULL) || (dest == NULL))
+		return (0);
+	LIST_FOREACH(skey, src, next) {
+		new_skey = sctp_copy_sharedkey(skey);
+		if (new_skey != NULL) {
+			if (sctp_insert_sharedkey(dest, new_skey)) {
+				sctp_free_sharedkey(new_skey);
+			} else {
+				count++;
+			}
+		}
+	}
+	return (count);
+}
+
+
+sctp_hmaclist_t *
+sctp_alloc_hmaclist(uint16_t num_hmacs)
+{
+	sctp_hmaclist_t *new_list;
+	int alloc_size;
+
+	alloc_size = sizeof(*new_list) + num_hmacs * sizeof(new_list->hmac[0]);
+	SCTP_MALLOC(new_list, sctp_hmaclist_t *, alloc_size,
+		    SCTP_M_AUTH_HL);
+	if (new_list == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	new_list->max_algo = num_hmacs;
+	new_list->num_algo = 0;
+	return (new_list);
+}
+
+void
+sctp_free_hmaclist(sctp_hmaclist_t *list)
+{
+	if (list != NULL) {
+		SCTP_FREE(list,SCTP_M_AUTH_HL);
+		list = NULL;
+	}
+}
+
+int
+sctp_auth_add_hmacid(sctp_hmaclist_t *list, uint16_t hmac_id)
+{
+	int i;
+	if (list == NULL)
+		return (-1);
+	if (list->num_algo == list->max_algo) {
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: HMAC id list full, ignoring add %u\n", hmac_id);
+		return (-1);
+	}
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	if ((hmac_id != SCTP_AUTH_HMAC_ID_SHA1) &&
+	    (hmac_id != SCTP_AUTH_HMAC_ID_SHA256)) {
+#else
+	if (hmac_id != SCTP_AUTH_HMAC_ID_SHA1) {
+#endif
+		return (-1);
+	}
+	/* Now is it already in the list */
+	for (i = 0; i < list->num_algo; i++) {
+		if (list->hmac[i] == hmac_id) {
+			/* already in list */
+			return (-1);
+		}
+	}
+	SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: add HMAC id %u to list\n", hmac_id);
+	list->hmac[list->num_algo++] = hmac_id;
+	return (0);
+}
+
+sctp_hmaclist_t *
+sctp_copy_hmaclist(sctp_hmaclist_t *list)
+{
+	sctp_hmaclist_t *new_list;
+	int i;
+
+	if (list == NULL)
+		return (NULL);
+	/* get a new list */
+	new_list = sctp_alloc_hmaclist(list->max_algo);
+	if (new_list == NULL)
+		return (NULL);
+	/* copy it */
+	new_list->max_algo = list->max_algo;
+	new_list->num_algo = list->num_algo;
+	for (i = 0; i < list->num_algo; i++)
+		new_list->hmac[i] = list->hmac[i];
+	return (new_list);
+}
+
+sctp_hmaclist_t *
+sctp_default_supported_hmaclist(void)
+{
+	sctp_hmaclist_t *new_list;
+
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	new_list = sctp_alloc_hmaclist(2);
+#else
+	new_list = sctp_alloc_hmaclist(1);
+#endif
+	if (new_list == NULL)
+		return (NULL);
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	/* We prefer SHA256, so list it first */
+	(void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA256);
+#endif
+	(void)sctp_auth_add_hmacid(new_list, SCTP_AUTH_HMAC_ID_SHA1);
+	return (new_list);
+}
+
+/*-
+ * HMAC algos are listed in priority/preference order;
+ * find the best HMAC id to use for the peer based on local support
+ */
+uint16_t
+sctp_negotiate_hmacid(sctp_hmaclist_t *peer, sctp_hmaclist_t *local)
+{
+	int i, j;
+
+	if ((local == NULL) || (peer == NULL))
+		return (SCTP_AUTH_HMAC_ID_RSVD);
+
+	for (i = 0; i < peer->num_algo; i++) {
+		for (j = 0; j < local->num_algo; j++) {
+			if (peer->hmac[i] == local->hmac[j]) {
+				/* found the "best" one */
+				SCTPDBG(SCTP_DEBUG_AUTH1,
+					"SCTP: negotiated peer HMAC id %u\n",
+					peer->hmac[i]);
+				return (peer->hmac[i]);
+			}
+		}
+	}
+	/* didn't find one! */
+	return (SCTP_AUTH_HMAC_ID_RSVD);
+}
+
+/*-
+ * serialize the HMAC algo list and return space used
+ * caller must guarantee ptr has appropriate space
+ */
+int
+sctp_serialize_hmaclist(sctp_hmaclist_t *list, uint8_t *ptr)
+{
+	int i;
+	uint16_t hmac_id;
+
+	if (list == NULL)
+		return (0);
+
+	for (i = 0; i < list->num_algo; i++) {
+		hmac_id = htons(list->hmac[i]);
+		bcopy(&hmac_id, ptr, sizeof(hmac_id));
+		ptr += sizeof(hmac_id);
+	}
+	return (list->num_algo * sizeof(hmac_id));
+}
+
+int
+sctp_verify_hmac_param (struct sctp_auth_hmac_algo *hmacs, uint32_t num_hmacs)
+{
+	uint32_t i;
+
+	for (i = 0; i < num_hmacs; i++) {
+		if (ntohs(hmacs->hmac_ids[i]) == SCTP_AUTH_HMAC_ID_SHA1) {
+			return (0);
+		}
+	}
+	return (-1);
+}
+
+sctp_authinfo_t *
+sctp_alloc_authinfo(void)
+{
+	sctp_authinfo_t *new_authinfo;
+
+	SCTP_MALLOC(new_authinfo, sctp_authinfo_t *, sizeof(*new_authinfo),
+		    SCTP_M_AUTH_IF);
+
+	if (new_authinfo == NULL) {
+		/* out of memory */
+		return (NULL);
+	}
+	bzero(new_authinfo, sizeof(*new_authinfo));
+	return (new_authinfo);
+}
+
+void
+sctp_free_authinfo(sctp_authinfo_t *authinfo)
+{
+	if (authinfo == NULL)
+		return;
+
+	if (authinfo->random != NULL)
+		sctp_free_key(authinfo->random);
+	if (authinfo->peer_random != NULL)
+		sctp_free_key(authinfo->peer_random);
+	if (authinfo->assoc_key != NULL)
+		sctp_free_key(authinfo->assoc_key);
+	if (authinfo->recv_key != NULL)
+		sctp_free_key(authinfo->recv_key);
+
+	/* We are NOT dynamically allocating authinfo's right now... */
+	/* SCTP_FREE(authinfo, SCTP_M_AUTH_??); */
+}
+
+
+uint32_t
+sctp_get_auth_chunk_len(uint16_t hmac_algo)
+{
+	int size;
+
+	size = sizeof(struct sctp_auth_chunk) + sctp_get_hmac_digest_len(hmac_algo);
+	return (SCTP_SIZE32(size));
+}
+
+uint32_t
+sctp_get_hmac_digest_len(uint16_t hmac_algo)
+{
+	switch (hmac_algo) {
+	case SCTP_AUTH_HMAC_ID_SHA1:
+		return (SCTP_AUTH_DIGEST_LEN_SHA1);
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	case SCTP_AUTH_HMAC_ID_SHA256:
+		return (SCTP_AUTH_DIGEST_LEN_SHA256);
+#endif
+	default:
+		/* unknown HMAC algorithm: can't do anything */
+		return (0);
+	} /* end switch */
+}
+
+static inline int
+sctp_get_hmac_block_len(uint16_t hmac_algo)
+{
+	switch (hmac_algo) {
+	case SCTP_AUTH_HMAC_ID_SHA1:
+		return (64);
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	case SCTP_AUTH_HMAC_ID_SHA256:
+		return (64);
+#endif
+	case SCTP_AUTH_HMAC_ID_RSVD:
+	default:
+		/* unknown HMAC algorithm: can't do anything */
+		return (0);
+	} /* end switch */
+}
+
+#if defined(__Userspace__)
+/* __Userspace__ SHA1_Init is defined in libcrypto.a (libssl-dev on Ubuntu) */
+#endif
+static void
+sctp_hmac_init(uint16_t hmac_algo, sctp_hash_context_t *ctx)
+{
+	switch (hmac_algo) {
+	case SCTP_AUTH_HMAC_ID_SHA1:
+		SCTP_SHA1_INIT(&ctx->sha1);
+		break;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	case SCTP_AUTH_HMAC_ID_SHA256:
+		SCTP_SHA256_INIT(&ctx->sha256);
+		break;
+#endif
+	case SCTP_AUTH_HMAC_ID_RSVD:
+	default:
+		/* unknown HMAC algorithm: can't do anything */
+		return;
+	} /* end switch */
+}
+
+static void
+sctp_hmac_update(uint16_t hmac_algo, sctp_hash_context_t *ctx,
+    uint8_t *text, uint32_t textlen)
+{
+	switch (hmac_algo) {
+	case SCTP_AUTH_HMAC_ID_SHA1:
+		SCTP_SHA1_UPDATE(&ctx->sha1, text, textlen);
+		break;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	case SCTP_AUTH_HMAC_ID_SHA256:
+		SCTP_SHA256_UPDATE(&ctx->sha256, text, textlen);
+		break;
+#endif
+	case SCTP_AUTH_HMAC_ID_RSVD:
+	default:
+		/* unknown HMAC algorithm: can't do anything */
+		return;
+	} /* end switch */
+}
+
+static void
+sctp_hmac_final(uint16_t hmac_algo, sctp_hash_context_t *ctx,
+    uint8_t *digest)
+{
+	switch (hmac_algo) {
+	case SCTP_AUTH_HMAC_ID_SHA1:
+		SCTP_SHA1_FINAL(digest, &ctx->sha1);
+		break;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	case SCTP_AUTH_HMAC_ID_SHA256:
+		SCTP_SHA256_FINAL(digest, &ctx->sha256);
+		break;
+#endif
+	case SCTP_AUTH_HMAC_ID_RSVD:
+	default:
+		/* unknown HMAC algorithm: can't do anything */
+		return;
+	} /* end switch */
+}
+
+/*-
+ * Keyed-Hashing for Message Authentication: FIPS 198 (RFC 2104)
+ *
+ * Compute the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm.  Resulting digest is placed in 'digest' and digest length
+ * is returned, if the HMAC was performed.
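+ * The construction is digest = H((key XOR opad) || H((key XOR ipad) || text)),
+ * per RFC 2104.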
+ *
+ * WARNING: it is up to the caller to supply sufficient space to hold the
+ * resultant digest.
+ */
+uint32_t
+sctp_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen, uint8_t *digest)
+{
+	uint32_t digestlen;
+	uint32_t blocklen;
+	sctp_hash_context_t ctx;
+	uint8_t ipad[128], opad[128];	/* keyed hash inner/outer pads */
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+	uint32_t i;
+
+	/* sanity check the material and length */
+	if ((key == NULL) || (keylen == 0) || (text == NULL) ||
+	    (textlen == 0) || (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest store */
+		return (0);
+	}
+	/* validate the hmac algo and get the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_algo);
+	if (digestlen == 0)
+		return (0);
+
+	/* hash the key if it is longer than the hash block size */
+	blocklen = sctp_get_hmac_block_len(hmac_algo);
+	if (keylen > blocklen) {
+		sctp_hmac_init(hmac_algo, &ctx);
+		sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+		sctp_hmac_final(hmac_algo, &ctx, temp);
+		/* set the hashed key as the key */
+		keylen = digestlen;
+		key = temp;
+	}
+	/* initialize the inner/outer pads with the key and "append" zeroes */
+	bzero(ipad, blocklen);
+	bzero(opad, blocklen);
+	bcopy(key, ipad, keylen);
+	bcopy(key, opad, keylen);
+
+	/* XOR the key with ipad and opad values */
+	for (i = 0; i < blocklen; i++) {
+		ipad[i] ^= 0x36;
+		opad[i] ^= 0x5c;
+	}
+
+	/* perform inner hash */
+	sctp_hmac_init(hmac_algo, &ctx);
+	sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+	sctp_hmac_update(hmac_algo, &ctx, text, textlen);
+	sctp_hmac_final(hmac_algo, &ctx, temp);
+
+	/* perform outer hash */
+	sctp_hmac_init(hmac_algo, &ctx);
+	sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+	sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+	sctp_hmac_final(hmac_algo, &ctx, digest);
+
+	return (digestlen);
+}
+
+/* mbuf version */
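+/* ('trailer' bytes at the end of the last mbuf are excluded from the text) */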
+uint32_t
+sctp_hmac_m(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    struct mbuf *m, uint32_t m_offset, uint8_t *digest, uint32_t trailer)
+{
+	uint32_t digestlen;
+	uint32_t blocklen;
+	sctp_hash_context_t ctx;
+	uint8_t ipad[128], opad[128];	/* keyed hash inner/outer pads */
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+	uint32_t i;
+	struct mbuf *m_tmp;
+
+	/* sanity check the material and length */
+	if ((key == NULL) || (keylen == 0) || (m == NULL) || (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest store */
+		return (0);
+	}
+	/* validate the hmac algo and get the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_algo);
+	if (digestlen == 0)
+		return (0);
+
+	/* hash the key if it is longer than the hash block size */
+	blocklen = sctp_get_hmac_block_len(hmac_algo);
+	if (keylen > blocklen) {
+		sctp_hmac_init(hmac_algo, &ctx);
+		sctp_hmac_update(hmac_algo, &ctx, key, keylen);
+		sctp_hmac_final(hmac_algo, &ctx, temp);
+		/* set the hashed key as the key */
+		keylen = digestlen;
+		key = temp;
+	}
+	/* initialize the inner/outer pads with the key and "append" zeroes */
+	bzero(ipad, blocklen);
+	bzero(opad, blocklen);
+	bcopy(key, ipad, keylen);
+	bcopy(key, opad, keylen);
+
+	/* XOR the key with ipad and opad values */
+	for (i = 0; i < blocklen; i++) {
+		ipad[i] ^= 0x36;
+		opad[i] ^= 0x5c;
+	}
+
+	/* perform inner hash */
+	sctp_hmac_init(hmac_algo, &ctx);
+	sctp_hmac_update(hmac_algo, &ctx, ipad, blocklen);
+	/* find the correct starting mbuf and offset (get start of text) */
+	m_tmp = m;
+	while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
+		m_offset -= SCTP_BUF_LEN(m_tmp);
+		m_tmp = SCTP_BUF_NEXT(m_tmp);
+	}
+	/* now use the rest of the mbuf chain for the text */
+	while (m_tmp != NULL) {
+		if ((SCTP_BUF_NEXT(m_tmp) == NULL) && trailer) {
+			sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
+					 SCTP_BUF_LEN(m_tmp) - (trailer+m_offset));
+		} else {
+			sctp_hmac_update(hmac_algo, &ctx, mtod(m_tmp, uint8_t *) + m_offset,
+					 SCTP_BUF_LEN(m_tmp) - m_offset);
+		}
+
+		/* clear the offset since it's only for the first mbuf */
+		m_offset = 0;
+		m_tmp = SCTP_BUF_NEXT(m_tmp);
+	}
+	sctp_hmac_final(hmac_algo, &ctx, temp);
+
+	/* perform outer hash */
+	sctp_hmac_init(hmac_algo, &ctx);
+	sctp_hmac_update(hmac_algo, &ctx, opad, blocklen);
+	sctp_hmac_update(hmac_algo, &ctx, temp, digestlen);
+	sctp_hmac_final(hmac_algo, &ctx, digest);
+
+	return (digestlen);
+}
+
+/*-
+ * verify the HMAC digest using the desired hash key, text, and HMAC
+ * algorithm.
+ * Returns -1 on error, 0 on success.
+ */
+int
+sctp_verify_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen,
+    uint8_t *digest, uint32_t digestlen)
+{
+	uint32_t len;
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	/* sanity check the material and length */
+	if ((key == NULL) || (keylen == 0) ||
+	    (text == NULL) || (textlen == 0) || (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest */
+		return (-1);
+	}
+	len = sctp_get_hmac_digest_len(hmac_algo);
+	if ((len == 0) || (digestlen != len))
+		return (-1);
+
+	/* compute the expected hash */
+	if (sctp_hmac(hmac_algo, key, keylen, text, textlen, temp) != len)
+		return (-1);
+
+	if (memcmp(digest, temp, digestlen) != 0)
+		return (-1);
+	else
+		return (0);
+}
+
+
+/*
+ * computes the requested HMAC using a key struct (which may be modified if
+ * the keylen exceeds the HMAC block len).
+ */
+uint32_t
+sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t *key, uint8_t *text,
+    uint32_t textlen, uint8_t *digest)
+{
+	uint32_t digestlen;
+	uint32_t blocklen;
+	sctp_hash_context_t ctx;
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	/* sanity check */
+	if ((key == NULL) || (text == NULL) || (textlen == 0) ||
+	    (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest store */
+		return (0);
+	}
+	/* validate the hmac algo and get the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_algo);
+	if (digestlen == 0)
+		return (0);
+
+	/* hash the key if it is longer than the hash block size */
+	blocklen = sctp_get_hmac_block_len(hmac_algo);
+	if (key->keylen > blocklen) {
+		sctp_hmac_init(hmac_algo, &ctx);
+		sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+		sctp_hmac_final(hmac_algo, &ctx, temp);
+		/* save the hashed key as the new key */
+		key->keylen = digestlen;
+		bcopy(temp, key->key, key->keylen);
+	}
+	return (sctp_hmac(hmac_algo, key->key, key->keylen, text, textlen,
+	    digest));
+}
+
+/* mbuf version */
+uint32_t
+sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t *key, struct mbuf *m,
+    uint32_t m_offset, uint8_t *digest)
+{
+	uint32_t digestlen;
+	uint32_t blocklen;
+	sctp_hash_context_t ctx;
+	uint8_t temp[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	/* sanity check */
+	if ((key == NULL) || (m == NULL) || (digest == NULL)) {
+		/* can't do HMAC with empty key or text or digest store */
+		return (0);
+	}
+	/* validate the hmac algo and get the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_algo);
+	if (digestlen == 0)
+		return (0);
+
+	/* hash the key if it is longer than the hash block size */
+	blocklen = sctp_get_hmac_block_len(hmac_algo);
+	if (key->keylen > blocklen) {
+		sctp_hmac_init(hmac_algo, &ctx);
+		sctp_hmac_update(hmac_algo, &ctx, key->key, key->keylen);
+		sctp_hmac_final(hmac_algo, &ctx, temp);
+		/* save the hashed key as the new key */
+		key->keylen = digestlen;
+		bcopy(temp, key->key, key->keylen);
+	}
+	return (sctp_hmac_m(hmac_algo, key->key, key->keylen, m, m_offset, digest, 0));
+}
+
+int
+sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id)
+{
+	int i;
+
+	if ((list == NULL) || (id == SCTP_AUTH_HMAC_ID_RSVD))
+		return (0);
+
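+		/* chunk type i maps to bit (i % 8) of byte (i / 8); bits are
+		 * only OR'd in, so ptr is assumed to point at a zeroed buffer
+		 */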
+	for (i = 0; i < list->num_algo; i++)
+		if (list->hmac[i] == id)
+			return (1);
+
+	/* not in the list */
+	return (0);
+}
+
+
+/*-
+ * clear any cached key(s) if they match the given key id on an association.
+ * the cached key(s) will be recomputed and re-cached at next use.
+ * ASSUMES TCB_LOCK is already held
+ */
+void
+sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid)
+{
+	if (stcb == NULL)
+		return;
+
+	if (keyid == stcb->asoc.authinfo.assoc_keyid) {
+		sctp_free_key(stcb->asoc.authinfo.assoc_key);
+		stcb->asoc.authinfo.assoc_key = NULL;
+	}
+	if (keyid == stcb->asoc.authinfo.recv_keyid) {
+		sctp_free_key(stcb->asoc.authinfo.recv_key);
+		stcb->asoc.authinfo.recv_key = NULL;
+	}
+}
+
+/*-
+ * clear any cached key(s) if they match the given key id for all assocs on
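+		/* mirror of sctp_pack_auth_chunks(): bit (i % 8) of byte
+		 * (i / 8) set means chunk type i requires authentication
+		 */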
+ * an endpoint.
+ * ASSUMES INP_WLOCK is already held
+ */
+void
+sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+	struct sctp_tcb *stcb;
+
+	if (inp == NULL)
+		return;
+
+	/* clear the cached keys on all assocs on this instance */
+	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+		SCTP_TCB_LOCK(stcb);
+		sctp_clear_cachedkeys(stcb, keyid);
+		SCTP_TCB_UNLOCK(stcb);
+	}
+}
+
+/*-
+ * delete a shared key from an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	if (stcb == NULL)
+		return (-1);
+
+	/* is the keyid the assoc active sending key */
+	if (keyid == stcb->asoc.authinfo.active_keyid)
+		return (-1);
+
+	/* does the key exist? */
+	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+	if (skey == NULL)
+		return (-1);
+
+	/* are there other refcount holders on the key? */
+	if (skey->refcount > 1)
+		return (-1);
+
+	/* remove it */
+	LIST_REMOVE(skey, next);
+	sctp_free_sharedkey(skey);	/* frees skey->key as well */
+
+	/* clear any cached keys */
+	sctp_clear_cachedkeys(stcb, keyid);
+	return (0);
+}
+
+/*-
+ * deletes a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	if (inp == NULL)
+		return (-1);
+
+	/* is the keyid the active sending key on the endpoint */
+	if (keyid == inp->sctp_ep.default_keyid)
+		return (-1);
+
+	/* does the key exist? */
+	skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+	if (skey == NULL)
+		return (-1);
+
+	/* endpoint keys are not refcounted */
+
+	/* remove it */
+	LIST_REMOVE(skey, next);
+	sctp_free_sharedkey(skey);	/* frees skey->key as well */
+
+	/* clear any cached keys */
+	sctp_clear_cachedkeys_ep(inp, keyid);
+	return (0);
+}
+
+/*-
+ * set the active key on an association
+ * ASSUMES TCB_LOCK is already held
+ */
+int
+sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey = NULL;
+
+	/* find the key on the assoc */
+	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+	if (skey == NULL) {
+		/* that key doesn't exist */
+		return (-1);
+	}
+	if ((skey->deactivated) && (skey->refcount > 1)) {
+		/* can't reactivate a deactivated key with other refcounts */
+		return (-1);
+	}
+
+	/* set the (new) active key */
+	stcb->asoc.authinfo.active_keyid = keyid;
+	/* reset the deactivated flag */
+	skey->deactivated = 0;
+
+	return (0);
+}
+
+/*-
+ * set the active key on an endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	/* find the key */
+	skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+	if (skey == NULL) {
+		/* that key doesn't exist */
+		return (-1);
+	}
+	inp->sctp_ep.default_keyid = keyid;
+	return (0);
+}
+
+/*-
+ * deactivates a shared key from the association
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	if (stcb == NULL)
+		return (-1);
+
+	/* is the keyid the assoc active sending key */
+	if (keyid == stcb->asoc.authinfo.active_keyid)
+		return (-1);
+
+	/* does the key exist? */
+	skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+	if (skey == NULL)
+		return (-1);
+
+	/* are there other refcount holders on the key? */
+	if (skey->refcount == 1) {
+		/* no other users, send a notification for this key */
+		sctp_ulp_notify(SCTP_NOTIFY_AUTH_FREE_KEY, stcb, keyid, 0,
+				SCTP_SO_LOCKED);
+	}
+
+	/* mark the key as deactivated */
+	skey->deactivated = 1;
+
+	return (0);
+}
+
+/*-
+ * deactivates a shared key from the endpoint
+ * ASSUMES INP_WLOCK is already held
+ */
+int
+sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid)
+{
+	sctp_sharedkey_t *skey;
+
+	if (inp == NULL)
+		return (-1);
+
+	/* is the keyid the active sending key on the endpoint */
+	if (keyid == inp->sctp_ep.default_keyid)
+		return (-1);
+
+	/* does the key exist? */
+	skey = sctp_find_sharedkey(&inp->sctp_ep.shared_keys, keyid);
+	if (skey == NULL)
+		return (-1);
+
+	/* endpoint keys are not refcounted */
+
+	/* remove it */
+	LIST_REMOVE(skey, next);
+	sctp_free_sharedkey(skey);	/* frees skey->key as well */
+
+	return (0);
+}
+
+/*
+ * get local authentication parameters from cookie (from INIT-ACK)
+ */
+void
+sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+    uint32_t offset, uint32_t length)
+{
+	struct sctp_paramhdr *phdr, tmp_param;
+	uint16_t plen, ptype;
+	uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_auth_random *p_random = NULL;
+	uint16_t random_len = 0;
+	uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_auth_hmac_algo *hmacs = NULL;
+	uint16_t hmacs_len = 0;
+	uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_auth_chunk_list *chunks = NULL;
+	uint16_t num_chunks = 0;
+	sctp_key_t *new_key;
+	uint32_t keylen;
+
+	/* convert to upper bound */
+	length += offset;
+
+	phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset,
+	    sizeof(struct sctp_paramhdr), (uint8_t *)&tmp_param);
+	while (phdr != NULL) {
+		ptype = ntohs(phdr->param_type);
+		plen = ntohs(phdr->param_length);
+
+		if ((plen == 0) || (offset + plen > length))
+			break;
+
+		if (ptype == SCTP_RANDOM) {
+			if (plen > sizeof(random_store))
+				break;
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)random_store, min(plen, sizeof(random_store)));
+			if (phdr == NULL)
+				return;
+			/* save the random and length for the key */
+			p_random = (struct sctp_auth_random *)phdr;
+			random_len = plen - sizeof(*p_random);
+		} else if (ptype == SCTP_HMAC_LIST) {
+			uint16_t num_hmacs;
+			uint16_t i;
+
+			if (plen > sizeof(hmacs_store))
+				break;
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)hmacs_store, min(plen,sizeof(hmacs_store)));
+			if (phdr == NULL)
+				return;
+			/* save the hmacs list and num for the key */
+			hmacs = (struct sctp_auth_hmac_algo *)phdr;
+			hmacs_len = plen - sizeof(*hmacs);
+			num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+			if (stcb->asoc.local_hmacs != NULL)
+				sctp_free_hmaclist(stcb->asoc.local_hmacs);
+			stcb->asoc.local_hmacs = sctp_alloc_hmaclist(num_hmacs);
+			if (stcb->asoc.local_hmacs != NULL) {
+				for (i = 0; i < num_hmacs; i++) {
+					(void)sctp_auth_add_hmacid(stcb->asoc.local_hmacs,
+					    ntohs(hmacs->hmac_ids[i]));
+				}
+			}
+		} else if (ptype == SCTP_CHUNK_LIST) {
+			int i;
+
+			if (plen > sizeof(chunks_store))
+				break;
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)chunks_store, min(plen,sizeof(chunks_store)));
+			if (phdr == NULL)
+				return;
+			chunks = (struct sctp_auth_chunk_list *)phdr;
+			num_chunks = plen - sizeof(*chunks);
+			/* save chunks list and num for the key */
+			if (stcb->asoc.local_auth_chunks != NULL)
+				sctp_clear_chunklist(stcb->asoc.local_auth_chunks);
+			else
+				stcb->asoc.local_auth_chunks = sctp_alloc_chunklist();
+			for (i = 0; i < num_chunks; i++) {
+				(void)sctp_auth_add_chunk(chunks->chunk_types[i],
+				    stcb->asoc.local_auth_chunks);
+			}
+		}
+		/* get next parameter */
+		offset += SCTP_SIZE32(plen);
+		if (offset + sizeof(struct sctp_paramhdr) > length)
+			break;
+		phdr = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(struct sctp_paramhdr),
+		    (uint8_t *)&tmp_param);
+	}
+	/* concatenate the full random key */
+	keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
+	if (chunks != NULL) {
+		keylen += sizeof(*chunks) + num_chunks;
+	}
+	new_key = sctp_alloc_key(keylen);
+	if (new_key != NULL) {
+	    /* copy in the RANDOM */
+	    if (p_random != NULL) {
+		keylen = sizeof(*p_random) + random_len;
+		bcopy(p_random, new_key->key, keylen);
+	    }
+	    /* append in the AUTH chunks */
+	    if (chunks != NULL) {
+		bcopy(chunks, new_key->key + keylen,
+		      sizeof(*chunks) + num_chunks);
+		keylen += sizeof(*chunks) + num_chunks;
+	    }
+	    /* append in the HMACs */
+	    if (hmacs != NULL) {
+		bcopy(hmacs, new_key->key + keylen,
+		      sizeof(*hmacs) + hmacs_len);
+	    }
+	}
+	if (stcb->asoc.authinfo.random != NULL)
+		sctp_free_key(stcb->asoc.authinfo.random);
+	stcb->asoc.authinfo.random = new_key;
+	stcb->asoc.authinfo.random_len = random_len;
+	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+	/* negotiate what HMAC to use for the peer */
+	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+	    stcb->asoc.local_hmacs);
+
+	/* copy defaults from the endpoint */
+	/* FIX ME: put in cookie? */
+	stcb->asoc.authinfo.active_keyid = stcb->sctp_ep->sctp_ep.default_keyid;
+	/* copy out the shared key list (by reference) from the endpoint */
+	(void)sctp_copy_skeylist(&stcb->sctp_ep->sctp_ep.shared_keys,
+				 &stcb->asoc.shared_keys);
+}
+
+/*
+ * compute and fill in the HMAC digest for a packet
+ */
+void
+sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+    struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t keyid)
+{
+	uint32_t digestlen;
+	sctp_sharedkey_t *skey;
+	sctp_key_t *key;
+
+	if ((stcb == NULL) || (auth == NULL))
+		return;
+
+	/* zero the digest + chunk padding */
+	digestlen = sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+	bzero(auth->hmac, SCTP_SIZE32(digestlen));
+
+	/* is the desired key cached? */
+	if ((keyid != stcb->asoc.authinfo.assoc_keyid) ||
+	    (stcb->asoc.authinfo.assoc_key == NULL)) {
+		if (stcb->asoc.authinfo.assoc_key != NULL) {
+			/* free the old cached key */
+			sctp_free_key(stcb->asoc.authinfo.assoc_key);
+		}
+		skey = sctp_find_sharedkey(&stcb->asoc.shared_keys, keyid);
+		/* the only way skey is NULL is if null key id 0 is used */
+		if (skey != NULL)
+			key = skey->key;
+		else
+			key = NULL;
+		/* compute a new assoc key and cache it */
+		stcb->asoc.authinfo.assoc_key =
+		    sctp_compute_hashkey(stcb->asoc.authinfo.random,
+					 stcb->asoc.authinfo.peer_random, key);
+		stcb->asoc.authinfo.assoc_keyid = keyid;
+		SCTPDBG(SCTP_DEBUG_AUTH1, "caching key id %u\n",
+			stcb->asoc.authinfo.assoc_keyid);
+#ifdef SCTP_DEBUG
+		if (SCTP_AUTH_DEBUG)
+			sctp_print_key(stcb->asoc.authinfo.assoc_key,
+				       "Assoc Key");
+#endif
+	}
+
+	/* set in the active key id */
+	auth->shared_key_id = htons(keyid);
+
+	/* compute and fill in the digest */
+	(void)sctp_compute_hmac_m(stcb->asoc.peer_hmac_id, stcb->asoc.authinfo.assoc_key,
+				  m, auth_offset, auth->hmac);
+}
+
+
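+/*
+ * zero out 'size' bytes in the mbuf chain 'm' starting at offset 'm_offset'.
+ * used here to blank the received HMAC digest before it is recomputed.
+ */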
+static void
+sctp_bzero_m(struct mbuf *m, uint32_t m_offset, uint32_t size)
+{
+	struct mbuf *m_tmp;
+	uint8_t *data;
+
+	/* sanity check */
+	if (m == NULL)
+		return;
+
+	/* find the correct starting mbuf and offset (get start position) */
+	m_tmp = m;
+	while ((m_tmp != NULL) && (m_offset >= (uint32_t) SCTP_BUF_LEN(m_tmp))) {
+		m_offset -= SCTP_BUF_LEN(m_tmp);
+		m_tmp = SCTP_BUF_NEXT(m_tmp);
+	}
+	/* now use the rest of the mbuf chain */
+	while ((m_tmp != NULL) && (size > 0)) {
+		data = mtod(m_tmp, uint8_t *) + m_offset;
+		if (size > (uint32_t) SCTP_BUF_LEN(m_tmp)) {
+			bzero(data, SCTP_BUF_LEN(m_tmp));
+			size -= SCTP_BUF_LEN(m_tmp);
+		} else {
+			bzero(data, size);
+			size = 0;
+		}
+		/* clear the offset since it's only for the first mbuf */
+		m_offset = 0;
+		m_tmp = SCTP_BUF_NEXT(m_tmp);
+	}
+}
+
+/*-
+ * process the incoming Authentication chunk
+ * return codes:
+ *   -1 on any authentication error
+ *    0 on authentication verification
+ */
+int
+sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *auth,
+    struct mbuf *m, uint32_t offset)
+{
+	uint16_t chunklen;
+	uint16_t shared_key_id;
+	uint16_t hmac_id;
+	sctp_sharedkey_t *skey;
+	uint32_t digestlen;
+	uint8_t digest[SCTP_AUTH_DIGEST_LEN_MAX];
+	uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	/* auth is checked for NULL by caller */
+	chunklen = ntohs(auth->ch.chunk_length);
+	if (chunklen < sizeof(*auth)) {
+		SCTP_STAT_INCR(sctps_recvauthfailed);
+		return (-1);
+	}
+	SCTP_STAT_INCR(sctps_recvauth);
+
+	/* get the auth params */
+	shared_key_id = ntohs(auth->shared_key_id);
+	hmac_id = ntohs(auth->hmac_id);
+	SCTPDBG(SCTP_DEBUG_AUTH1,
+		"SCTP AUTH Chunk: shared key %u, HMAC id %u\n",
+		shared_key_id, hmac_id);
+
+	/* is the indicated HMAC supported? */
+	if (!sctp_auth_is_supported_hmac(stcb->asoc.local_hmacs, hmac_id)) {
+		struct mbuf *op_err;
+		struct sctp_error_auth_invalid_hmac *cause;
+
+		SCTP_STAT_INCR(sctps_recvivalhmacid);
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP Auth: unsupported HMAC id %u\n",
+			hmac_id);
+		/*
+		 * report this in an Error Chunk: Unsupported HMAC
+		 * Identifier
+		 */
+		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_auth_invalid_hmac),
+		                               0, M_NOWAIT, 1, MT_HEADER);
+		if (op_err != NULL) {
+			/* pre-reserve some space */
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+			/* fill in the error */
+			cause = mtod(op_err, struct sctp_error_auth_invalid_hmac *);
+			cause->cause.code = htons(SCTP_CAUSE_UNSUPPORTED_HMACID);
+			cause->cause.length = htons(sizeof(struct sctp_error_auth_invalid_hmac));
+			cause->hmac_id = htons(hmac_id);
+			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_auth_invalid_hmac);
+			/* queue it */
+			sctp_queue_op_err(stcb, op_err);
+		}
+		return (-1);
+	}
+	/* get the indicated shared key, if available */
+	if ((stcb->asoc.authinfo.recv_key == NULL) ||
+	    (stcb->asoc.authinfo.recv_keyid != shared_key_id)) {
+		/* find the shared key on the assoc first */
+		skey = sctp_find_sharedkey(&stcb->asoc.shared_keys,
+					   shared_key_id);
+		/* if the shared key isn't found, discard the chunk */
+		if (skey == NULL) {
+			SCTP_STAT_INCR(sctps_recvivalkeyid);
+			SCTPDBG(SCTP_DEBUG_AUTH1,
+				"SCTP Auth: unknown key id %u\n",
+				shared_key_id);
+			return (-1);
+		}
+		/* generate a notification if this is a new key id */
+		if (stcb->asoc.authinfo.recv_keyid != shared_key_id)
+			/*
+			 * sctp_ulp_notify(SCTP_NOTIFY_AUTH_NEW_KEY, stcb,
+			 * shared_key_id, (void
+			 * *)stcb->asoc.authinfo.recv_keyid);
+			 */
+			sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY,
+			    shared_key_id, stcb->asoc.authinfo.recv_keyid,
+			    SCTP_SO_NOT_LOCKED);
+		/* compute a new recv assoc key and cache it */
+		if (stcb->asoc.authinfo.recv_key != NULL)
+			sctp_free_key(stcb->asoc.authinfo.recv_key);
+		stcb->asoc.authinfo.recv_key =
+		    sctp_compute_hashkey(stcb->asoc.authinfo.random,
+		    stcb->asoc.authinfo.peer_random, skey->key);
+		stcb->asoc.authinfo.recv_keyid = shared_key_id;
+#ifdef SCTP_DEBUG
+		if (SCTP_AUTH_DEBUG)
+			sctp_print_key(stcb->asoc.authinfo.recv_key, "Recv Key");
+#endif
+	}
+	/* validate the digest length */
+	digestlen = sctp_get_hmac_digest_len(hmac_id);
+	if (chunklen < (sizeof(*auth) + digestlen)) {
+		/* invalid digest length */
+		SCTP_STAT_INCR(sctps_recvauthfailed);
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP Auth: chunk too short for HMAC\n");
+		return (-1);
+	}
+	/* save a copy of the digest, zero the pseudo header, and validate */
+	bcopy(auth->hmac, digest, digestlen);
+	sctp_bzero_m(m, offset + sizeof(*auth), SCTP_SIZE32(digestlen));
+	(void)sctp_compute_hmac_m(hmac_id, stcb->asoc.authinfo.recv_key,
+	    m, offset, computed_digest);
+
+	/* compare the computed digest with the one in the AUTH chunk */
+	if (memcmp(digest, computed_digest, digestlen) != 0) {
+		SCTP_STAT_INCR(sctps_recvauthfailed);
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP Auth: HMAC digest check failed\n");
+		return (-1);
+	}
+	return (0);
+}
+
+/*
+ * Generate NOTIFICATION
+ */
+void
+sctp_notify_authentication(struct sctp_tcb *stcb, uint32_t indication,
+			   uint16_t keyid, uint16_t alt_keyid, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+	SCTP_UNUSED
+#endif
+)
+{
+	struct mbuf *m_notify;
+	struct sctp_authkey_event *auth;
+	struct sctp_queued_to_read *control;
+
+	if ((stcb == NULL) ||
+	   (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	   (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	   (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
+		) {
+		/* If the socket is gone we are out of here */
+		return;
+	}
+
+	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_AUTHEVNT))
+		/* event not enabled */
+		return;
+
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_authkey_event),
+					  0, M_NOWAIT, 1, MT_HEADER);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+
+	SCTP_BUF_LEN(m_notify) = 0;
+	auth = mtod(m_notify, struct sctp_authkey_event *);
+	memset(auth, 0, sizeof(struct sctp_authkey_event));
+	auth->auth_type = SCTP_AUTHENTICATION_EVENT;
+	auth->auth_flags = 0;
+	auth->auth_length = sizeof(*auth);
+	auth->auth_keynumber = keyid;
+	auth->auth_altkeynumber = alt_keyid;
+	auth->auth_indication = indication;
+	auth->auth_assoc_id = sctp_get_associd(stcb);
+
+	SCTP_BUF_LEN(m_notify) = sizeof(*auth);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	    0, 0, stcb->asoc.context, 0, 0, 0, m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	control->length = SCTP_BUF_LEN(m_notify);
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+/*-
+ * validates the AUTHentication related parameters in an INIT/INIT-ACK
+ * Note: currently only used for INIT as INIT-ACK is handled inline
+ * with sctp_load_addresses_from_init()
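+ * Returns 0 if the AUTH-related parameters are acceptable, -1 on an invalid
+ * or inconsistent parameter set, and -2 if the peer advertises
+ * ASCONF/ASCONF-ACK together with AUTH but does not list them as chunks
+ * requiring authentication.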
+ */
+int
+sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
+{
+	struct sctp_paramhdr *phdr, parm_buf;
+	uint16_t ptype, plen;
+	int peer_supports_asconf = 0;
+	int peer_supports_auth = 0;
+	int got_random = 0, got_hmacs = 0, got_chklist = 0;
+	uint8_t saw_asconf = 0;
+	uint8_t saw_asconf_ack = 0;
+
+	/* go through each of the params. */
+	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+	while (phdr) {
+		ptype = ntohs(phdr->param_type);
+		plen = ntohs(phdr->param_length);
+
+		if (offset + plen > limit) {
+			break;
+		}
+		if (plen < sizeof(struct sctp_paramhdr)) {
+			break;
+		}
+		if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+			/* A supported extension chunk */
+			struct sctp_supported_chunk_types_param *pr_supported;
+			uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
+			int num_ent, i;
+
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)&local_store, min(plen,sizeof(local_store)));
+			if (phdr == NULL) {
+				return (-1);
+			}
+			pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+			num_ent = plen - sizeof(struct sctp_paramhdr);
+			for (i = 0; i < num_ent; i++) {
+				switch (pr_supported->chunk_types[i]) {
+				case SCTP_ASCONF:
+				case SCTP_ASCONF_ACK:
+					peer_supports_asconf = 1;
+					break;
+				default:
+					/* one we don't care about */
+					break;
+				}
+			}
+		} else if (ptype == SCTP_RANDOM) {
+			got_random = 1;
+			/* enforce the random length */
+			if (plen != (sizeof(struct sctp_auth_random) +
+				     SCTP_AUTH_RANDOM_SIZE_REQUIRED)) {
+				SCTPDBG(SCTP_DEBUG_AUTH1,
+					"SCTP: invalid RANDOM len\n");
+				return (-1);
+			}
+		} else if (ptype == SCTP_HMAC_LIST) {
+			uint8_t store[SCTP_PARAM_BUFFER_SIZE];
+			struct sctp_auth_hmac_algo *hmacs;
+			int num_hmacs;
+
+			if (plen > sizeof(store))
+				break;
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)store, min(plen,sizeof(store)));
+			if (phdr == NULL)
+				return (-1);
+			hmacs = (struct sctp_auth_hmac_algo *)phdr;
+			num_hmacs = (plen - sizeof(*hmacs)) /
+			    sizeof(hmacs->hmac_ids[0]);
+			/* validate the hmac list */
+			if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+				SCTPDBG(SCTP_DEBUG_AUTH1,
+					"SCTP: invalid HMAC param\n");
+				return (-1);
+			}
+			got_hmacs = 1;
+		} else if (ptype == SCTP_CHUNK_LIST) {
+			int i, num_chunks;
+			uint8_t chunks_store[SCTP_SMALL_CHUNK_STORE];
+			/* did the peer send a non-empty chunk list? */
+			struct sctp_auth_chunk_list *chunks = NULL;
+			phdr = sctp_get_next_param(m, offset,
+						   (struct sctp_paramhdr *)chunks_store,
+						   min(plen,sizeof(chunks_store)));
+			if (phdr == NULL)
+				return (-1);
+
+			/*-
+			 * Flip through the list and mark that the
+			 * peer supports asconf/asconf_ack.
+			 */
+			chunks = (struct sctp_auth_chunk_list *)phdr;
+			num_chunks = plen - sizeof(*chunks);
+			for (i = 0; i < num_chunks; i++) {
+				/* record asconf/asconf-ack if listed */
+				if (chunks->chunk_types[i] == SCTP_ASCONF)
+					saw_asconf = 1;
+				if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
+					saw_asconf_ack = 1;
+
+			}
+			if (num_chunks)
+				got_chklist = 1;
+		}
+
+		offset += SCTP_SIZE32(plen);
+		if (offset >= limit) {
+			break;
+		}
+		phdr = sctp_get_next_param(m, offset, &parm_buf,
+		    sizeof(parm_buf));
+	}
+	/* validate authentication required parameters */
+	if (got_random && got_hmacs) {
+		peer_supports_auth = 1;
+	} else {
+		peer_supports_auth = 0;
+	}
+	if (!peer_supports_auth && got_chklist) {
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: peer sent chunk list w/o AUTH\n");
+		return (-1);
+	}
+	if (peer_supports_asconf && !peer_supports_auth) {
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"SCTP: peer supports ASCONF but not AUTH\n");
+		return (-1);
+	} else if ((peer_supports_asconf) && (peer_supports_auth) &&
+		   ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
+		return (-2);
+	}
+	return (0);
+}
+
+void
+sctp_initialize_auth_params(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+	uint16_t chunks_len = 0;
+	uint16_t hmacs_len = 0;
+	uint16_t random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+	sctp_key_t *new_key;
+	uint16_t keylen;
+
+	/* initialize hmac list from endpoint */
+	stcb->asoc.local_hmacs = sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+	if (stcb->asoc.local_hmacs != NULL) {
+		hmacs_len = stcb->asoc.local_hmacs->num_algo *
+		    sizeof(stcb->asoc.local_hmacs->hmac[0]);
+	}
+	/* initialize auth chunks list from endpoint */
+	stcb->asoc.local_auth_chunks =
+	    sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+	if (stcb->asoc.local_auth_chunks != NULL) {
+		int i;
+		for (i = 0; i < 256; i++) {
+			if (stcb->asoc.local_auth_chunks->chunks[i])
+				chunks_len++;
+		}
+	}
+	/* copy defaults from the endpoint */
+	stcb->asoc.authinfo.active_keyid = inp->sctp_ep.default_keyid;
+
+	/* copy out the shared key list (by reference) from the endpoint */
+	(void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+				 &stcb->asoc.shared_keys);
+
+	/* now set the concatenated key (random + chunks + hmacs) */
+	/* key includes parameter headers */
+	keylen = (3 * sizeof(struct sctp_paramhdr)) + random_len + chunks_len +
+	    hmacs_len;
+	new_key = sctp_alloc_key(keylen);
+	if (new_key != NULL) {
+		struct sctp_paramhdr *ph;
+		int plen;
+		/* generate and copy in the RANDOM */
+		ph = (struct sctp_paramhdr *)new_key->key;
+		ph->param_type = htons(SCTP_RANDOM);
+		plen = sizeof(*ph) + random_len;
+		ph->param_length = htons(plen);
+		SCTP_READ_RANDOM(new_key->key + sizeof(*ph), random_len);
+		keylen = plen;
+
+		/* append in the AUTH chunks */
+		/* NOTE: currently we always have chunks to list */
+		ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+		ph->param_type = htons(SCTP_CHUNK_LIST);
+		plen = sizeof(*ph) + chunks_len;
+		ph->param_length = htons(plen);
+		keylen += sizeof(*ph);
+		if (stcb->asoc.local_auth_chunks) {
+			int i;
+			for (i = 0; i < 256; i++) {
+				if (stcb->asoc.local_auth_chunks->chunks[i])
+					new_key->key[keylen++] = i;
+			}
+		}
+
+		/* append in the HMACs */
+		ph = (struct sctp_paramhdr *)(new_key->key + keylen);
+		ph->param_type = htons(SCTP_HMAC_LIST);
+		plen = sizeof(*ph) + hmacs_len;
+		ph->param_length = htons(plen);
+		keylen += sizeof(*ph);
+		(void)sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
+					new_key->key + keylen);
+	}
+	if (stcb->asoc.authinfo.random != NULL)
+	    sctp_free_key(stcb->asoc.authinfo.random);
+	stcb->asoc.authinfo.random = new_key;
+	stcb->asoc.authinfo.random_len = random_len;
+}
+
+
+#ifdef SCTP_HMAC_TEST
+/*
+ * HMAC and key concatenation tests
+ */
+static void
+sctp_print_digest(uint8_t *digest, uint32_t digestlen, const char *str)
+{
+	uint32_t i;
+
+	SCTP_PRINTF("\n%s: 0x", str);
+	if (digest == NULL)
+		return;
+
+	for (i = 0; i < digestlen; i++)
+		SCTP_PRINTF("%02x", digest[i]);
+}
+
+static int
+sctp_test_hmac(const char *str, uint16_t hmac_id, uint8_t *key,
+    uint32_t keylen, uint8_t *text, uint32_t textlen,
+    uint8_t *digest, uint32_t digestlen)
+{
+	uint8_t computed_digest[SCTP_AUTH_DIGEST_LEN_MAX];
+
+	SCTP_PRINTF("\n%s:", str);
+	sctp_hmac(hmac_id, key, keylen, text, textlen, computed_digest);
+	sctp_print_digest(digest, digestlen, "Expected digest");
+	sctp_print_digest(computed_digest, digestlen, "Computed digest");
+	if (memcmp(digest, computed_digest, digestlen) != 0) {
+		SCTP_PRINTF("\nFAILED");
+		return (-1);
+	} else {
+		SCTP_PRINTF("\nPASSED");
+		return (0);
+	}
+}
+
+
+/*
+ * RFC 2202: HMAC-SHA1 test cases
+ */
+void
+sctp_test_hmac_sha1(void)
+{
+	uint8_t *digest;
+	uint8_t key[128];
+	uint32_t keylen;
+	uint8_t text[128];
+	uint32_t textlen;
+	uint32_t digestlen = 20;
+	int failed = 0;
+
+	/*-
+	 * test_case =     1
+	 * key =           0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
+	 * key_len =       20
+	 * data =          "Hi There"
+	 * data_len =      8
+	 * digest =        0xb617318655057264e28bc0b6fb378c8ef146be00
+	 */
+	keylen = 20;
+	memset(key, 0x0b, keylen);
+	textlen = 8;
+	strcpy(text, "Hi There");
+	digest = "\xb6\x17\x31\x86\x55\x05\x72\x64\xe2\x8b\xc0\xb6\xfb\x37\x8c\x8e\xf1\x46\xbe\x00";
+	if (sctp_test_hmac("SHA1 test case 1", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     2
+	 * key =           "Jefe"
+	 * key_len =       4
+	 * data =          "what do ya want for nothing?"
+	 * data_len =      28
+	 * digest =        0xeffcdf6ae5eb2fa2d27416d5f184df9c259a7c79
+	 */
+	keylen = 4;
+	strcpy(key, "Jefe");
+	textlen = 28;
+	strcpy(text, "what do ya want for nothing?");
+	digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79";
+	if (sctp_test_hmac("SHA1 test case 2", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     3
+	 * key =           0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+	 * key_len =       20
+	 * data =          0xdd repeated 50 times
+	 * data_len =      50
+	 * digest =        0x125d7342b9ac11cd91a39af48aa17b4f63f175d3
+	 */
+	keylen = 20;
+	memset(key, 0xaa, keylen);
+	textlen = 50;
+	memset(text, 0xdd, textlen);
+	digest = "\x12\x5d\x73\x42\xb9\xac\x11\xcd\x91\xa3\x9a\xf4\x8a\xa1\x7b\x4f\x63\xf1\x75\xd3";
+	if (sctp_test_hmac("SHA1 test case 3", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     4
+	 * key =           0x0102030405060708090a0b0c0d0e0f10111213141516171819
+	 * key_len =       25
+	 * data =          0xcd repeated 50 times
+	 * data_len =      50
+	 * digest =        0x4c9007f4026250c6bc8414f9bf50c86c2d7235da
+	 */
+	keylen = 25;
+	memcpy(key, "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19", keylen);
+	textlen = 50;
+	memset(text, 0xcd, textlen);
+	digest = "\x4c\x90\x07\xf4\x02\x62\x50\xc6\xbc\x84\x14\xf9\xbf\x50\xc8\x6c\x2d\x72\x35\xda";
+	if (sctp_test_hmac("SHA1 test case 4", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     5
+	 * key =           0x0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c0c
+	 * key_len =       20
+	 * data =          "Test With Truncation"
+	 * data_len =      20
+	 * digest =        0x4c1a03424b55e07fe7f27be1d58bb9324a9a5a04
+	 * digest-96 =     0x4c1a03424b55e07fe7f27be1
+	 */
+	keylen = 20;
+	memset(key, 0x0c, keylen);
+	textlen = 20;
+	strcpy(text, "Test With Truncation");
+	digest = "\x4c\x1a\x03\x42\x4b\x55\xe0\x7f\xe7\xf2\x7b\xe1\xd5\x8b\xb9\x32\x4a\x9a\x5a\x04";
+	if (sctp_test_hmac("SHA1 test case 5", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     6
+	 * key =           0xaa repeated 80 times
+	 * key_len =       80
+	 * data =          "Test Using Larger Than Block-Size Key - Hash Key First"
+	 * data_len =      54
+	 * digest =        0xaa4ae5e15272d00e95705637ce8a3b55ed402112
+	 */
+	keylen = 80;
+	memset(key, 0xaa, keylen);
+	textlen = 54;
+	strcpy(text, "Test Using Larger Than Block-Size Key - Hash Key First");
+	digest = "\xaa\x4a\xe5\xe1\x52\x72\xd0\x0e\x95\x70\x56\x37\xce\x8a\x3b\x55\xed\x40\x21\x12";
+	if (sctp_test_hmac("SHA1 test case 6", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/*-
+	 * test_case =     7
+	 * key =           0xaa repeated 80 times
+	 * key_len =       80
+	 * data =          "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data"
+	 * data_len =      73
+	 * digest =        0xe8e99d0f45237d786d6bbaa7965c7808bbff1a91
+	 */
+	keylen = 80;
+	memset(key, 0xaa, keylen);
+	textlen = 73;
+	strcpy(text, "Test Using Larger Than Block-Size Key and Larger Than One Block-Size Data");
+	digest = "\xe8\xe9\x9d\x0f\x45\x23\x7d\x78\x6d\x6b\xba\xa7\x96\x5c\x78\x08\xbb\xff\x1a\x91";
+	if (sctp_test_hmac("SHA1 test case 7", SCTP_AUTH_HMAC_ID_SHA1, key, keylen,
+	    text, textlen, digest, digestlen) < 0)
+		failed++;
+
+	/* done with all tests */
+	if (failed)
+		SCTP_PRINTF("\nSHA1 test results: %d cases failed", failed);
+	else
+		SCTP_PRINTF("\nSHA1 test results: all test cases passed");
+}
+
+/*
+ * test assoc key concatenation
+ */
+static int
+sctp_test_key_concatenation(sctp_key_t *key1, sctp_key_t *key2,
+    sctp_key_t *expected_key)
+{
+	sctp_key_t *key;
+	int ret_val;
+
+	sctp_show_key(key1, "\nkey1");
+	sctp_show_key(key2, "\nkey2");
+	key = sctp_compute_hashkey(key1, key2, NULL);
+	sctp_show_key(expected_key, "\nExpected");
+	sctp_show_key(key, "\nComputed");
+	if (memcmp(key, expected_key, expected_key->keylen) != 0) {
+		SCTP_PRINTF("\nFAILED");
+		ret_val = -1;
+	} else {
+		SCTP_PRINTF("\nPASSED");
+		ret_val = 0;
+	}
+	sctp_free_key(key1);
+	sctp_free_key(key2);
+	sctp_free_key(expected_key);
+	sctp_free_key(key);
+	return (ret_val);
+}
+
+
+void
+sctp_test_authkey(void)
+{
+	sctp_key_t *key1, *key2, *expected_key;
+	int failed = 0;
+
+	/* test case 1 */
+	key1 = sctp_set_key("\x01\x01\x01\x01", 4);
+	key2 = sctp_set_key("\x01\x02\x03\x04", 4);
+	expected_key = sctp_set_key("\x01\x01\x01\x01\x01\x02\x03\x04", 8);
+	if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+		failed++;
+
+	/* test case 2 */
+	key1 = sctp_set_key("\x00\x00\x00\x01", 4);
+	key2 = sctp_set_key("\x02", 1);
+	expected_key = sctp_set_key("\x00\x00\x00\x01\x02", 5);
+	if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+		failed++;
+
+	/* test case 3 */
+	key1 = sctp_set_key("\x01", 1);
+	key2 = sctp_set_key("\x00\x00\x00\x02", 4);
+	expected_key = sctp_set_key("\x01\x00\x00\x00\x02", 5);
+	if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+		failed++;
+
+	/* test case 4 */
+	key1 = sctp_set_key("\x00\x00\x00\x01", 4);
+	key2 = sctp_set_key("\x01", 1);
+	expected_key = sctp_set_key("\x01\x00\x00\x00\x01", 5);
+	if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+		failed++;
+
+	/* test case 5 */
+	key1 = sctp_set_key("\x01", 1);
+	key2 = sctp_set_key("\x00\x00\x00\x01", 4);
+	expected_key = sctp_set_key("\x01\x00\x00\x00\x01", 5);
+	if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+		failed++;
+
+	/* test case 6 */
+	key1 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11);
+	key2 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11);
+	expected_key = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22);
+	if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+		failed++;
+
+	/* test case 7 */
+	key1 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 11);
+	key2 = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07", 11);
+	expected_key = sctp_set_key("\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x08", 22);
+	if (sctp_test_key_concatenation(key1, key2, expected_key) < 0)
+		failed++;
+
+	/* done with all tests */
+	if (failed)
+		SCTP_PRINTF("\nKey concatenation test results: %d cases failed", failed);
+	else
+		SCTP_PRINTF("\nKey concatenation test results: all test cases passed");
+}
+
+
+#if defined(STANDALONE_HMAC_TEST)
+int
+main(void)
+{
+	sctp_test_hmac_sha1();
+	sctp_test_authkey();
+}
+
+#endif /* STANDALONE_HMAC_TEST */
+
+#endif /* SCTP_HMAC_TEST */
diff --git a/usrsctplib/netinet/sctp_auth.h b/usrsctplib/netinet/sctp_auth.h
new file mode 100755
index 0000000..b24f514
--- /dev/null
+++ b/usrsctplib/netinet/sctp_auth.h
@@ -0,0 +1,216 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_auth.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_AUTH_H_
+#define _NETINET_SCTP_AUTH_H_
+
+#include <netinet/sctp_os.h>
+
+/* digest lengths */
+#define SCTP_AUTH_DIGEST_LEN_SHA1	20
+#define SCTP_AUTH_DIGEST_LEN_SHA256	32
+#define SCTP_AUTH_DIGEST_LEN_MAX	SCTP_AUTH_DIGEST_LEN_SHA256
+
+/* random sizes */
+#define SCTP_AUTH_RANDOM_SIZE_DEFAULT	32
+#define SCTP_AUTH_RANDOM_SIZE_REQUIRED	32
+
+/* union of all supported HMAC algorithm contexts */
+typedef union sctp_hash_context {
+	SCTP_SHA1_CTX sha1;
+#if defined(SCTP_SUPPORT_HMAC_SHA256)
+	SCTP_SHA256_CTX sha256;
+#endif
+} sctp_hash_context_t;
+
+typedef struct sctp_key {
+	uint32_t keylen;
+	uint8_t key[];
+} sctp_key_t;
+
+typedef struct sctp_shared_key {
+	LIST_ENTRY(sctp_shared_key) next;
+	sctp_key_t *key;	/* key text */
+	uint32_t refcount;	/* reference count */
+	uint16_t keyid;		/* shared key ID */
+	uint8_t deactivated;	/* key is deactivated */
+} sctp_sharedkey_t;
+
+LIST_HEAD(sctp_keyhead, sctp_shared_key);
+
+/* authentication chunks list */
+typedef struct sctp_auth_chklist {
+	uint8_t chunks[256];
+	uint8_t num_chunks;
+} sctp_auth_chklist_t;
+
+/* hmac algos supported list */
+typedef struct sctp_hmaclist {
+	uint16_t max_algo;	/* max algorithms allocated */
+	uint16_t num_algo;	/* num algorithms used */
+	uint16_t hmac[];
+} sctp_hmaclist_t;
+
+/* authentication info */
+typedef struct sctp_authinformation {
+	sctp_key_t *random;	/* local random key (concatenated) */
+	uint32_t random_len;	/* local random number length for param */
+	sctp_key_t *peer_random;/* peer's random key (concatenated) */
+	sctp_key_t *assoc_key;	/* cached concatenated send key */
+	sctp_key_t *recv_key;	/* cached concatenated recv key */
+	uint16_t active_keyid;	/* active send keyid */
+	uint16_t assoc_keyid;	/* current send keyid (cached) */
+	uint16_t recv_keyid;	/* last recv keyid (cached) */
+} sctp_authinfo_t;
+
+
+
+/*
+ * Macros
+ */
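+/* returns non-zero if the given chunk type is marked in the auth chunk list */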
+#define sctp_auth_is_required_chunk(chunk, list) ((list == NULL) ? (0) : (list->chunks[chunk] != 0))
+
+/*
+ * function prototypes
+ */
+
+/* socket option api functions */
+extern sctp_auth_chklist_t *sctp_alloc_chunklist(void);
+extern void sctp_free_chunklist(sctp_auth_chklist_t *chklist);
+extern void sctp_clear_chunklist(sctp_auth_chklist_t *chklist);
+extern sctp_auth_chklist_t *sctp_copy_chunklist(sctp_auth_chklist_t *chklist);
+extern int sctp_auth_add_chunk(uint8_t chunk, sctp_auth_chklist_t *list);
+extern int sctp_auth_delete_chunk(uint8_t chunk, sctp_auth_chklist_t *list);
+extern size_t sctp_auth_get_chklist_size(const sctp_auth_chklist_t *list);
+extern int sctp_serialize_auth_chunks(const sctp_auth_chklist_t *list,
+    uint8_t *ptr);
+extern int sctp_pack_auth_chunks(const sctp_auth_chklist_t *list,
+    uint8_t *ptr);
+extern int sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks,
+    sctp_auth_chklist_t *list);
+
+/* key handling */
+extern sctp_key_t *sctp_alloc_key(uint32_t keylen);
+extern void sctp_free_key(sctp_key_t *key);
+extern void sctp_print_key(sctp_key_t *key, const char *str);
+extern void sctp_show_key(sctp_key_t *key, const char *str);
+extern sctp_key_t *sctp_generate_random_key(uint32_t keylen);
+extern sctp_key_t *sctp_set_key(uint8_t *key, uint32_t keylen);
+extern sctp_key_t *sctp_compute_hashkey(sctp_key_t *key1, sctp_key_t *key2,
+    sctp_key_t *shared);
+
+/* shared key handling */
+extern sctp_sharedkey_t *sctp_alloc_sharedkey(void);
+extern void sctp_free_sharedkey(sctp_sharedkey_t *skey);
+extern sctp_sharedkey_t *sctp_find_sharedkey(struct sctp_keyhead *shared_keys,
+    uint16_t key_id);
+extern int sctp_insert_sharedkey(struct sctp_keyhead *shared_keys,
+    sctp_sharedkey_t *new_skey);
+extern int sctp_copy_skeylist(const struct sctp_keyhead *src,
+    struct sctp_keyhead *dest);
+/* ref counts on shared keys, by key id */
+extern void sctp_auth_key_acquire(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_auth_key_release(struct sctp_tcb *stcb, uint16_t keyid,
+    int so_locked);
+
+
+/* hmac list handling */
+extern sctp_hmaclist_t *sctp_alloc_hmaclist(uint16_t num_hmacs);
+extern void sctp_free_hmaclist(sctp_hmaclist_t *list);
+extern int sctp_auth_add_hmacid(sctp_hmaclist_t *list, uint16_t hmac_id);
+extern sctp_hmaclist_t *sctp_copy_hmaclist(sctp_hmaclist_t *list);
+extern sctp_hmaclist_t *sctp_default_supported_hmaclist(void);
+extern uint16_t sctp_negotiate_hmacid(sctp_hmaclist_t *peer,
+    sctp_hmaclist_t *local);
+extern int sctp_serialize_hmaclist(sctp_hmaclist_t *list, uint8_t *ptr);
+extern int sctp_verify_hmac_param(struct sctp_auth_hmac_algo *hmacs,
+    uint32_t num_hmacs);
+
+extern sctp_authinfo_t *sctp_alloc_authinfo(void);
+extern void sctp_free_authinfo(sctp_authinfo_t *authinfo);
+
+/* keyed-HMAC functions */
+extern uint32_t sctp_get_auth_chunk_len(uint16_t hmac_algo);
+extern uint32_t sctp_get_hmac_digest_len(uint16_t hmac_algo);
+extern uint32_t sctp_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen, uint8_t *digest);
+extern int sctp_verify_hmac(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    uint8_t *text, uint32_t textlen, uint8_t *digest, uint32_t digestlen);
+extern uint32_t sctp_compute_hmac(uint16_t hmac_algo, sctp_key_t *key,
+    uint8_t *text, uint32_t textlen, uint8_t *digest);
+extern int sctp_auth_is_supported_hmac(sctp_hmaclist_t *list, uint16_t id);
+
+/* mbuf versions */
+extern uint32_t sctp_hmac_m(uint16_t hmac_algo, uint8_t *key, uint32_t keylen,
+    struct mbuf *m, uint32_t m_offset, uint8_t *digest, uint32_t trailer);
+extern uint32_t sctp_compute_hmac_m(uint16_t hmac_algo, sctp_key_t *key,
+    struct mbuf *m, uint32_t m_offset, uint8_t *digest);
+
+/*
+ * authentication routines
+ */
+extern void sctp_clear_cachedkeys(struct sctp_tcb *stcb, uint16_t keyid);
+extern void sctp_clear_cachedkeys_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_delete_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_delete_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_auth_setactivekey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_auth_setactivekey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+extern int sctp_deact_sharedkey(struct sctp_tcb *stcb, uint16_t keyid);
+extern int sctp_deact_sharedkey_ep(struct sctp_inpcb *inp, uint16_t keyid);
+
+extern void sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
+    uint32_t offset, uint32_t length);
+extern void sctp_fill_hmac_digest_m(struct mbuf *m, uint32_t auth_offset,
+    struct sctp_auth_chunk *auth, struct sctp_tcb *stcb, uint16_t key_id);
+extern struct mbuf *sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+    struct sctp_auth_chunk **auth_ret, uint32_t *offset,
+    struct sctp_tcb *stcb, uint8_t chunk);
+extern int sctp_handle_auth(struct sctp_tcb *stcb, struct sctp_auth_chunk *ch,
+    struct mbuf *m, uint32_t offset);
+extern void sctp_notify_authentication(struct sctp_tcb *stcb,
+    uint32_t indication, uint16_t keyid, uint16_t alt_keyid, int so_locked);
+extern int sctp_validate_init_auth_params(struct mbuf *m, int offset,
+    int limit);
+extern void sctp_initialize_auth_params(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb);
+
+/* test functions */
+#ifdef SCTP_HMAC_TEST
+extern void sctp_test_hmac_sha1(void);
+extern void sctp_test_authkey(void);
+#endif
+#endif /* _NETINET_SCTP_AUTH_H_ */
diff --git a/usrsctplib/netinet/sctp_bsd_addr.c b/usrsctplib/netinet/sctp_bsd_addr.c
new file mode 100755
index 0000000..805fdb8
--- /dev/null
+++ b/usrsctplib/netinet/sctp_bsd_addr.c
@@ -0,0 +1,1111 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_indata.h>
+#if defined(__FreeBSD__)
+#include <sys/unistd.h>
+#endif
+
+/* Declare all of our malloc named types */
+#ifndef __Panda__
+MALLOC_DEFINE(SCTP_M_MAP, "sctp_map", "sctp asoc map descriptor");
+MALLOC_DEFINE(SCTP_M_STRMI, "sctp_stri", "sctp stream in array");
+MALLOC_DEFINE(SCTP_M_STRMO, "sctp_stro", "sctp stream out array");
+MALLOC_DEFINE(SCTP_M_ASC_ADDR, "sctp_aadr", "sctp asconf address");
+MALLOC_DEFINE(SCTP_M_ASC_IT, "sctp_a_it", "sctp asconf iterator");
+MALLOC_DEFINE(SCTP_M_AUTH_CL, "sctp_atcl", "sctp auth chunklist");
+MALLOC_DEFINE(SCTP_M_AUTH_KY, "sctp_atky", "sctp auth key");
+MALLOC_DEFINE(SCTP_M_AUTH_HL, "sctp_athm", "sctp auth hmac list");
+MALLOC_DEFINE(SCTP_M_AUTH_IF, "sctp_athi", "sctp auth info");
+MALLOC_DEFINE(SCTP_M_STRESET, "sctp_stre", "sctp stream reset");
+MALLOC_DEFINE(SCTP_M_CMSG, "sctp_cmsg", "sctp CMSG buffer");
+MALLOC_DEFINE(SCTP_M_COPYAL, "sctp_cpal", "sctp copy all");
+MALLOC_DEFINE(SCTP_M_VRF, "sctp_vrf", "sctp vrf struct");
+MALLOC_DEFINE(SCTP_M_IFA, "sctp_ifa", "sctp ifa struct");
+MALLOC_DEFINE(SCTP_M_IFN, "sctp_ifn", "sctp ifn struct");
+MALLOC_DEFINE(SCTP_M_TIMW, "sctp_timw", "sctp time block");
+MALLOC_DEFINE(SCTP_M_MVRF, "sctp_mvrf", "sctp mvrf pcb list");
+MALLOC_DEFINE(SCTP_M_ITER, "sctp_iter", "sctp iterator control");
+MALLOC_DEFINE(SCTP_M_SOCKOPT, "sctp_socko", "sctp socket option");
+MALLOC_DEFINE(SCTP_M_MCORE, "sctp_mcore", "sctp mcore queue");
+#endif
+
+/* Global NON-VNET structure that controls the iterator */
+struct iterator_control sctp_it_ctl;
+
+#if !defined(__FreeBSD__)
+static void
+sctp_cleanup_itqueue(void)
+{
+	struct sctp_iterator *it, *nit;
+
+	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+		if (it->function_atend != NULL) {
+			(*it->function_atend) (it->pointer, it->val);
+		}
+		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+		SCTP_FREE(it, SCTP_M_ITER);
+	}
+}
+#endif
+#if defined(__Userspace__)
+/* __Userspace__ TODO: if we use a thread-based iterator, then the
+ * implementation of wakeup will need to change.
+ * Currently we are using timeo_cond for ident so_timeo, but that is not
+ * sufficient if we need to use another ident, such as
+ * wakeup(&sctppcbinfo.iterator_running);
+ */
+#endif
+
+void
+sctp_wakeup_iterator(void)
+{
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(__Userspace_os_Windows)
+	WakeAllConditionVariable(&sctp_it_ctl.iterator_wakeup);
+#else
+	pthread_cond_broadcast(&sctp_it_ctl.iterator_wakeup);
+#endif
+#else
+	wakeup(&sctp_it_ctl.iterator_running);
+#endif
+}
+
+#if defined(__Userspace__)
+static void *
+#else
+static void
+#endif
+sctp_iterator_thread(void *v SCTP_UNUSED)
+{
+#if defined(__Userspace__)
+	sctp_userspace_set_threadname("SCTP iterator");
+#endif
+	SCTP_IPI_ITERATOR_WQ_LOCK();
+	/* In FreeBSD this thread never terminates. */
+#if defined(__FreeBSD__)
+	for (;;) {
+#else
+	while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) == 0) {
+#endif
+#if !defined(__Userspace__)
+		msleep(&sctp_it_ctl.iterator_running,
+#if defined(__FreeBSD__)
+		       &sctp_it_ctl.ipi_iterator_wq_mtx,
+#elif defined(__APPLE__) || defined(__Userspace_os_Darwin)
+		       sctp_it_ctl.ipi_iterator_wq_mtx,
+#endif
+		       0, "waiting_for_work", 0);
+#else
+#if defined(__Userspace_os_Windows)
+		SleepConditionVariableCS(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx, INFINITE);
+#else
+		pthread_cond_wait(&sctp_it_ctl.iterator_wakeup, &sctp_it_ctl.ipi_iterator_wq_mtx);
+#endif
+#endif
+#if !defined(__FreeBSD__)
+		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
+			break;
+		}
+#endif
+		sctp_iterator_worker();
+	}
+#if !defined(__FreeBSD__)
+	/* Now this thread needs to be terminated */
+	sctp_cleanup_itqueue();
+	sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_EXITED;
+	SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#if defined(__Userspace__)
+	sctp_wakeup_iterator();
+	return (NULL);
+#else
+	wakeup(&sctp_it_ctl.iterator_flags);
+	thread_terminate(current_thread());
+#ifdef INVARIANTS
+	panic("Hmm. thread_terminate() continues...");
+#endif
+#endif
+#endif
+}
+
+void
+sctp_startup_iterator(void)
+{
+	if (sctp_it_ctl.thread_proc) {
+		/* You only get one */
+		return;
+	}
+	/* Initialize global locks here, thus only once. */
+	SCTP_ITERATOR_LOCK_INIT();
+	SCTP_IPI_ITERATOR_WQ_INIT();
+	TAILQ_INIT(&sctp_it_ctl.iteratorhead);
+#if defined(__FreeBSD__)
+#if __FreeBSD_version <= 701000
+	kthread_create(sctp_iterator_thread,
+#else
+	kproc_create(sctp_iterator_thread,
+#endif
+	             (void *)NULL,
+	             &sctp_it_ctl.thread_proc,
+	             RFPROC,
+	             SCTP_KTHREAD_PAGES,
+	             SCTP_KTRHEAD_NAME);
+#elif defined(__APPLE__)
+	kernel_thread_start((thread_continue_t)sctp_iterator_thread, NULL, &sctp_it_ctl.thread_proc);
+#elif defined(__Userspace__)
+	if (sctp_userspace_thread_create(&sctp_it_ctl.thread_proc, &sctp_iterator_thread)) {
+		SCTP_PRINTF("ERROR: Creating sctp_iterator_thread failed.\n");
+	}
+#endif
+}
+
+#ifdef INET6
+
+#if defined(__Userspace__)
+/* __Userspace__ TODO. struct in6_ifaddr is defined in sys/netinet6/in6_var.h
+   ip6_use_deprecated is defined as  int ip6_use_deprecated = 1; in /src/sys/netinet6/in6_proto.c
+ */
+void
+sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
+{
+    return; /* stub */
+}
+#else
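+/*
+ * derive the usability flags for an IPv6 address: the address is marked
+ * unusable if it is detached, anycast, or not ready, or if it is deprecated
+ * and the use of deprecated addresses is disabled.
+ */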
+void
+sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa)
+{
+	struct in6_ifaddr *ifa6;
+
+	ifa6 = (struct in6_ifaddr *)ifa->ifa;
+	ifa->flags = ifa6->ia6_flags;
+	if (!MODULE_GLOBAL(ip6_use_deprecated)) {
+		if (ifa->flags &
+		    IN6_IFF_DEPRECATED) {
+			ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+		} else {
+			ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+		}
+	} else {
+		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+	}
+	if (ifa->flags &
+	    (IN6_IFF_DETACHED |
+	     IN6_IFF_ANYCAST |
+	     IN6_IFF_NOTREADY)) {
+		ifa->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+	} else {
+		ifa->localifa_flags &= ~SCTP_ADDR_IFA_UNUSEABLE;
+	}
+}
+#endif /* __Userspace__ */
+#endif /* INET6 */
+
+
+#if !defined(__Userspace__)
+static uint32_t
+sctp_is_desired_interface_type(struct ifnet *ifn)
+{
+	int result;
+
+	/* check the interface type to see if it's one we care about */
+#if defined(__APPLE__)
+	switch(ifnet_type(ifn)) {
+#else
+	switch (ifn->if_type) {
+#endif
+	case IFT_ETHER:
+	case IFT_ISO88023:
+	case IFT_ISO88024:
+	case IFT_ISO88025:
+	case IFT_ISO88026:
+	case IFT_STARLAN:
+	case IFT_P10:
+	case IFT_P80:
+	case IFT_HY:
+	case IFT_FDDI:
+	case IFT_XETHER:
+	case IFT_ISDNBASIC:
+	case IFT_ISDNPRIMARY:
+	case IFT_PTPSERIAL:
+	case IFT_OTHER:
+	case IFT_PPP:
+	case IFT_LOOP:
+	case IFT_SLIP:
+	case IFT_GIF:
+	case IFT_L2VLAN:
+	case IFT_STF:
+#if !defined(__APPLE__)
+	case IFT_IP:
+	case IFT_IPOVERCDLC:
+	case IFT_IPOVERCLAW:
+	case IFT_PROPVIRTUAL: /* NetGraph Virtual too */
+	case IFT_VIRTUALIPADDRESS:
+#endif
+		result = 1;
+		break;
+	default:
+		result = 0;
+	}
+
+	return (result);
+}
+#endif
+
+#if defined(__APPLE__)
+int
+sctp_is_vmware_interface(struct ifnet *ifn)
+{
+	return (strncmp(ifnet_name(ifn), "vmnet", 5) == 0);
+}
+#endif
+
+#if defined(__Userspace_os_Windows)
+#ifdef MALLOC
+#undef MALLOC
+#define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x))
+#endif
+#ifdef FREE
+#undef FREE
+#define FREE(x) HeapFree(GetProcessHeap(), 0, (x))
+#endif
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+#if defined(INET) || defined(INET6)
+	struct sctp_ifa *sctp_ifa;
+	DWORD Err, AdapterAddrsSize;
+	PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapt;
+	PIP_ADAPTER_UNICAST_ADDRESS pUnicast;
+#endif
+
+#ifdef INET
+	AdapterAddrsSize = 0;
+
+	if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+		if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTP_PRINTF("GetAdaptersV4Addresses() sizing failed with error code %d\n", Err);
+			SCTP_PRINTF("err = %d; AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+			return;
+		}
+	}
+
+	/* Allocate memory from sizing information */
+	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+		SCTP_PRINTF("Memory allocation error!\n");
+		return;
+	}
+	/* Get actual adapter information */
+	if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTP_PRINTF("GetAdaptersV4Addresses() failed with error code %d\n", Err);
+		FREE(pAdapterAddrs);
+		return;
+	}
+	/* Enumerate through each returned adapter and save its information */
+	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
+		if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
+			for (pUnicast = pAdapt->FirstUnicastAddress; pUnicast; pUnicast = pUnicast->Next) {
+				if (IN4_ISLINKLOCAL_ADDRESS(&(((struct sockaddr_in *)(pUnicast->Address.lpSockaddr))->sin_addr))) {
+					continue;
+				}
+				sctp_ifa = sctp_add_addr_to_vrf(0,
+				                                NULL,
+				                                pAdapt->IfIndex,
+				                                (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
+				                                pAdapt->AdapterName,
+				                                NULL,
+				                                pUnicast->Address.lpSockaddr,
+				                                pAdapt->Flags,
+				                                0);
+				if (sctp_ifa) {
+					sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+				}
+			}
+		}
+	}
+	FREE(pAdapterAddrs);
+#endif
+#ifdef INET6
+	AdapterAddrsSize = 0;
+
+	if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+		if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTP_PRINTF("GetAdaptersV6Addresses() sizing failed with error code %d\n", Err);
+			SCTP_PRINTF("err = %d; AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+			return;
+		}
+	}
+	/* Allocate memory from sizing information */
+	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+		SCTP_PRINTF("Memory allocation error!\n");
+		return;
+	}
+	/* Get actual adapter information */
+	if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTP_PRINTF("GetAdaptersV6Addresses() failed with error code %d\n", Err);
+		FREE(pAdapterAddrs);
+		return;
+	}
+	/* Enumerate through each returned adapter and save its information */
+	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
+		if (pAdapt->IfType == IF_TYPE_IEEE80211 || pAdapt->IfType == IF_TYPE_ETHERNET_CSMACD) {
+			for (pUnicast = pAdapt->FirstUnicastAddress; pUnicast; pUnicast = pUnicast->Next) {
+				sctp_ifa = sctp_add_addr_to_vrf(0,
+				                                NULL,
+				                                pAdapt->Ipv6IfIndex,
+				                                (pAdapt->IfType == IF_TYPE_IEEE80211)?MIB_IF_TYPE_ETHERNET:pAdapt->IfType,
+				                                pAdapt->AdapterName,
+				                                NULL,
+				                                pUnicast->Address.lpSockaddr,
+				                                pAdapt->Flags,
+				                                0);
+				if (sctp_ifa) {
+					sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+				}
+			}
+		}
+	}
+	FREE(pAdapterAddrs);
+#endif
+}
+#elif defined(__Userspace__)
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+#if defined(INET) || defined(INET6)
+	int rc;
+	struct ifaddrs *ifa, *ifas;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t ifa_flags;
+
+	rc = getifaddrs(&ifas);
+	if (rc != 0) {
+		return;
+	}
+	for (ifa = ifas; ifa; ifa = ifa->ifa_next) {
+		if (ifa->ifa_addr == NULL) {
+			continue;
+		}
+#if !defined(INET)
+		if (ifa->ifa_addr->sa_family != AF_INET6) {
+			/* non inet6 skip */
+			continue;
+		}
+#elif !defined(INET6)
+		if (ifa->ifa_addr->sa_family != AF_INET) {
+			/* non inet skip */
+			continue;
+		}
+#else
+		if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+			/* non inet/inet6 skip */
+			continue;
+		}
+#endif
+#if defined(INET6)
+		if ((ifa->ifa_addr->sa_family == AF_INET6) &&
+		    IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+			/* skip unspecified addresses */
+			continue;
+		}
+#endif
+#if defined(INET)
+		if (ifa->ifa_addr->sa_family == AF_INET &&
+		    ((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+			continue;
+		}
+#endif
+		ifa_flags = 0;
+		sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+		                                NULL,
+		                                if_nametoindex(ifa->ifa_name),
+		                                0,
+		                                ifa->ifa_name,
+		                                NULL,
+		                                ifa->ifa_addr,
+		                                ifa_flags,
+		                                0);
+		if (sctp_ifa) {
+			sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+		}
+	}
+	freeifaddrs(ifas);
+#endif
+}
+#endif
+
+#if defined(__APPLE__)
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	/* Here we must apply ANY locks needed by the
+	 * IFN we access and also make sure we lock
+	 * any IFA that exists as we float through the
+	 * list of IFA's
+	 */
+	struct ifnet **ifnetlist;
+	uint32_t i, j, count;
+	char name[SCTP_IFNAMSIZ];
+	struct ifnet *ifn;
+	struct ifaddr **ifaddrlist;
+	struct ifaddr *ifa;
+	struct in6_ifaddr *ifa6;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t ifa_flags;
+
+	if (ifnet_list_get(IFNET_FAMILY_ANY, &ifnetlist, &count) != 0) {
+		return;
+	}
+	for (i = 0; i < count; i++) {
+		ifn = ifnetlist[i];
+		if (SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) && sctp_is_vmware_interface(ifn)) {
+			continue;
+		}
+		if (sctp_is_desired_interface_type(ifn) == 0) {
+			/* non desired type */
+			continue;
+		}
+		if (ifnet_get_address_list(ifn, &ifaddrlist) != 0) {
+			continue;
+		}
+		for (j = 0; ifaddrlist[j] != NULL; j++) {
+			ifa = ifaddrlist[j];
+			if (ifa->ifa_addr == NULL) {
+				continue;
+			}
+			if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) {
+				/* non inet/inet6 skip */
+				continue;
+			}
+			if (ifa->ifa_addr->sa_family == AF_INET6) {
+				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+					/* skip unspecified addresses */
+					continue;
+				}
+			} else {
+				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == INADDR_ANY) {
+					continue;
+				}
+			}
+			if (ifa->ifa_addr->sa_family == AF_INET6) {
+				ifa6 = (struct in6_ifaddr *)ifa;
+				ifa_flags = ifa6->ia6_flags;
+			} else {
+				ifa_flags = 0;
+			}
+			snprintf(name, SCTP_IFNAMSIZ, "%s%d", ifnet_name(ifn), ifnet_unit(ifn));
+			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+			                                (void *)ifn, /* XXX */
+			                                ifnet_index(ifn),
+			                                ifnet_type(ifn),
+			                                name,
+			                                (void *)ifa, /* XXX */
+			                                ifa->ifa_addr,
+			                                ifa_flags,
+			                                0);
+			if (sctp_ifa) {
+				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+			}
+		}
+		ifnet_free_address_list(ifaddrlist);
+	}
+	ifnet_list_free(ifnetlist);
+}
+#endif
+
+#if defined(__FreeBSD__)
+static void
+sctp_init_ifns_for_vrf(int vrfid)
+{
+	/* Here we must apply ANY locks needed by the
+	 * IFN we access and also make sure we lock
+	 * any IFA that exists as we float through the
+	 * list of IFAs.
+	 */
+	struct ifnet *ifn;
+	struct ifaddr *ifa;
+	struct sctp_ifa *sctp_ifa;
+	uint32_t ifa_flags;
+#ifdef INET6
+	struct in6_ifaddr *ifa6;
+#endif
+
+	IFNET_RLOCK();
+	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
+		if (sctp_is_desired_interface_type(ifn) == 0) {
+			/* non desired type */
+			continue;
+		}
+#if (__FreeBSD_version >= 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000
+		IF_ADDR_RLOCK(ifn);
+#else
+		IF_ADDR_LOCK(ifn);
+#endif
+		TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
+			if (ifa->ifa_addr == NULL) {
+				continue;
+			}
+			switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+			case AF_INET:
+				if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+					continue;
+				}
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+					/* skip unspecified addresses */
+					continue;
+				}
+				break;
+#endif
+			default:
+				continue;
+			}
+			switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+			case AF_INET:
+				ifa_flags = 0;
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				ifa6 = (struct in6_ifaddr *)ifa;
+				ifa_flags = ifa6->ia6_flags;
+				break;
+#endif
+			default:
+				ifa_flags = 0;
+				break;
+			}
+			sctp_ifa = sctp_add_addr_to_vrf(vrfid,
+			                                (void *)ifn,
+			                                ifn->if_index,
+			                                ifn->if_type,
+			                                ifn->if_xname,
+			                                (void *)ifa,
+			                                ifa->ifa_addr,
+			                                ifa_flags,
+			                                0);
+			if (sctp_ifa) {
+				sctp_ifa->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+			}
+		}
+#if (__FreeBSD_version >= 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000
+		IF_ADDR_RUNLOCK(ifn);
+#else
+		IF_ADDR_UNLOCK(ifn);
+#endif
+	}
+	IFNET_RUNLOCK();
+}
+#endif
+
+void
+sctp_init_vrf_list(int vrfid)
+{
+	if (vrfid > SCTP_MAX_VRF_ID)
+		/* can't do that */
+		return;
+
+	/* Don't care about return here */
+	(void)sctp_allocate_vrf(vrfid);
+
+	/* Now we need to build all the ifns
+	 * for this vrf and their addresses
+	 */
+	sctp_init_ifns_for_vrf(vrfid);
+}
+
+void
+sctp_addr_change(struct ifaddr *ifa, int cmd)
+{
+#if defined(__Userspace__)
+        return;
+#else
+	uint32_t ifa_flags = 0;
+
+	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+		return;
+	}
+	/* BSD only has one VRF. If this changes,
+	 * we will need to hook in the right
+	 * things here to get the id to pass to
+	 * the address management routine.
+	 */
+	if (SCTP_BASE_VAR(first_time) == 0) {
+		/* Special test to see if my ::1 will show up with this */
+		SCTP_BASE_VAR(first_time) = 1;
+		sctp_init_ifns_for_vrf(SCTP_DEFAULT_VRFID);
+	}
+
+	if ((cmd != RTM_ADD) && (cmd != RTM_DELETE)) {
+		/* don't know what to do with this */
+		return;
+	}
+
+	if (ifa->ifa_addr == NULL) {
+		return;
+	}
+	if (sctp_is_desired_interface_type(ifa->ifa_ifp) == 0) {
+		/* non desired type */
+		return;
+	}
+	switch (ifa->ifa_addr->sa_family) {
+#ifdef INET
+	case AF_INET:
+		if (((struct sockaddr_in *)ifa->ifa_addr)->sin_addr.s_addr == 0) {
+			return;
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		ifa_flags = ((struct in6_ifaddr *)ifa)->ia6_flags;
+		if (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) {
+			/* skip unspecified addresses */
+			return;
+		}
+		break;
+#endif
+	default:
+		/* non inet/inet6 skip */
+		return;
+	}
+	if (cmd == RTM_ADD) {
+		(void)sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, (void *)ifa->ifa_ifp,
+#if defined(__APPLE__)
+		                           ifnet_index(ifa->ifa_ifp), ifnet_type(ifa->ifa_ifp), ifnet_name(ifa->ifa_ifp),
+#else
+		                           ifa->ifa_ifp->if_index, ifa->ifa_ifp->if_type, ifa->ifa_ifp->if_xname,
+#endif
+		                           (void *)ifa, ifa->ifa_addr, ifa_flags, 1);
+	} else {
+
+		sctp_del_addr_from_vrf(SCTP_DEFAULT_VRFID, ifa->ifa_addr,
+#if defined(__APPLE__)
+		                       ifnet_index(ifa->ifa_ifp),
+		                       ifnet_name(ifa->ifa_ifp));
+#else
+		                       ifa->ifa_ifp->if_index,
+		                       ifa->ifa_ifp->if_xname);
+#endif
+
+		/* We don't bump the refcount here, so the
+		 * final delete will happen when it completes.
+		 */
+	}
+#endif
+}
+
+#if defined(__FreeBSD__)
+void
+sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add)
+{
+	struct ifnet *ifn;
+	struct ifaddr *ifa;
+
+	IFNET_RLOCK();
+	TAILQ_FOREACH(ifn, &MODULE_GLOBAL(ifnet), if_link) {
+		if (!(*pred)(ifn)) {
+			continue;
+		}
+		TAILQ_FOREACH(ifa, &ifn->if_addrhead, ifa_link) {
+			sctp_addr_change(ifa, add ? RTM_ADD : RTM_DELETE);
+		}
+	}
+	IFNET_RUNLOCK();
+}
+#endif
+#if defined(__APPLE__)
+void
+sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add)
+{
+	struct ifnet **ifnetlist;
+	struct ifaddr **ifaddrlist;
+	uint32_t i, j, count;
+
+	if (ifnet_list_get(IFNET_FAMILY_ANY, &ifnetlist, &count) != 0) {
+		return;
+	}
+	for (i = 0; i < count; i++) {
+		if (!(*pred)(ifnetlist[i])) {
+			continue;
+		}
+		if (ifnet_get_address_list(ifnetlist[i], &ifaddrlist) != 0) {
+			continue;
+		}
+		for (j = 0; ifaddrlist[j] != NULL; j++) {
+			sctp_addr_change(ifaddrlist[j], add ? RTM_ADD : RTM_DELETE);
+		}
+		ifnet_free_address_list(ifaddrlist);
+	}
+	ifnet_list_free(ifnetlist);
+	return;
+}
+#endif
+
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
+		      int how, int allonebuf, int type)
+{
+	struct mbuf *m = NULL;
+#if defined(__Userspace__)
+
+	/*
+	 * __Userspace__
+	 * Using m_clget, which creates an mbuf and a cluster and
+	 * hooks them together.
+	 * TODO: This does not yet have functionality for jumbo packets.
+	 */
+
+	int mbuf_threshold;
+	if (want_header) {
+		MGETHDR(m, how, type);
+	} else {
+		MGET(m, how, type);
+	}
+	if (m == NULL) {
+		return (NULL);
+	}
+	if (allonebuf == 0)
+		mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
+	else
+		mbuf_threshold = 1;
+
+
+	if ((int)space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
+		MCLGET(m, how);
+		if (m == NULL) {
+			return (NULL);
+		}
+
+		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
+			sctp_m_freem(m);
+			return (NULL);
+		}
+	}
+	SCTP_BUF_LEN(m) = 0;
+	SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
+
+	/* __Userspace__
+	 * Check if anything needs to be done to ensure logging works.
+	 */
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mb(m, SCTP_MBUF_IALLOC);
+	}
+#endif
+#elif defined(__FreeBSD__) && __FreeBSD_version > 1100052
+	m =  m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
+	if (m == NULL) {
+		/* bad, no memory */
+		return (m);
+	}
+	if (allonebuf) {
+		if (SCTP_BUF_SIZE(m) < space_needed) {
+			m_freem(m);
+			return (NULL);
+		}
+	}
+	if (SCTP_BUF_NEXT(m)) {
+		sctp_m_freem(SCTP_BUF_NEXT(m));
+		SCTP_BUF_NEXT(m) = NULL;
+	}
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mb(m, SCTP_MBUF_IALLOC);
+	}
+#endif
+#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
+	m =  m_getm2(NULL, space_needed, how, type, want_header ? M_PKTHDR : 0);
+	if (m == NULL) {
+		/* bad, no memory */
+		return (m);
+	}
+	if (allonebuf) {
+		int siz;
+		if (SCTP_BUF_IS_EXTENDED(m)) {
+			siz = SCTP_BUF_EXTEND_SIZE(m);
+		} else {
+			if (want_header)
+				siz = MHLEN;
+			else
+				siz = MLEN;
+		}
+		if (siz < space_needed) {
+			m_freem(m);
+			return (NULL);
+		}
+	}
+	if (SCTP_BUF_NEXT(m)) {
+		sctp_m_freem(SCTP_BUF_NEXT(m));
+		SCTP_BUF_NEXT(m) = NULL;
+	}
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mb(m, SCTP_MBUF_IALLOC);
+	}
+#endif
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version >= 601000
+	int aloc_size;
+	int index = 0;
+#endif
+	int mbuf_threshold;
+	if (want_header) {
+		MGETHDR(m, how, type);
+	} else {
+		MGET(m, how, type);
+	}
+	if (m == NULL) {
+		return (NULL);
+	}
+	if (allonebuf == 0)
+		mbuf_threshold = SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count);
+	else
+		mbuf_threshold = 1;
+
+
+	if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 601000
+	try_again:
+		index = 4;
+		if (space_needed <= MCLBYTES) {
+			aloc_size = MCLBYTES;
+		} else {
+			aloc_size = MJUMPAGESIZE;
+			index = 5;
+		}
+		m_cljget(m, how, aloc_size);
+		if (m == NULL) {
+			return (NULL);
+		}
+		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
+			if ((aloc_size != MCLBYTES) &&
+			   (allonebuf == 0)) {
+				aloc_size -= 10;
+				goto try_again;
+			}
+			sctp_m_freem(m);
+			return (NULL);
+		}
+#else
+		MCLGET(m, how);
+		if (m == NULL) {
+			return (NULL);
+		}
+		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
+			sctp_m_freem(m);
+			return (NULL);
+		}
+#endif
+	}
+	SCTP_BUF_LEN(m) = 0;
+	SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mb(m, SCTP_MBUF_IALLOC);
+	}
+#endif
+#endif
+	return (m);
+}
+
+
+#ifdef SCTP_PACKET_LOGGING
+void
+sctp_packet_log(struct mbuf *m)
+{
+	int *lenat, thisone;
+	void *copyto;
+	uint32_t *tick_tock;
+	int length;
+	int total_len;
+	int grabbed_lock = 0;
+	int value, newval, thisend, thisbegin;
+	/*
+	 * Buffer layout.
+	 * -sizeof this entry (total_len)
+	 * -previous end      (value)
+	 * -ticks of log      (ticks)
+	 * o -ip packet
+	 * o -as logged
+	 * - where this started (thisbegin)
+	 * x <--end points here
+	 */
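+	/* For example, assuming 4-byte ints: a 100-byte packet gives
+	 * total_len = SCTP_SIZE32(100 + 4 * sizeof(int)) = 116 bytes,
+	 * laid out as three header ints (total_len, previous end, ticks),
+	 * the 100 packet bytes, and one trailing int holding thisbegin.
+	 */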
+	length = SCTP_HEADER_LEN(m);
+	total_len = SCTP_SIZE32((length + (4 * sizeof(int))));
+	/* Log a packet to the buffer. */
+	if (total_len > SCTP_PACKET_LOG_SIZE) {
+		/* Can't log this packet; the buffer is not big enough. */
+		return;
+	}
+	if (length < (int)(SCTP_MIN_V4_OVERHEAD + sizeof(struct sctp_cookie_ack_chunk))) {
+		return;
+	}
+	atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+ try_again:
+	if (SCTP_BASE_VAR(packet_log_writers) > SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+		SCTP_IP_PKTLOG_LOCK();
+		grabbed_lock = 1;
+	again_locked:
+		value = SCTP_BASE_VAR(packet_log_end);
+		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+		if (newval >= SCTP_PACKET_LOG_SIZE) {
+			/* we wrapped */
+			thisbegin = 0;
+			thisend = total_len;
+		} else {
+			thisbegin = SCTP_BASE_VAR(packet_log_end);
+			thisend = newval;
+		}
+		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+			goto again_locked;
+		}
+	} else {
+		value = SCTP_BASE_VAR(packet_log_end);
+		newval = SCTP_BASE_VAR(packet_log_end) + total_len;
+		if (newval >= SCTP_PACKET_LOG_SIZE) {
+			/* we wrapped */
+			thisbegin = 0;
+			thisend = total_len;
+		} else {
+			thisbegin = SCTP_BASE_VAR(packet_log_end);
+			thisend = newval;
+		}
+		if (!(atomic_cmpset_int(&SCTP_BASE_VAR(packet_log_end), value, thisend))) {
+			goto try_again;
+		}
+	}
+	/* Sanity check */
+	if (thisend >= SCTP_PACKET_LOG_SIZE) {
+		SCTP_PRINTF("Insanity stops a log thisbegin:%d thisend:%d writers:%d lock:%d end:%d\n",
+		            thisbegin,
+		            thisend,
+		            SCTP_BASE_VAR(packet_log_writers),
+		            grabbed_lock,
+		            SCTP_BASE_VAR(packet_log_end));
+		SCTP_BASE_VAR(packet_log_end) = 0;
+		goto no_log;
+
+	}
+	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisbegin];
+	*lenat = total_len;
+	lenat++;
+	*lenat = value;
+	lenat++;
+	tick_tock = (uint32_t *)lenat;
+	lenat++;
+	*tick_tock = sctp_get_tick_count();
+	copyto = (void *)lenat;
+	thisone = thisend - sizeof(int);
+	lenat = (int *)&SCTP_BASE_VAR(packet_log_buffer)[thisone];
+	*lenat = thisbegin;
+	if (grabbed_lock) {
+		SCTP_IP_PKTLOG_UNLOCK();
+		grabbed_lock = 0;
+	}
+	m_copydata(m, 0, length, (caddr_t)copyto);
+ no_log:
+	if (grabbed_lock) {
+		SCTP_IP_PKTLOG_UNLOCK();
+	}
+	atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers), 1);
+}
+
+
+int
+sctp_copy_out_packet_log(uint8_t *target, int length)
+{
+	/* We wind through the packet log, starting at the
+	 * beginning, and copy out up to length bytes.
+	 * We return the number of bytes copied.
+	 */
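+	/* For example, assuming 4-byte ints: a caller passing a 4096-byte
+	 * target gets min(4092, SCTP_PACKET_LOG_SIZE) bytes of the log
+	 * buffer preceded by one int holding the current packet_log_end
+	 * index, and the return value is that byte count plus sizeof(int).
+	 */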
+	int tocopy, this_copy;
+	int *lenat;
+	int did_delay = 0;
+
+	tocopy = length;
+	if (length < (int)(2 * sizeof(int))) {
+		/* not enough room */
+		return (0);
+	}
+	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+		atomic_add_int(&SCTP_BASE_VAR(packet_log_writers), SCTP_PKTLOG_WRITERS_NEED_LOCK);
+	again:
+		if ((did_delay == 0) && (SCTP_BASE_VAR(packet_log_writers) != SCTP_PKTLOG_WRITERS_NEED_LOCK)) {
+			/* We delay here for just a moment, hoping the writer(s) that were
+			 * present when we entered will have left, and only locking ones
+			 * remain to contend with us for the lock. This does not assure
+			 * 100% access, but it's good enough for a logging facility like
+			 * this.
+			 */
+			did_delay = 1;
+			DELAY(10);
+			goto again;
+		}
+	}
+	SCTP_IP_PKTLOG_LOCK();
+	lenat = (int *)target;
+	*lenat = SCTP_BASE_VAR(packet_log_end);
+	lenat++;
+	this_copy = min((length - sizeof(int)), SCTP_PACKET_LOG_SIZE);
+	memcpy((void *)lenat, (void *)SCTP_BASE_VAR(packet_log_buffer), this_copy);
+	if (SCTP_PKTLOG_WRITERS_NEED_LOCK) {
+		atomic_subtract_int(&SCTP_BASE_VAR(packet_log_writers),
+				    SCTP_PKTLOG_WRITERS_NEED_LOCK);
+	}
+	SCTP_IP_PKTLOG_UNLOCK();
+	return (this_copy + sizeof(int));
+}
+
+#endif
diff --git a/usrsctplib/netinet/sctp_bsd_addr.h b/usrsctplib/netinet/sctp_bsd_addr.h
new file mode 100755
index 0000000..e0483a4
--- /dev/null
+++ b/usrsctplib/netinet/sctp_bsd_addr.h
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_bsd_addr.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_BSD_ADDR_H_
+#define _NETINET_SCTP_BSD_ADDR_H_
+
+#include <netinet/sctp_pcb.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+extern struct iterator_control sctp_it_ctl;
+void sctp_wakeup_iterator(void);
+
+void sctp_startup_iterator(void);
+
+
+#ifdef INET6
+void sctp_gather_internal_ifa_flags(struct sctp_ifa *ifa);
+#endif
+
+#ifdef SCTP_PACKET_LOGGING
+
+void sctp_packet_log(struct mbuf *m);
+int sctp_copy_out_packet_log(uint8_t *target, int length);
+
+#endif
+
+#if !defined(__Panda__)
+void sctp_addr_change(struct ifaddr *ifa, int cmd);
+#endif
+
+void sctp_add_or_del_interfaces(int (*pred)(struct ifnet *), int add);
+
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_callout.c b/usrsctplib/netinet/sctp_callout.c
new file mode 100755
index 0000000..c9e467a
--- /dev/null
+++ b/usrsctplib/netinet/sctp_callout.c
@@ -0,0 +1,224 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__Userspace__)
+#include <sys/types.h>
+#if !defined (__Userspace_os_Windows)
+#include <sys/wait.h>
+#include <unistd.h>
+#include <pthread.h>
+#endif
+#if defined(__Userspace_os_NaCl)
+#include <sys/select.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#else
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_callout.h>
+#include <netinet/sctp_pcb.h>
+#endif
+
+/*
+ * Callout/Timer routines for OSes that don't have them
+ */
+#if defined(__APPLE__) || defined(__Userspace__)
+static int ticks = 0;
+#else
+extern int ticks;
+#endif
+
+int
+sctp_get_tick_count(void)
+{
+	int ret;
+
+	SCTP_TIMERQ_LOCK();
+	ret = ticks;
+	SCTP_TIMERQ_UNLOCK();
+	return ret;
+}
+
+/*
+ * SCTP_TIMERQ_LOCK protects:
+ * - SCTP_BASE_INFO(callqueue)
+ * - sctp_os_timer_next: next timer to check
+ */
+static sctp_os_timer_t *sctp_os_timer_next = NULL;
+
+void
+sctp_os_timer_init(sctp_os_timer_t *c)
+{
+	bzero(c, sizeof(*c));
+}
+
+void
+sctp_os_timer_start(sctp_os_timer_t *c, int to_ticks, void (*ftn) (void *),
+                    void *arg)
+{
+	/* paranoia */
+	if ((c == NULL) || (ftn == NULL))
+	    return;
+
+	SCTP_TIMERQ_LOCK();
+	/* check to see if we're rescheduling a timer */
+	if (c->c_flags & SCTP_CALLOUT_PENDING) {
+		if (c == sctp_os_timer_next) {
+			sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+		}
+		TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+		/*
+		 * part of the normal "stop a pending callout" process
+		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
+		 * flags.  We don't bother since we are setting these
+		 * below and we still hold the lock.
+		 */
+	}
+
+	/*
+	 * We could unlock/splx here and lock/spl at the TAILQ_INSERT_TAIL,
+	 * but there's no point since doing this setup doesn't take much time.
+	 */
+	if (to_ticks <= 0)
+		to_ticks = 1;
+
+	c->c_arg = arg;
+	c->c_flags = (SCTP_CALLOUT_ACTIVE | SCTP_CALLOUT_PENDING);
+	c->c_func = ftn;
+	c->c_time = ticks + to_ticks;
+	TAILQ_INSERT_TAIL(&SCTP_BASE_INFO(callqueue), c, tqe);
+	SCTP_TIMERQ_UNLOCK();
+}
+
+int
+sctp_os_timer_stop(sctp_os_timer_t *c)
+{
+	SCTP_TIMERQ_LOCK();
+	/*
+	 * Don't attempt to delete a callout that's not on the queue.
+	 */
+	if (!(c->c_flags & SCTP_CALLOUT_PENDING)) {
+		c->c_flags &= ~SCTP_CALLOUT_ACTIVE;
+		SCTP_TIMERQ_UNLOCK();
+		return (0);
+	}
+	c->c_flags &= ~(SCTP_CALLOUT_ACTIVE | SCTP_CALLOUT_PENDING);
+	if (c == sctp_os_timer_next) {
+		sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+	}
+	TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+	SCTP_TIMERQ_UNLOCK();
+	return (1);
+}
+
+void
+sctp_handle_tick(unsigned int delta)
+{
+	sctp_os_timer_t *c;
+	void (*c_func)(void *);
+	void *c_arg;
+
+	SCTP_TIMERQ_LOCK();
+	/* update our tick count */
+	ticks += delta;
+	c = TAILQ_FIRST(&SCTP_BASE_INFO(callqueue));
+	while (c) {
+		if (c->c_time <= ticks) {
+			sctp_os_timer_next = TAILQ_NEXT(c, tqe);
+			TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
+			c_func = c->c_func;
+			c_arg = c->c_arg;
+			c->c_flags &= ~SCTP_CALLOUT_PENDING;
+			SCTP_TIMERQ_UNLOCK();
+			c_func(c_arg);
+			SCTP_TIMERQ_LOCK();
+			c = sctp_os_timer_next;
+		} else {
+			c = TAILQ_NEXT(c, tqe);
+		}
+	}
+	sctp_os_timer_next = NULL;
+	SCTP_TIMERQ_UNLOCK();
+}
+
+#if defined(__APPLE__)
+void
+sctp_timeout(void *arg SCTP_UNUSED)
+{
+	sctp_handle_tick(SCTP_BASE_VAR(sctp_main_timer_ticks));
+	sctp_start_main_timer();
+}
+#endif
+
+#if defined(__Userspace__)
+#define TIMEOUT_INTERVAL 10
+
+void *
+user_sctp_timer_iterate(void *arg)
+{
+	sctp_userspace_set_threadname("SCTP timer");
+	for (;;) {
+#if defined (__Userspace_os_Windows)
+		Sleep(TIMEOUT_INTERVAL);
+#else
+		struct timeval timeout;
+
+		timeout.tv_sec  = 0;
+		timeout.tv_usec = 1000 * TIMEOUT_INTERVAL;
+		select(0, NULL, NULL, NULL, &timeout);
+#endif
+		if (SCTP_BASE_VAR(timer_thread_should_exit)) {
+			break;
+		}
+		sctp_handle_tick(MSEC_TO_TICKS(TIMEOUT_INTERVAL));
+	}
+	return (NULL);
+}
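+/*
+ * For example, assuming MSEC_TO_TICKS maps 1 ms to 1 tick (i.e. a
+ * 1000 Hz tick rate), each pass of the loop above advances the callout
+ * clock by 10 ticks, so a timer started with to_ticks = 1000 fires
+ * after roughly one second, plus scheduling jitter from Sleep()/select().
+ */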
+
+void
+sctp_start_timer(void)
+{
+	/*
+	 * No need to call SCTP_TIMERQ_LOCK_INIT() here;
+	 * it is already done in sctp_pcb_init().
+	 */
+	int rc;
+
+	rc = sctp_userspace_thread_create(&SCTP_BASE_VAR(timer_thread), user_sctp_timer_iterate);
+	if (rc) {
+		SCTP_PRINTF("ERROR; return code from sctp_userspace_thread_create() is %d\n", rc);
+	}
+}
+
+#endif
diff --git a/usrsctplib/netinet/sctp_callout.h b/usrsctplib/netinet/sctp_callout.h
new file mode 100755
index 0000000..52a282f
--- /dev/null
+++ b/usrsctplib/netinet/sctp_callout.h
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#ifndef _NETINET_SCTP_CALLOUT_
+#define _NETINET_SCTP_CALLOUT_
+
+/*
+ * NOTE: the following MACROS are required for locking the callout
+ * queue along with a lock/mutex in the OS specific headers and
+ * implementation files:
+ * - SCTP_TIMERQ_LOCK()
+ * - SCTP_TIMERQ_UNLOCK()
+ * - SCTP_TIMERQ_LOCK_INIT()
+ * - SCTP_TIMERQ_LOCK_DESTROY()
+ */
+
+#define _SCTP_NEEDS_CALLOUT_ 1
+
+#define SCTP_TICKS_PER_FASTTIMO 20	/* called about every 20ms */
+
+#if defined(__Userspace__)
+#if defined(__Userspace_os_Windows)
+#define SCTP_TIMERQ_LOCK()          EnterCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_UNLOCK()        LeaveCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_LOCK_INIT()     InitializeCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_LOCK_DESTROY()  DeleteCriticalSection(&SCTP_BASE_VAR(timer_mtx))
+#else
+#ifdef INVARIANTS
+#define SCTP_TIMERQ_LOCK()          KASSERT(pthread_mutex_lock(&SCTP_BASE_VAR(timer_mtx)) == 0, ("%s: timer_mtx already locked", __func__))
+#define SCTP_TIMERQ_UNLOCK()        KASSERT(pthread_mutex_unlock(&SCTP_BASE_VAR(timer_mtx)) == 0, ("%s: timer_mtx not locked", __func__))
+#else
+#define SCTP_TIMERQ_LOCK()          (void)pthread_mutex_lock(&SCTP_BASE_VAR(timer_mtx))
+#define SCTP_TIMERQ_UNLOCK()        (void)pthread_mutex_unlock(&SCTP_BASE_VAR(timer_mtx))
+#endif
+#define SCTP_TIMERQ_LOCK_INIT()     (void)pthread_mutex_init(&SCTP_BASE_VAR(timer_mtx), &SCTP_BASE_VAR(mtx_attr))
+#define SCTP_TIMERQ_LOCK_DESTROY()  (void)pthread_mutex_destroy(&SCTP_BASE_VAR(timer_mtx))
+#endif
+#endif
+
+int sctp_get_tick_count(void);
+
+TAILQ_HEAD(calloutlist, sctp_callout);
+
+struct sctp_callout {
+	TAILQ_ENTRY(sctp_callout) tqe;
+	int c_time;		/* ticks to the event */
+	void *c_arg;		/* function argument */
+	void (*c_func)(void *);	/* function to call */
+	int c_flags;		/* state of this entry */
+};
+typedef struct sctp_callout sctp_os_timer_t;
+
+#define	SCTP_CALLOUT_ACTIVE	0x0002	/* callout is currently active */
+#define	SCTP_CALLOUT_PENDING	0x0004	/* callout is waiting for timeout */
+
+void sctp_os_timer_init(sctp_os_timer_t *tmr);
+void sctp_os_timer_start(sctp_os_timer_t *, int, void (*)(void *), void *);
+int sctp_os_timer_stop(sctp_os_timer_t *);
+void sctp_handle_tick(unsigned int);
+
+#define SCTP_OS_TIMER_INIT	sctp_os_timer_init
+#define SCTP_OS_TIMER_START	sctp_os_timer_start
+#define SCTP_OS_TIMER_STOP	sctp_os_timer_stop
+/* MT FIXME: Is the following correct? */
+#define SCTP_OS_TIMER_STOP_DRAIN SCTP_OS_TIMER_STOP
+#define	SCTP_OS_TIMER_PENDING(tmr) ((tmr)->c_flags & SCTP_CALLOUT_PENDING)
+#define	SCTP_OS_TIMER_ACTIVE(tmr) ((tmr)->c_flags & SCTP_CALLOUT_ACTIVE)
+#define	SCTP_OS_TIMER_DEACTIVATE(tmr) ((tmr)->c_flags &= ~SCTP_CALLOUT_ACTIVE)
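+/*
+ * Typical usage, as a sketch (the timer variable, callback and arg
+ * names are placeholders):
+ *
+ *   sctp_os_timer_t tmr;
+ *
+ *   SCTP_OS_TIMER_INIT(&tmr);
+ *   SCTP_OS_TIMER_START(&tmr, to_ticks, callback, arg);
+ *   ...
+ *   if (SCTP_OS_TIMER_PENDING(&tmr))
+ *           (void)SCTP_OS_TIMER_STOP(&tmr);
+ */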
+
+#if defined(__Userspace__)
+void sctp_start_timer(void);
+#endif
+#if defined(__APPLE__)
+void sctp_timeout(void *);
+#endif
+
+#endif
diff --git a/usrsctplib/netinet/sctp_cc_functions.c b/usrsctplib/netinet/sctp_cc_functions.c
new file mode 100755
index 0000000..ace7464
--- /dev/null
+++ b/usrsctplib/netinet/sctp_cc_functions.c
@@ -0,0 +1,2506 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_asconf.h>
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+#include <netinet/sctp_dtrace_declare.h>
+#endif
+
+#define SHIFT_MPTCP_MULTI_N 40
+#define SHIFT_MPTCP_MULTI_Z 16
+#define SHIFT_MPTCP_MULTI 8
+
+static void
+sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
+{
+	if ((assoc->max_cwnd > 0) &&
+	    (net->cwnd > assoc->max_cwnd) &&
+	    (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
+		net->cwnd = assoc->max_cwnd;
+		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+			net->cwnd = net->mtu - sizeof(struct sctphdr);
+		}
+	}
+}
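+/*
+ * For example, with assoc->max_cwnd = 10000, net->cwnd = 12000 and an
+ * MTU of 1500, the clamp above reduces cwnd to 10000; if max_cwnd were
+ * below net->mtu - sizeof(struct sctphdr) (1488 bytes here), cwnd would
+ * instead be floored at 1488.
+ */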
+
+static void
+sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	struct sctp_association *assoc;
+	uint32_t cwnd_in_mtu;
+
+	assoc = &stcb->asoc;
+	cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
+	if (cwnd_in_mtu == 0) {
+		/* Using 0 means that the value of RFC 4960 is used. */
+		net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+	} else {
+		/*
+		 * We take the minimum of the burst limit and the
+		 * initial congestion window.
+		 */
+		if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
+			cwnd_in_mtu = assoc->max_burst;
+		net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
+	}
+	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+		/* In case of resource pooling initialize appropriately */
+		net->cwnd /= assoc->numnets;
+		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+			net->cwnd = net->mtu - sizeof(struct sctphdr);
+		}
+	}
+	sctp_enforce_cwnd_limit(assoc, net);
+	net->ssthresh = assoc->peers_rwnd;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	SDT_PROBE5(sctp, cwnd, net, init,
+	          stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+	          0, net->cwnd);
+#endif
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) &
+	    (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
+		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+	}
+}
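+/*
+ * For example, assuming SCTP_INITIAL_CWND is 4380 bytes and an MTU of
+ * 1500: with sctp_initial_cwnd set to 0 the RFC 4960 rule above gives
+ * cwnd = min(4 * 1500, max(2 * 1500, 4380)) = 4380 bytes, while
+ * sctp_initial_cwnd = 3 gives cwnd = 3 * (1500 - 12) = 4464 bytes
+ * (a 12-byte sctphdr), subject to the max_burst and max_cwnd clamps.
+ */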
+
+static void
+sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+                          struct sctp_association *asoc)
+{
+	struct sctp_nets *net;
+	uint32_t t_ssthresh, t_cwnd;
+	uint64_t t_ucwnd_sbw;
+
+	/* MT FIXME: Don't compute this over and over again */
+	t_ssthresh = 0;
+	t_cwnd = 0;
+	t_ucwnd_sbw = 0;
+	if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+	    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			t_ssthresh += net->ssthresh;
+			t_cwnd += net->cwnd;
+			if (net->lastsa > 0) {
+				t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
+			}
+		}
+		if (t_ucwnd_sbw == 0) {
+			t_ucwnd_sbw = 1;
+		}
+	}
+
+	/*-
+	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
+	 * (net->fast_retran_loss_recovery == 0)))
+	 */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if ((asoc->fast_retran_loss_recovery == 0) ||
+		    (asoc->sctp_cmt_on_off > 0)) {
+			/* out of a RFC2582 Fast recovery window? */
+			if (net->net_ack > 0) {
+				/*
+				 * Per section 7.2.3, check whether any
+				 * destinations had a fast retransmit
+				 * to them. If so, we need to
+				 * adjust ssthresh and cwnd.
+				 */
+				struct sctp_tmit_chunk *lchk;
+				int old_cwnd = net->cwnd;
+
+				if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+				    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+					if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
+						net->ssthresh = (uint32_t)(((uint64_t)4 *
+					                                    (uint64_t)net->mtu *
+					                                    (uint64_t)net->ssthresh) /
+						                           (uint64_t)t_ssthresh);
+
+					}
+					if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
+						uint32_t srtt;
+
+						srtt = net->lastsa;
+						/* lastsa>>3; we don't need to divide ... */
+						if (srtt == 0) {
+							srtt = 1;
+						}
+						/* Short Version => Equal to Contel Version MBe */
+						net->ssthresh = (uint32_t) (((uint64_t)4 *
+						                             (uint64_t)net->mtu *
+						                             (uint64_t)net->cwnd) /
+						                            ((uint64_t)srtt *
+						                             t_ucwnd_sbw));
+									     /* INCREASE FACTOR */;
+					}
+					if ((net->cwnd > t_cwnd / 2) &&
+					    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
+						net->ssthresh = net->cwnd - t_cwnd / 2;
+					}
+					if (net->ssthresh < net->mtu) {
+						net->ssthresh = net->mtu;
+					}
+				} else {
+					net->ssthresh = net->cwnd / 2;
+					if (net->ssthresh < (net->mtu * 2)) {
+						net->ssthresh = 2 * net->mtu;
+					}
+				}
+				net->cwnd = net->ssthresh;
+				sctp_enforce_cwnd_limit(asoc, net);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				SDT_PROBE5(sctp, cwnd, net, fr,
+					  stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+					  old_cwnd, net->cwnd);
+#endif
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+						SCTP_CWND_LOG_FROM_FR);
+				}
+				lchk = TAILQ_FIRST(&asoc->send_queue);
+
+				net->partial_bytes_acked = 0;
+				/* Turn on fast recovery window */
+				asoc->fast_retran_loss_recovery = 1;
+				if (lchk == NULL) {
+					/* Mark end of the window */
+					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+				} else {
+					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+				}
+
+				/*
+				 * CMT fast recovery -- per destination
+				 * recovery variable.
+				 */
+				net->fast_retran_loss_recovery = 1;
+
+				if (lchk == NULL) {
+					/* Mark end of the window */
+					net->fast_recovery_tsn = asoc->sending_seq - 1;
+				} else {
+					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+				}
+
+				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+						stcb->sctp_ep, stcb, net,
+				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
+				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+						 stcb->sctp_ep, stcb, net);
+			}
+		} else if (net->net_ack > 0) {
+			/*
+			 * Mark a peg that we WOULD have done a cwnd
+			 * reduction but RFC2582 prevented this action.
+			 */
+			SCTP_STAT_INCR(sctps_fastretransinrtt);
+		}
+	}
+}
+
+/* Defines for instantaneous bw decisions */
+#define SCTP_INST_LOOSING 1 /* Losing to other flows */
+#define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */
+#define SCTP_INST_GAINING 3 /* Gaining, step down possible */
+
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+static int
+cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
+	   uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
+#else
+static int
+cc_bw_same(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw,
+	   uint64_t rtt_offset, uint8_t inst_ind)
+#endif
+{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	uint64_t oth, probepoint;
+#endif
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	probepoint = (((uint64_t)net->cwnd) << 32);
+#endif
+	if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
+		/*
+		 * rtt increased;
+		 * we don't update bw, so we don't
+		 * update the rtt either.
+		 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		/* Probe point 5 */
+		probepoint |=  ((5 << 16) | 1);
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
+			  vtag,
+			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+			  net->flight_size,
+			  probepoint);
+#endif
+		if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
+			if (net->cc_mod.rtcc.last_step_state == 5)
+				net->cc_mod.rtcc.step_cnt++;
+			else
+				net->cc_mod.rtcc.step_cnt = 1;
+			net->cc_mod.rtcc.last_step_state = 5;
+			if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
+			    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
+			     ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
+				/* Try a step down */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				oth = net->cc_mod.rtcc.vol_reduce;
+				oth <<= 16;
+				oth |= net->cc_mod.rtcc.step_cnt;
+				oth <<= 16;
+				oth |= net->cc_mod.rtcc.last_step_state;
+				SDT_PROBE5(sctp, cwnd, net, rttstep,
+					  vtag,
+					  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+					  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+					  oth,
+					  probepoint);
+#endif
+				if (net->cwnd > (4 * net->mtu)) {
+					net->cwnd -= net->mtu;
+					net->cc_mod.rtcc.vol_reduce++;
+				} else {
+					net->cc_mod.rtcc.step_cnt = 0;
+				}
+			}
+		}
+		return (1);
+	}
+	if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
+		/*
+		 * rtt decreased, so there could be more room.
+		 * We update both the bw and the rtt here to
+		 * lock this in as a good step down.
+		 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		/* Probe point 6 */
+		probepoint |=  ((6 << 16) | 0);
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
+			  vtag,
+			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+			  net->flight_size,
+			  probepoint);
+#endif
+		if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			oth = net->cc_mod.rtcc.vol_reduce;
+			oth <<= 16;
+			oth |= net->cc_mod.rtcc.step_cnt;
+			oth <<= 16;
+			oth |= net->cc_mod.rtcc.last_step_state;
+			SDT_PROBE5(sctp, cwnd, net, rttstep,
+				  vtag,
+				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+				  oth,
+				  probepoint);
+#endif
+			if ((net->cc_mod.rtcc.last_step_state == 5) &&
+			    (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
+				/* Step down worked */
+				net->cc_mod.rtcc.step_cnt = 0;
+				return (1);
+			} else {
+				net->cc_mod.rtcc.last_step_state = 6;
+				net->cc_mod.rtcc.step_cnt = 0;
+			}
+		}
+		net->cc_mod.rtcc.lbw = nbw;
+		net->cc_mod.rtcc.lbw_rtt = net->rtt;
+		net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
+		if (inst_ind == SCTP_INST_GAINING)
+			return (1);
+		else if (inst_ind == SCTP_INST_NEUTRAL)
+			return (1);
+		else
+			return (0);
+	}
+	/* OK, bw and rtt remained the same; no update to anything. */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	/* Probe point 7 */
+	probepoint |=  ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
+		  vtag,
+		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+		  net->flight_size,
+		  probepoint);
+#endif
+	if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
+		if (net->cc_mod.rtcc.last_step_state == 5)
+			net->cc_mod.rtcc.step_cnt++;
+		else
+			net->cc_mod.rtcc.step_cnt = 1;
+		net->cc_mod.rtcc.last_step_state = 5;
+		if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
+		    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
+		     ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
+			/* Try a step down */
+			if (net->cwnd > (4 * net->mtu)) {
+				net->cwnd -= net->mtu;
+				net->cc_mod.rtcc.vol_reduce++;
+				return (1);
+			} else {
+				net->cc_mod.rtcc.step_cnt = 0;
+			}
+		}
+	}
+	if (inst_ind == SCTP_INST_GAINING)
+		return (1);
+	else if (inst_ind == SCTP_INST_NEUTRAL)
+		return (1);
+	else
+		return ((int)net->cc_mod.rtcc.ret_from_eq);
+}
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+static int
+cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
+	       uint64_t vtag, uint8_t inst_ind)
+#else
+static int
+cc_bw_decrease(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
+	       uint8_t inst_ind)
+#endif
+{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	uint64_t oth, probepoint;
+#endif
+
+	/* Bandwidth decreased. */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	probepoint = (((uint64_t)net->cwnd) << 32);
+#endif
+	if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
+		/* rtt increased */
+		/* Did we add more? */
+		if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
+		    (inst_ind != SCTP_INST_LOOSING)) {
+			/* Maybe we caused it... back off? */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			/* PROBE POINT 1 */
+			probepoint |=  ((1 << 16) | 1);
+			SDT_PROBE5(sctp, cwnd, net, rttvar,
+				  vtag,
+				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+				  net->flight_size,
+				  probepoint);
+#endif
+			if (net->cc_mod.rtcc.ret_from_eq) {
+				/* Switch over to CA if we are less aggressive */
+				net->ssthresh = net->cwnd-1;
+				net->partial_bytes_acked = 0;
+			}
+			return (1);
+		}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		/* Probe point 2 */
+		probepoint |=  ((2 << 16) | 0);
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
+			  vtag,
+			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+			  net->flight_size,
+			  probepoint);
+#endif
+		/* Someone else - fight for more? */
+		if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			oth = net->cc_mod.rtcc.vol_reduce;
+			oth <<= 16;
+			oth |= net->cc_mod.rtcc.step_cnt;
+			oth <<= 16;
+			oth |= net->cc_mod.rtcc.last_step_state;
+			SDT_PROBE5(sctp, cwnd, net, rttstep,
+				  vtag,
+				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+				  oth,
+				  probepoint);
+#endif
+			/* Did we voluntarily give up some? If so,
+			 * take one back.
+			 */
+			if ((net->cc_mod.rtcc.vol_reduce) &&
+			    (inst_ind != SCTP_INST_GAINING)) {
+				net->cwnd += net->mtu;
+				sctp_enforce_cwnd_limit(&stcb->asoc, net);
+				net->cc_mod.rtcc.vol_reduce--;
+			}
+			net->cc_mod.rtcc.last_step_state = 2;
+			net->cc_mod.rtcc.step_cnt = 0;
+		}
+		goto out_decision;
+	} else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
+		/* bw & rtt decreased */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		/* Probe point 3 */
+		probepoint |=  ((3 << 16) | 0);
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
+			  vtag,
+			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+			  net->flight_size,
+			  probepoint);
+#endif
+		if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			oth = net->cc_mod.rtcc.vol_reduce;
+			oth <<= 16;
+			oth |= net->cc_mod.rtcc.step_cnt;
+			oth <<= 16;
+			oth |= net->cc_mod.rtcc.last_step_state;
+			SDT_PROBE5(sctp, cwnd, net, rttstep,
+				  vtag,
+				  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+				  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+				  oth,
+				  probepoint);
+#endif
+			if ((net->cc_mod.rtcc.vol_reduce) &&
+			    (inst_ind != SCTP_INST_GAINING)) {
+				net->cwnd += net->mtu;
+				sctp_enforce_cwnd_limit(&stcb->asoc, net);
+				net->cc_mod.rtcc.vol_reduce--;
+			}
+			net->cc_mod.rtcc.last_step_state = 3;
+			net->cc_mod.rtcc.step_cnt = 0;
+		}
+		goto out_decision;
+	}
+	/* The bw decreased but rtt stayed the same */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	/* Probe point 4 */
+	probepoint |=  ((4 << 16) | 0);
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
+		  vtag,
+		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+		  net->flight_size,
+		  probepoint);
+#endif
+	if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		oth = net->cc_mod.rtcc.vol_reduce;
+		oth <<= 16;
+		oth |= net->cc_mod.rtcc.step_cnt;
+		oth <<= 16;
+		oth |= net->cc_mod.rtcc.last_step_state;
+		SDT_PROBE5(sctp, cwnd, net, rttstep,
+			  vtag,
+			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+			  oth,
+			  probepoint);
+#endif
+		if ((net->cc_mod.rtcc.vol_reduce) &&
+		    (inst_ind != SCTP_INST_GAINING)) {
+			net->cwnd += net->mtu;
+			sctp_enforce_cwnd_limit(&stcb->asoc, net);
+			net->cc_mod.rtcc.vol_reduce--;
+		}
+		net->cc_mod.rtcc.last_step_state = 4;
+		net->cc_mod.rtcc.step_cnt = 0;
+	}
+out_decision:
+	net->cc_mod.rtcc.lbw = nbw;
+	net->cc_mod.rtcc.lbw_rtt = net->rtt;
+	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
+	if (inst_ind == SCTP_INST_GAINING) {
+		return (1);
+	} else {
+		return (0);
+	}
+}
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+static int
+cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
+#else
+static int
+cc_bw_increase(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net, uint64_t nbw)
+#endif
+{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	uint64_t oth, probepoint;
+
+#endif
+	/* BW increased, so update and
+	 * return 0, since all actions in
+	 * our table say to do the normal CC
+	 * update. Note that we pay no attention to
+	 * the inst_ind since our overall sum is increasing.
+	 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	/* PROBE POINT 0 */
+	probepoint = (((uint64_t)net->cwnd) << 32);
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
+		  vtag,
+		  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+		  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+		  net->flight_size,
+		  probepoint);
+#endif
+	if (net->cc_mod.rtcc.steady_step) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		oth = net->cc_mod.rtcc.vol_reduce;
+		oth <<= 16;
+		oth |= net->cc_mod.rtcc.step_cnt;
+		oth <<= 16;
+		oth |= net->cc_mod.rtcc.last_step_state;
+		SDT_PROBE5(sctp, cwnd, net, rttstep,
+			  vtag,
+			  ((net->cc_mod.rtcc.lbw << 32) | nbw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+			  oth,
+			  probepoint);
+#endif
+		net->cc_mod.rtcc.last_step_state = 0;
+		net->cc_mod.rtcc.step_cnt = 0;
+		net->cc_mod.rtcc.vol_reduce = 0;
+	}
+	net->cc_mod.rtcc.lbw = nbw;
+	net->cc_mod.rtcc.lbw_rtt = net->rtt;
+	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
+	return (0);
+}
+
+/* RTCC algorithm to limit growth of cwnd; return
+ * true if cwnd growth should NOT be allowed.
+ */
+static int
+cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
+{
+	uint64_t bw_offset, rtt_offset;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	uint64_t probepoint, rtt, vtag;
+#endif
+	uint64_t bytes_for_this_rtt, inst_bw;
+	uint64_t div, inst_off;
+	int bw_shift;
+	uint8_t inst_ind;
+	int ret;
+	/*-
+	 * Here we need to see if we want
+	 * to limit cwnd growth due to increase
+	 * in overall rtt but no increase in bw.
+	 * We use the following table to figure
+	 * out what we should do. When we return
+	 * 0, cc update goes on as planned. If we
+	 * return 1, then no cc update happens and cwnd
+	 * stays where it is at.
+	 * ----------------------------------
+	 *   BW    |    RTT   | Action
+	 * *********************************
+	 *   INC   |    INC   | return 0
+	 * ----------------------------------
+	 *   INC   |    SAME  | return 0
+	 * ----------------------------------
+	 *   INC   |    DECR  | return 0
+	 * ----------------------------------
+	 *   SAME  |    INC   | return 1
+	 * ----------------------------------
+	 *   SAME  |    SAME  | return 1
+	 * ----------------------------------
+	 *   SAME  |    DECR  | return 0
+	 * ----------------------------------
+	 *   DECR  |    INC   | return 0 or 1, based on whether we caused it.
+	 * ----------------------------------
+	 *   DECR  |    SAME  | return 0
+	 * ----------------------------------
+	 *   DECR  |    DECR  | return 0
+	 * ----------------------------------
+	 *
+	 * We are a bit fuzzy on what an increase or
+	 * decrease is. For BW it is the same if
+	 * it did not change within 1/64th. For
+	 * RTT it stayed the same if it did not
+	 * change within 1/32nd.
+	 */
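+	/* For example, with the 1/64 and 1/32 fuzz above (bw_shift = 6,
+	 * sctp_rttvar_rtt = 5): given lbw = 6,400,000 bytes/sec and
+	 * lbw_rtt = 64,000, a new bw only counts as "INC" above 6,500,000
+	 * or "DECR" below 6,300,000, and the rtt counts as "SAME"
+	 * anywhere in [62,000, 66,000].
+	 */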
+	bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	rtt = stcb->asoc.my_vtag;
+	vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
+	probepoint = (((uint64_t)net->cwnd) << 32);
+	rtt = net->rtt;
+#endif
+	if (net->cc_mod.rtcc.rtt_set_this_sack) {
+		net->cc_mod.rtcc.rtt_set_this_sack = 0;
+		bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
+		net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
+		if (net->rtt) {
+			div = net->rtt / 1000;
+			if (div) {
+				inst_bw = bytes_for_this_rtt / div;
+				inst_off = inst_bw >> bw_shift;
+				if (inst_bw > nbw)
+					inst_ind = SCTP_INST_GAINING;
+				else if ((inst_bw+inst_off) < nbw)
+					inst_ind = SCTP_INST_LOOSING;
+				else
+					inst_ind = SCTP_INST_NEUTRAL;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				probepoint |=  ((0xb << 16) | inst_ind);
+#endif
+			} else {
+				inst_ind = net->cc_mod.rtcc.last_inst_ind;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
+				/* Can't determine; do not change */
+				probepoint |=  ((0xc << 16) | inst_ind);
+#endif
+			}
+		} else {
+			inst_ind = net->cc_mod.rtcc.last_inst_ind;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			inst_bw = bytes_for_this_rtt;
+			/* Can't determine; do not change */
+			probepoint |=  ((0xd << 16) | inst_ind);
+#endif
+		}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
+			  vtag,
+			  ((nbw << 32) | inst_bw),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
+			  net->flight_size,
+			  probepoint);
+#endif
+	} else {
+		/* No rtt measurement, use last one */
+		inst_ind = net->cc_mod.rtcc.last_inst_ind;
+	}
+	bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
+	if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		ret = cc_bw_increase(stcb, net, nbw, vtag);
+#else
+		ret = cc_bw_increase(stcb, net, nbw);
+#endif
+		goto out;
+	}
+	rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
+	if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
+#else
+		ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, inst_ind);
+#endif
+		goto out;
+	}
+	/* If we reach here then
+	 * we are in a situation where
+	 * the bw stayed the same.
+	 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
+#else
+	ret = cc_bw_same(stcb, net, nbw, rtt_offset, inst_ind);
+#endif
+out:
+	net->cc_mod.rtcc.last_inst_ind = inst_ind;
+	return (ret);
+}
+
+static void
+sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
+				   struct sctp_association *asoc,
+				   int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
+{
+	struct sctp_nets *net;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	int old_cwnd;
+#endif
+	uint32_t t_ssthresh, t_cwnd, incr;
+	uint64_t t_ucwnd_sbw;
+	uint64_t t_path_mptcp;
+	uint64_t mptcp_like_alpha;
+	uint32_t srtt;
+	uint64_t max_path;
+
+	/* MT FIXME: Don't compute this over and over again */
+	t_ssthresh = 0;
+	t_cwnd = 0;
+	t_ucwnd_sbw = 0;
+	t_path_mptcp = 0;
+	mptcp_like_alpha = 1;
+	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
+		max_path = 0;
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+			t_ssthresh += net->ssthresh;
+			t_cwnd += net->cwnd;
+			/* lastsa>>3; we don't need to divide ... */
+			srtt = net->lastsa;
+			if (srtt > 0) {
+				uint64_t tmp;
+
+				t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
+				t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
+				                (((uint64_t)net->mtu) * (uint64_t)srtt);
+				tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
+				      ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
+				if (tmp > max_path) {
+					max_path = tmp;
+				}
+			}
+		}
+		if (t_path_mptcp > 0) {
+			mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
+		} else {
+			mptcp_like_alpha = 1;
+		}
+	}
+	if (t_ssthresh == 0) {
+		t_ssthresh = 1;
+	}
+	if (t_ucwnd_sbw == 0) {
+		t_ucwnd_sbw = 1;
+	}
+	/******************************/
+	/* update cwnd and Early FR   */
+	/******************************/
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+		/*
+		 * CMT fast recovery code. Need to debug.
+		 */
+		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+			    SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
+				net->will_exit_fast_recovery = 1;
+			}
+		}
+#endif
+		/* if nothing was acked on this destination skip it */
+		if (net->net_ack == 0) {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+			}
+			continue;
+		}
+#ifdef JANA_CMT_FAST_RECOVERY
+                /* CMT fast recovery code
+		 */
+		/*
+		  if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
+		  @@@ Do something
+		  }
+		  else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+		*/
+#endif
+
+		if (asoc->fast_retran_loss_recovery &&
+		    (will_exit == 0) &&
+		    (asoc->sctp_cmt_on_off == 0)) {
+			/*
+			 * If we are in loss recovery we skip any cwnd
+			 * update
+			 */
+			return;
+		}
+		/*
+		 * Did any measurements go on for this network?
+		 */
+		if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
+			uint64_t nbw;
+			/*
+			 * At this point our bw_bytes has been updated
+			 * by incoming sack information.
+			 *
+			 * But our bw may not yet be set.
+			 *
+			 */
+			if ((net->cc_mod.rtcc.new_tot_time/1000) > 0) {
+				nbw = net->cc_mod.rtcc.bw_bytes/(net->cc_mod.rtcc.new_tot_time/1000);
+			} else {
+				nbw = net->cc_mod.rtcc.bw_bytes;
+			}
+			if (net->cc_mod.rtcc.lbw) {
+				if (cc_bw_limit(stcb, net, nbw)) {
+					/* Hold here, no update */
+					continue;
+				}
+			} else {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+				uint64_t vtag, probepoint;
+
+				probepoint = (((uint64_t)net->cwnd) << 32);
+				probepoint |=  ((0xa << 16) | 0);
+				vtag = (net->rtt << 32) |
+					(((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
+					(stcb->rport);
+
+				SDT_PROBE5(sctp, cwnd, net, rttvar,
+					  vtag,
+					  nbw,
+					  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+					  net->flight_size,
+					  probepoint);
+#endif
+				net->cc_mod.rtcc.lbw = nbw;
+				net->cc_mod.rtcc.lbw_rtt = net->rtt;
+				if (net->cc_mod.rtcc.rtt_set_this_sack) {
+					net->cc_mod.rtcc.rtt_set_this_sack = 0;
+					net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
+				}
+			}
+		}
+		/*
+		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+		 * moved.
+		 */
+		if (accum_moved ||
+		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
+			/* If the cumulative ack moved we can proceed */
+			if (net->cwnd <= net->ssthresh) {
+				/* We are in slow start */
+				if (net->flight_size + net->net_ack >= net->cwnd) {
+					uint32_t limit;
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					old_cwnd = net->cwnd;
+#endif
+					switch (asoc->sctp_cmt_on_off) {
+					case SCTP_CMT_RPV1:
+						limit = (uint32_t)(((uint64_t)net->mtu *
+						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
+						                    (uint64_t)net->ssthresh) /
+						                   (uint64_t)t_ssthresh);
+						incr = (uint32_t)(((uint64_t)net->net_ack *
+						                   (uint64_t)net->ssthresh) /
+						                  (uint64_t)t_ssthresh);
+						if (incr > limit) {
+							incr = limit;
+						}
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_RPV2:
+						/* lastsa>>3;  we don't need to divide ...*/
+						srtt = net->lastsa;
+						if (srtt == 0) {
+							srtt = 1;
+						}
+						limit = (uint32_t)(((uint64_t)net->mtu *
+						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
+						                    (uint64_t)net->cwnd) /
+						                   ((uint64_t)srtt * t_ucwnd_sbw));
+						                   /* INCREASE FACTOR */
+						incr = (uint32_t)(((uint64_t)net->net_ack *
+						                   (uint64_t)net->cwnd) /
+						                  ((uint64_t)srtt * t_ucwnd_sbw));
+						                  /* INCREASE FACTOR */
+						if (incr > limit) {
+							incr = limit;
+						}
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_MPTCP:
+						limit = (uint32_t)(((uint64_t)net->mtu *
+						                    mptcp_like_alpha *
+						                    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
+						                   SHIFT_MPTCP_MULTI);
+						incr  = (uint32_t)(((uint64_t)net->net_ack *
+						                    mptcp_like_alpha) >>
+						                   SHIFT_MPTCP_MULTI);
+						if (incr > limit) {
+							incr = limit;
+						}
+						if (incr > net->net_ack) {
+							incr = net->net_ack;
+						}
+						if (incr > net->mtu) {
+							incr = net->mtu;
+						}
+						break;
+					default:
+						incr = net->net_ack;
+						if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
+							incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
+						}
+						break;
+					}
+					net->cwnd += incr;
+					sctp_enforce_cwnd_limit(asoc, net);
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+						sctp_log_cwnd(stcb, net, incr,
+						              SCTP_CWND_LOG_FROM_SS);
+					}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					SDT_PROBE5(sctp, cwnd, net, ack,
+					          stcb->asoc.my_vtag,
+					          ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+					          net,
+					          old_cwnd, net->cwnd);
+#endif
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->net_ack,
+							      SCTP_CWND_LOG_NOADV_SS);
+					}
+				}
+			} else {
+				/* We are in congestion avoidance */
+				/*
+				 * Add to pba
+				 */
+			        net->partial_bytes_acked += net->net_ack;
+
+				if ((net->flight_size + net->net_ack >= net->cwnd) &&
+                                    (net->partial_bytes_acked >= net->cwnd)) {
+					net->partial_bytes_acked -= net->cwnd;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					old_cwnd = net->cwnd;
+#endif
+					switch (asoc->sctp_cmt_on_off) {
+					case SCTP_CMT_RPV1:
+						incr = (uint32_t)(((uint64_t)net->mtu *
+						                   (uint64_t)net->ssthresh) /
+						                  (uint64_t)t_ssthresh);
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_RPV2:
+						/* lastsa>>3;  we don't need to divide ... */
+						srtt = net->lastsa;
+						if (srtt == 0) {
+							srtt = 1;
+						}
+						incr = (uint32_t)((uint64_t)net->mtu *
+						                  (uint64_t)net->cwnd /
+						                  ((uint64_t)srtt *
+						                   t_ucwnd_sbw));
+						                  /* INCREASE FACTOR */
+						if (incr == 0) {
+							incr = 1;
+						}
+						break;
+					case SCTP_CMT_MPTCP:
+						incr = (uint32_t)((mptcp_like_alpha *
+						                   (uint64_t) net->cwnd) >>
+						                  SHIFT_MPTCP_MULTI);
+						if (incr > net->mtu) {
+							incr = net->mtu;
+						}
+						break;
+					default:
+						incr = net->mtu;
+						break;
+					}
+					net->cwnd += incr;
+					sctp_enforce_cwnd_limit(asoc, net);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+					SDT_PROBE5(sctp, cwnd, net, ack,
+						  stcb->asoc.my_vtag,
+						  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+						  net,
+						  old_cwnd, net->cwnd);
+#endif
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->mtu,
+							      SCTP_CWND_LOG_FROM_CA);
+					}
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->net_ack,
+							      SCTP_CWND_LOG_NOADV_CA);
+					}
+				}
+			}
+		} else {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, net->mtu,
+					      SCTP_CWND_LOG_NO_CUMACK);
+			}
+		}
+	}
+}
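+
+/*
+ * Worked example for the SCTP_CMT_RPV1 slow-start branch above (numbers are
+ * hypothetical): assume two destinations with ssthresh values of 30000 and
+ * 10000 bytes, so t_ssthresh == 40000.  If the first destination newly acks
+ * net_ack == 3000 bytes, its raw increment is 3000 * 30000 / 40000 == 2250
+ * bytes.  With an MTU of 1500 and sctp_L2_abc_variable == 2 the cap is
+ * 1500 * 2 * 30000 / 40000 == 2250 bytes as well, so cwnd grows by 2250;
+ * the total increase is shared across paths in proportion to each path's
+ * ssthresh.
+ */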
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+static void
+sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
+#else
+static void
+sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net)
+#endif
+{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	int old_cwnd;
+
+	old_cwnd = net->cwnd;
+#endif
+	net->cwnd = net->mtu;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	SDT_PROBE5(sctp, cwnd, net, ack,
+	          stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
+	          old_cwnd, net->cwnd);
+#endif
+	SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
+	        (void *)net, net->cwnd);
+}
+
+
+static void
+sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	int old_cwnd = net->cwnd;
+	uint32_t t_ssthresh, t_cwnd;
+	uint64_t t_ucwnd_sbw;
+
+	/* MT FIXME: Don't compute this over and over again */
+	t_ssthresh = 0;
+	t_cwnd = 0;
+	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
+	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
+		struct sctp_nets *lnet;
+		uint32_t srtt;
+
+		t_ucwnd_sbw = 0;
+		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+			t_ssthresh += lnet->ssthresh;
+			t_cwnd += lnet->cwnd;
+			srtt = lnet->lastsa;
+			/* lastsa>>3;  we don't need to divide ... */
+			if (srtt > 0) {
+				t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
+			}
+		}
+		if (t_ssthresh < 1) {
+			t_ssthresh = 1;
+		}
+		if (t_ucwnd_sbw < 1) {
+			t_ucwnd_sbw = 1;
+		}
+		if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
+			net->ssthresh = (uint32_t)(((uint64_t)4 *
+			                            (uint64_t)net->mtu *
+			                            (uint64_t)net->ssthresh) /
+			                           (uint64_t)t_ssthresh);
+		} else {
+			uint64_t cc_delta;
+
+			srtt = net->lastsa;
+			/* lastsa>>3;  we don't need to divide ... */
+			if (srtt == 0) {
+				srtt = 1;
+			}
+			cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
+			if (cc_delta < t_cwnd) {
+				net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
+			} else {
+				net->ssthresh  = net->mtu;
+			}
+		}
+		if ((net->cwnd > t_cwnd / 2) &&
+		    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
+			net->ssthresh = net->cwnd - t_cwnd / 2;
+		}
+		if (net->ssthresh < net->mtu) {
+			net->ssthresh = net->mtu;
+		}
+	} else {
+		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
+	}
+	net->cwnd = net->mtu;
+	net->partial_bytes_acked = 0;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	SDT_PROBE5(sctp, cwnd, net, to,
+		  stcb->asoc.my_vtag,
+		  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+		  net,
+		  old_cwnd, net->cwnd);
+#endif
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+	}
+}
+
+static void
+sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
+					    int in_window, int num_pkt_lost, int use_rtcc)
+{
+	int old_cwnd = net->cwnd;
+	if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
+		/* Data center Congestion Control */
+		if (in_window == 0) {
+			/* Go to CA with the cwnd at the point we sent
+			 * the TSN that was marked with a CE.
+			 */
+			if (net->ecn_prev_cwnd < net->cwnd) {
+				/* Restore to prev cwnd */
+				net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
+			} else {
+				/* Just cut in 1/2 */
+				net->cwnd /= 2;
+			}
+			/* Drop to CA */
+			net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+			}
+		} else {
+			/* Further tuning down required over the drastic original cut */
+			net->ssthresh -= (net->mtu * num_pkt_lost);
+			net->cwnd -= (net->mtu * num_pkt_lost);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+			}
+
+		}
+		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+	}  else {
+		if (in_window == 0) {
+			SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+			net->ssthresh = net->cwnd / 2;
+			if (net->ssthresh < net->mtu) {
+				net->ssthresh = net->mtu;
+				/* here back off the timer as well, to slow us down */
+				net->RTO <<= 1;
+			}
+			net->cwnd = net->ssthresh;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+			SDT_PROBE5(sctp, cwnd, net, ecn,
+				  stcb->asoc.my_vtag,
+				  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+				  net,
+				  old_cwnd, net->cwnd);
+#endif
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+			}
+		}
+	}
+
+}
+
+static void
+sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
+	struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+	uint32_t *bottle_bw, uint32_t *on_queue)
+{
+	uint32_t bw_avail;
+	unsigned int incr;
+	int old_cwnd = net->cwnd;
+
+	/* get bottle neck bw */
+	*bottle_bw = ntohl(cp->bottle_bw);
+	/* and what's on queue */
+	*on_queue = ntohl(cp->current_onq);
+	/*
+	 * adjust the on-queue if our flight is more; it could be
+	 * that the router has not yet gotten data "in-flight" to it
+	 */
+	if (*on_queue < net->flight_size) {
+		*on_queue = net->flight_size;
+	}
+	/* rtt is measured in micro seconds, bottle_bw in bytes per second */
+	bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
+	if (bw_avail > *bottle_bw) {
+		/*
+		 * Cap the growth to no more than the bottle neck.
+		 * This can happen as RTT slides up due to queues.
+		 * It also means if you have more than a 1 second
+		 * RTT with an empty queue you will be limited to the
+		 * bottle_bw per second no matter if other points
+		 * have 1/2 the RTT and you could get more out...
+		 */
+		bw_avail = *bottle_bw;
+	}
+	if (*on_queue > bw_avail) {
+		/*
+		 * No room for anything else, don't allow anything
+		 * else to be "added to the fire".
+		 */
+		int seg_inflight, seg_onqueue, my_portion;
+
+		net->partial_bytes_acked = 0;
+		/* how much are we over queue size? */
+		incr = *on_queue - bw_avail;
+		if (stcb->asoc.seen_a_sack_this_pkt) {
+			/*
+			 * undo any cwnd adjustment that the sack
+			 * might have made
+			 */
+			net->cwnd = net->prev_cwnd;
+		}
+		/* Now how much of that is mine? */
+		seg_inflight = net->flight_size / net->mtu;
+		seg_onqueue = *on_queue / net->mtu;
+		my_portion = (incr * seg_inflight) / seg_onqueue;
+
+		/* Have I made an adjustment already */
+		if (net->cwnd > net->flight_size) {
+			/*
+			 * for this flight I made an adjustment; we
+			 * need to decrease the portion by a share of
+			 * our previous adjustment.
+			 */
+			int diff_adj;
+
+			diff_adj = net->cwnd - net->flight_size;
+			if (diff_adj > my_portion)
+				my_portion = 0;
+			else
+				my_portion -= diff_adj;
+		}
+		/*
+		 * back down to the previous cwnd (assume we have
+		 * had a sack before this packet), minus whatever
+		 * portion of the overage is my fault.
+		 */
+		net->cwnd -= my_portion;
+
+		/* we will NOT back down more than 1 MTU */
+		if (net->cwnd <= net->mtu) {
+			net->cwnd = net->mtu;
+		}
+		/* force into CA */
+		net->ssthresh = net->cwnd - 1;
+	} else {
+		/*
+		 * Take 1/4 of the space left or max burst,
+		 * whichever is less.
+		 */
+		incr = (bw_avail - *on_queue) >> 2;
+		if ((stcb->asoc.max_burst > 0) &&
+		    (stcb->asoc.max_burst * net->mtu < incr)) {
+			incr = stcb->asoc.max_burst * net->mtu;
+		}
+		net->cwnd += incr;
+	}
+	if (net->cwnd > bw_avail) {
+		/* We can't exceed the pipe size */
+		net->cwnd = bw_avail;
+	}
+	if (net->cwnd < net->mtu) {
+		/* We always have 1 MTU */
+		net->cwnd = net->mtu;
+	}
+	sctp_enforce_cwnd_limit(&stcb->asoc, net);
+	if (net->cwnd - old_cwnd != 0) {
+		/* log only changes */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		SDT_PROBE5(sctp, cwnd, net, pd,
+			  stcb->asoc.my_vtag,
+			  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+			  net,
+			  old_cwnd, net->cwnd);
+#endif
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+				SCTP_CWND_LOG_FROM_SAT);
+		}
+	}
+}
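+
+/*
+ * Rough example of the packet-dropped handling above (hypothetical numbers):
+ * a PKTDROP report of bottle_bw == 1000000 bytes/sec with an rtt of 50000
+ * microseconds gives bw_avail == 50000 bytes (the path's bandwidth-delay
+ * product).  If the reported/adjusted on_queue is 80000 bytes, the overage
+ * is 30000 bytes; with 60000 bytes of our own data in flight (40 segments of
+ * 1500 bytes against 53 segments queued) our share of the overage is roughly
+ * 30000 * 40 / 53, about 22600 bytes, which is roughly what cwnd is backed
+ * off by (never below one MTU, and ssthresh is pulled just under cwnd to
+ * force congestion avoidance).
+ */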
+
+static void
+sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
+			      struct sctp_nets *net, int burst_limit)
+{
+	int old_cwnd = net->cwnd;
+
+	if (net->ssthresh < net->cwnd)
+		net->ssthresh = net->cwnd;
+	if (burst_limit) {
+		net->cwnd = (net->flight_size + (burst_limit * net->mtu));
+		sctp_enforce_cwnd_limit(&stcb->asoc, net);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		SDT_PROBE5(sctp, cwnd, net, bl,
+			  stcb->asoc.my_vtag,
+			  ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
+			  net,
+			  old_cwnd, net->cwnd);
+#endif
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
+		}
+	}
+}
+
+static void
+sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+			    struct sctp_association *asoc,
+			    int accum_moved, int reneged_all, int will_exit)
+{
+	/* Passing zero as the last argument disables the rtcc algorithm */
+	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
+}
+
+static void
+sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+	int in_window, int num_pkt_lost)
+{
+	/* Passing zero as the last argument disables the rtcc algorithm */
+	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
+}
+
+/* Here starts the RTCCVAR type CC invented by RRS which
+ * is a slight mod to RFC2581. We reuse a common routine or
+ * two since these algorithms are so close and need to
+ * remain the same.
+ */
+static void
+sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+				     int in_window, int num_pkt_lost)
+{
+	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
+}
+
+
+static
+void sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
+					    struct sctp_tmit_chunk *tp1)
+{
+	net->cc_mod.rtcc.bw_bytes += tp1->send_size;
+}
+
+static void
+sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
+				    struct sctp_nets *net)
+{
+	if (net->cc_mod.rtcc.tls_needs_set > 0) {
+		/* We had a bw measurement going on */
+		struct timeval ltls;
+		SCTP_GETPTIME_TIMEVAL(&ltls);
+		timevalsub(&ltls, &net->cc_mod.rtcc.tls);
+		net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
+	}
+}
+
+static void
+sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
+				       struct sctp_nets *net)
+{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	uint64_t vtag, probepoint;
+
+#endif
+	if (net->cc_mod.rtcc.lbw) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+		/* Clear the old bw.. we went to 0 in-flight */
+		vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
+			(stcb->rport);
+		probepoint = (((uint64_t)net->cwnd) << 32);
+		/* Probe point 8 */
+		probepoint |=  ((8 << 16) | 0);
+		SDT_PROBE5(sctp, cwnd, net, rttvar,
+			  vtag,
+			  ((net->cc_mod.rtcc.lbw << 32) | 0),
+			  ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
+			  net->flight_size,
+			  probepoint);
+#endif
+		net->cc_mod.rtcc.lbw_rtt = 0;
+		net->cc_mod.rtcc.cwnd_at_bw_set = 0;
+		net->cc_mod.rtcc.lbw = 0;
+		net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
+		net->cc_mod.rtcc.vol_reduce = 0;
+		net->cc_mod.rtcc.bw_tot_time = 0;
+		net->cc_mod.rtcc.bw_bytes = 0;
+		net->cc_mod.rtcc.tls_needs_set = 0;
+		if (net->cc_mod.rtcc.steady_step) {
+			net->cc_mod.rtcc.vol_reduce = 0;
+			net->cc_mod.rtcc.step_cnt = 0;
+			net->cc_mod.rtcc.last_step_state = 0;
+		}
+		if (net->cc_mod.rtcc.ret_from_eq) {
+			/* less aggressive one - reset cwnd too */
+			uint32_t cwnd_in_mtu, cwnd;
+
+			cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
+			if (cwnd_in_mtu == 0) {
+				/* Using 0 means that the value of RFC 4960 is used. */
+				cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+			} else {
+				/*
+				 * We take the minimum of the burst limit and the
+				 * initial congestion window.
+				 */
+				if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
+					cwnd_in_mtu = stcb->asoc.max_burst;
+				cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
+			}
+			if (net->cwnd > cwnd) {
+				/* Only set if we are not at a timeout (i.e. down to 1 mtu) */
+				net->cwnd = cwnd;
+			}
+		}
+	}
+}
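+
+/*
+ * Example of the cwnd reset above when sctp_initial_cwnd is 0 and the RFC
+ * 4960 value is used (assuming an MTU of 1500 and SCTP_INITIAL_CWND of 4380
+ * bytes): min(4 * 1500, max(2 * 1500, 4380)) == min(6000, 4380) == 4380, so
+ * after an idle period the window is pulled back to roughly 3 MTUs worth of
+ * data unless it is already smaller.
+ */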
+
+static void
+sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
+			       struct sctp_nets *net)
+{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	uint64_t vtag, probepoint;
+
+#endif
+	sctp_set_initial_cc_param(stcb, net);
+	stcb->asoc.use_precise_time = 1;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 803000
+	probepoint = (((uint64_t)net->cwnd) << 32);
+	probepoint |=  ((9 << 16) | 0);
+	vtag = (net->rtt << 32) |
+		(((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
+		(stcb->rport);
+	SDT_PROBE5(sctp, cwnd, net, rttvar,
+		  vtag,
+		  0,
+		  0,
+		  0,
+		  probepoint);
+#endif
+	net->cc_mod.rtcc.lbw_rtt = 0;
+	net->cc_mod.rtcc.cwnd_at_bw_set = 0;
+	net->cc_mod.rtcc.vol_reduce = 0;
+	net->cc_mod.rtcc.lbw = 0;
+	net->cc_mod.rtcc.vol_reduce = 0;
+	net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
+	net->cc_mod.rtcc.bw_tot_time = 0;
+	net->cc_mod.rtcc.bw_bytes = 0;
+	net->cc_mod.rtcc.tls_needs_set = 0;
+	net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
+	net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
+	net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
+	net->cc_mod.rtcc.step_cnt = 0;
+	net->cc_mod.rtcc.last_step_state = 0;
+
+
+}
+
+static int
+sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
+			     struct sctp_cc_option *cc_opt)
+{
+	struct sctp_nets *net;
+	if (setorget == 1) {
+		/* a set */
+		if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
+			if ((cc_opt->aid_value.assoc_value != 0) &&
+			    (cc_opt->aid_value.assoc_value != 1)) {
+				return (EINVAL);
+			}
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
+			}
+		} else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
+			if ((cc_opt->aid_value.assoc_value != 0) &&
+			    (cc_opt->aid_value.assoc_value != 1)) {
+				return (EINVAL);
+			}
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
+			}
+		} else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
+			}
+		} else {
+			return (EINVAL);
+		}
+	} else {
+		/* a get */
+		if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
+			net = TAILQ_FIRST(&stcb->asoc.nets);
+			if (net == NULL) {
+				return (EFAULT);
+			}
+			cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
+		} else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
+			net = TAILQ_FIRST(&stcb->asoc.nets);
+			if (net == NULL) {
+				return (EFAULT);
+			}
+			cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
+		} else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
+			net = TAILQ_FIRST(&stcb->asoc.nets);
+			if (net == NULL) {
+				return (EFAULT);
+			}
+			cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
+		} else {
+			return (EINVAL);
+		}
+	}
+	return (0);
+}
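+
+/*
+ * Sketch of how an application could drive the knobs above through the
+ * SCTP_CC_OPTION socket option (assuming the usrsctp socket API; names and
+ * error handling are illustrative only):
+ *
+ *	struct sctp_cc_option opt;
+ *
+ *	memset(&opt, 0, sizeof(opt));
+ *	opt.option = SCTP_CC_OPT_USE_DCCC_ECN;
+ *	opt.aid_value.assoc_id = assoc_id;
+ *	opt.aid_value.assoc_value = 1;
+ *	usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_CC_OPTION,
+ *	                   &opt, (socklen_t)sizeof(opt));
+ */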
+
+static void
+sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
+                                         struct sctp_nets *net)
+{
+	if (net->cc_mod.rtcc.tls_needs_set == 0) {
+		SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
+		net->cc_mod.rtcc.tls_needs_set = 2;
+	}
+}
+
+static void
+sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
+				 struct sctp_association *asoc,
+				 int accum_moved, int reneged_all, int will_exit)
+{
+	/* Passing one as the last argument enables the rtcc algorithm */
+	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
+}
+
+static void
+sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
+                         struct sctp_nets *net,
+                         struct timeval *now SCTP_UNUSED)
+{
+	net->cc_mod.rtcc.rtt_set_this_sack = 1;
+}
+
+/* Here starts Sally Floyd's HS-TCP */
+
+struct sctp_hs_raise_drop {
+	int32_t cwnd;
+	int8_t increase;
+	int8_t drop_percent;
+};
+
+#define SCTP_HS_TABLE_SIZE 73
+
+static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
+	{38, 1, 50},		/* 0   */
+	{118, 2, 44},		/* 1   */
+	{221, 3, 41},		/* 2   */
+	{347, 4, 38},		/* 3   */
+	{495, 5, 37},		/* 4   */
+	{663, 6, 35},		/* 5   */
+	{851, 7, 34},		/* 6   */
+	{1058, 8, 33},		/* 7   */
+	{1284, 9, 32},		/* 8   */
+	{1529, 10, 31},		/* 9   */
+	{1793, 11, 30},		/* 10  */
+	{2076, 12, 29},		/* 11  */
+	{2378, 13, 28},		/* 12  */
+	{2699, 14, 28},		/* 13  */
+	{3039, 15, 27},		/* 14  */
+	{3399, 16, 27},		/* 15  */
+	{3778, 17, 26},		/* 16  */
+	{4177, 18, 26},		/* 17  */
+	{4596, 19, 25},		/* 18  */
+	{5036, 20, 25},		/* 19  */
+	{5497, 21, 24},		/* 20  */
+	{5979, 22, 24},		/* 21  */
+	{6483, 23, 23},		/* 22  */
+	{7009, 24, 23},		/* 23  */
+	{7558, 25, 22},		/* 24  */
+	{8130, 26, 22},		/* 25  */
+	{8726, 27, 22},		/* 26  */
+	{9346, 28, 21},		/* 27  */
+	{9991, 29, 21},		/* 28  */
+	{10661, 30, 21},	/* 29  */
+	{11358, 31, 20},	/* 30  */
+	{12082, 32, 20},	/* 31  */
+	{12834, 33, 20},	/* 32  */
+	{13614, 34, 19},	/* 33  */
+	{14424, 35, 19},	/* 34  */
+	{15265, 36, 19},	/* 35  */
+	{16137, 37, 19},	/* 36  */
+	{17042, 38, 18},	/* 37  */
+	{17981, 39, 18},	/* 38  */
+	{18955, 40, 18},	/* 39  */
+	{19965, 41, 17},	/* 40  */
+	{21013, 42, 17},	/* 41  */
+	{22101, 43, 17},	/* 42  */
+	{23230, 44, 17},	/* 43  */
+	{24402, 45, 16},	/* 44  */
+	{25618, 46, 16},	/* 45  */
+	{26881, 47, 16},	/* 46  */
+	{28193, 48, 16},	/* 47  */
+	{29557, 49, 15},	/* 48  */
+	{30975, 50, 15},	/* 49  */
+	{32450, 51, 15},	/* 50  */
+	{33986, 52, 15},	/* 51  */
+	{35586, 53, 14},	/* 52  */
+	{37253, 54, 14},	/* 53  */
+	{38992, 55, 14},	/* 54  */
+	{40808, 56, 14},	/* 55  */
+	{42707, 57, 13},	/* 56  */
+	{44694, 58, 13},	/* 57  */
+	{46776, 59, 13},	/* 58  */
+	{48961, 60, 13},	/* 59  */
+	{51258, 61, 13},	/* 60  */
+	{53677, 62, 12},	/* 61  */
+	{56230, 63, 12},	/* 62  */
+	{58932, 64, 12},	/* 63  */
+	{61799, 65, 12},	/* 64  */
+	{64851, 66, 11},	/* 65  */
+	{68113, 67, 11},	/* 66  */
+	{71617, 68, 11},	/* 67  */
+	{75401, 69, 10},	/* 68  */
+	{79517, 70, 10},	/* 69  */
+	{84035, 71, 10},	/* 70  */
+	{89053, 72, 10},	/* 71  */
+	{94717, 73, 9}		/* 72  */
+};
+
+static void
+sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	int cur_val, i, indx, incr;
+	int old_cwnd = net->cwnd;
+
+	cur_val = net->cwnd >> 10;
+	indx = SCTP_HS_TABLE_SIZE - 1;
+
+	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+		/* normal mode */
+		if (net->net_ack > net->mtu) {
+			net->cwnd += net->mtu;
+		} else {
+			net->cwnd += net->net_ack;
+		}
+	} else {
+		for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
+			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
+				indx = i;
+				break;
+			}
+		}
+		net->last_hs_used = indx;
+		incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
+		net->cwnd += incr;
+	}
+	sctp_enforce_cwnd_limit(&stcb->asoc, net);
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
+	}
+}
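+
+/*
+ * Example of the table lookup above (hypothetical numbers): with a cwnd of
+ * about 200 KB, cur_val == net->cwnd >> 10 == 200.  That is above the 38 KB
+ * threshold of entry 0, and the first table row whose cwnd column exceeds
+ * 200 is entry 2 ({221, 3, 41}), so the window grows by 3 << 10 == 3072
+ * bytes per qualifying SACK instead of the single MTU used in normal mode.
+ */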
+
+static void
+sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	int cur_val, i, indx;
+	int old_cwnd = net->cwnd;
+
+	cur_val = net->cwnd >> 10;
+	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+		/* normal mode */
+		net->ssthresh = net->cwnd / 2;
+		if (net->ssthresh < (net->mtu * 2)) {
+			net->ssthresh = 2 * net->mtu;
+		}
+		net->cwnd = net->ssthresh;
+	} else {
+		/* drop by the proper amount */
+		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
+		    (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
+		net->cwnd = net->ssthresh;
+		/* now where are we */
+		indx = net->last_hs_used;
+		cur_val = net->cwnd >> 10;
+		/* reset where we are in the table */
+		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
+			/* fell out of hs */
+			net->last_hs_used = 0;
+		} else {
+			for (i = indx; i >= 1; i--) {
+				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
+					break;
+				}
+			}
+			net->last_hs_used = indx;
+		}
+	}
+	sctp_enforce_cwnd_limit(&stcb->asoc, net);
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
+	}
+}
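+
+/*
+ * Continuing the example above: on a fast-retransmit loss at roughly 200 KB
+ * the table entry in use ({221, 3, 41}) prescribes a 41 percent reduction,
+ * so ssthresh becomes cwnd - (cwnd / 100) * 41, i.e. the window is cut to
+ * about 59 percent of its value rather than the 50 percent of normal mode,
+ * and last_hs_used is re-derived from the new, smaller cwnd.
+ */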
+
+static void
+sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
+                             struct sctp_association *asoc)
+{
+	struct sctp_nets *net;
+	/*
+	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
+	 * (net->fast_retran_loss_recovery == 0)))
+	 */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if ((asoc->fast_retran_loss_recovery == 0) ||
+		    (asoc->sctp_cmt_on_off > 0)) {
+			/* out of a RFC2582 Fast recovery window? */
+			if (net->net_ack > 0) {
+				/*
+				 * per section 7.2.3, are there any
+				 * destinations that had a fast retransmit
+				 * to them? If so, what we need to do is
+				 * adjust ssthresh and cwnd.
+				 */
+				struct sctp_tmit_chunk *lchk;
+
+				sctp_hs_cwnd_decrease(stcb, net);
+
+				lchk = TAILQ_FIRST(&asoc->send_queue);
+
+				net->partial_bytes_acked = 0;
+				/* Turn on fast recovery window */
+				asoc->fast_retran_loss_recovery = 1;
+				if (lchk == NULL) {
+					/* Mark end of the window */
+					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+				} else {
+					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+				}
+
+				/*
+				 * CMT fast recovery -- per destination
+				 * recovery variable.
+				 */
+				net->fast_retran_loss_recovery = 1;
+
+				if (lchk == NULL) {
+					/* Mark end of the window */
+					net->fast_recovery_tsn = asoc->sending_seq - 1;
+				} else {
+					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+				}
+
+				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+						stcb->sctp_ep, stcb, net,
+				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
+				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+						 stcb->sctp_ep, stcb, net);
+			}
+		} else if (net->net_ack > 0) {
+			/*
+			 * Mark a peg that we WOULD have done a cwnd
+			 * reduction but RFC2582 prevented this action.
+			 */
+			SCTP_STAT_INCR(sctps_fastretransinrtt);
+		}
+	}
+}
+
+static void
+sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
+		 struct sctp_association *asoc,
+		 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
+{
+	struct sctp_nets *net;
+	/******************************/
+	/* update cwnd and Early FR   */
+	/******************************/
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+		/*
+		 * CMT fast recovery code. Need to debug.
+		 */
+		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+			    SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
+				net->will_exit_fast_recovery = 1;
+			}
+		}
+#endif
+		/* if nothing was acked on this destination skip it */
+		if (net->net_ack == 0) {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+			}
+			continue;
+		}
+#ifdef JANA_CMT_FAST_RECOVERY
+                /* CMT fast recovery code
+		 */
+		/*
+		if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
+		    @@@ Do something
+		 }
+		 else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+		*/
+#endif
+
+		 if (asoc->fast_retran_loss_recovery &&
+		     (will_exit == 0) &&
+		     (asoc->sctp_cmt_on_off == 0)) {
+			/*
+			 * If we are in loss recovery we skip any cwnd
+			 * update
+			 */
+			return;
+		}
+		/*
+		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+		 * moved.
+		 */
+		if (accum_moved ||
+		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
+			/* If the cumulative ack moved we can proceed */
+			if (net->cwnd <= net->ssthresh) {
+				/* We are in slow start */
+				if (net->flight_size + net->net_ack >= net->cwnd) {
+					sctp_hs_cwnd_increase(stcb, net);
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->net_ack,
+							SCTP_CWND_LOG_NOADV_SS);
+					}
+				}
+			} else {
+				/* We are in congestion avoidance */
+				net->partial_bytes_acked += net->net_ack;
+				if ((net->flight_size + net->net_ack >= net->cwnd) &&
+				    (net->partial_bytes_acked >= net->cwnd)) {
+					net->partial_bytes_acked -= net->cwnd;
+					net->cwnd += net->mtu;
+					sctp_enforce_cwnd_limit(asoc, net);
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->mtu,
+							SCTP_CWND_LOG_FROM_CA);
+					}
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						sctp_log_cwnd(stcb, net, net->net_ack,
+							SCTP_CWND_LOG_NOADV_CA);
+					}
+				}
+			}
+		} else {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, net->mtu,
+					SCTP_CWND_LOG_NO_CUMACK);
+			}
+		}
+	}
+}
+
+
+/*
+ * H-TCP congestion control. The algorithm is detailed in:
+ * R.N.Shorten, D.J.Leith:
+ *   "H-TCP: TCP for high-speed and long-distance networks"
+ *   Proc. PFLDnet, Argonne, 2004.
+ * http://www.hamilton.ie/net/htcp3.pdf
+ */
+
+
+static int use_rtt_scaling = 1;
+static int use_bandwidth_switch = 1;
+
+static inline int
+between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
+{
+	return (seq3 - seq2 >= seq1 - seq2);
+}
+
+static inline uint32_t
+htcp_cong_time(struct htcp *ca)
+{
+	return (sctp_get_tick_count() - ca->last_cong);
+}
+
+static inline uint32_t
+htcp_ccount(struct htcp *ca)
+{
+	return (htcp_cong_time(ca)/ca->minRTT);
+}
+
+static inline void
+htcp_reset(struct htcp *ca)
+{
+	ca->undo_last_cong = ca->last_cong;
+	ca->undo_maxRTT = ca->maxRTT;
+	ca->undo_old_maxB = ca->old_maxB;
+	ca->last_cong = sctp_get_tick_count();
+}
+
+#ifdef SCTP_NOT_USED
+
+static uint32_t
+htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
+	net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
+	net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
+	return (max(net->cwnd, ((net->ssthresh/net->mtu<<7)/net->cc_mod.htcp_ca.beta)*net->mtu));
+}
+
+#endif
+
+static inline void
+measure_rtt(struct sctp_nets *net)
+{
+	uint32_t srtt = net->lastsa>>SCTP_RTT_SHIFT;
+
+	/* keep track of minimum RTT seen so far, minRTT is zero at first */
+	if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
+		net->cc_mod.htcp_ca.minRTT = srtt;
+
+	/* max RTT */
+	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
+		if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
+			net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
+		if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT+MSEC_TO_TICKS(20))
+			net->cc_mod.htcp_ca.maxRTT = srtt;
+	}
+}
+
+static void
+measure_achieved_throughput(struct sctp_nets *net)
+{
+	uint32_t now = sctp_get_tick_count();
+
+	if (net->fast_retran_ip == 0)
+		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;
+
+	if (!use_bandwidth_switch)
+		return;
+
+	/* achieved throughput calculations */
+	/* JRS - not 100% sure of this statement */
+	if (net->fast_retran_ip == 1) {
+		net->cc_mod.htcp_ca.bytecount = 0;
+		net->cc_mod.htcp_ca.lasttime = now;
+		return;
+	}
+
+	net->cc_mod.htcp_ca.bytecount += net->net_ack;
+	if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
+	    (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
+	    (net->cc_mod.htcp_ca.minRTT > 0)) {
+		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount/net->mtu*hz/(now - net->cc_mod.htcp_ca.lasttime);
+
+		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
+			/* just after backoff */
+			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
+		} else {
+			net->cc_mod.htcp_ca.Bi = (3*net->cc_mod.htcp_ca.Bi + cur_Bi)/4;
+			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
+				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
+			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
+				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
+		}
+		net->cc_mod.htcp_ca.bytecount = 0;
+		net->cc_mod.htcp_ca.lasttime = now;
+	}
+}
+
+static inline void
+htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
+{
+	if (use_bandwidth_switch) {
+		uint32_t maxB = ca->maxB;
+		uint32_t old_maxB = ca->old_maxB;
+		ca->old_maxB = ca->maxB;
+
+		if (!between(5*maxB, 4*old_maxB, 6*old_maxB)) {
+			ca->beta = BETA_MIN;
+			ca->modeswitch = 0;
+			return;
+		}
+	}
+
+	if (ca->modeswitch && minRTT > (uint32_t)MSEC_TO_TICKS(10) && maxRTT) {
+		ca->beta = (minRTT<<7)/maxRTT;
+		if (ca->beta < BETA_MIN)
+			ca->beta = BETA_MIN;
+		else if (ca->beta > BETA_MAX)
+			ca->beta = BETA_MAX;
+	} else {
+		ca->beta = BETA_MIN;
+		ca->modeswitch = 1;
+	}
+}
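+
+/*
+ * Example of the beta computation above (hypothetical RTTs, once mode
+ * switching is active): with minRTT of 20 ticks and maxRTT of 50 ticks,
+ * beta == (20 << 7) / 50 == 51, which is below BETA_MIN (64, i.e. 0.5 in
+ * <<7 fixed point) and is clamped up to it; with minRTT of 40 and maxRTT of
+ * 50, beta == 102 == BETA_MAX, i.e. a backoff factor of about 0.8.
+ */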
+
+static inline void
+htcp_alpha_update(struct htcp *ca)
+{
+	uint32_t minRTT = ca->minRTT;
+	uint32_t factor = 1;
+	uint32_t diff = htcp_cong_time(ca);
+
+	if (diff > (uint32_t)hz) {
+		diff -= hz;
+		factor = 1+ ( 10*diff + ((diff/2)*(diff/2)/hz))/hz;
+	}
+
+	if (use_rtt_scaling && minRTT) {
+		uint32_t scale = (hz<<3)/(10*minRTT);
+		scale = min(max(scale, 1U<<2), 10U<<3); /* clamping ratio to interval [0.5,10]<<3 */
+		factor = (factor<<3)/scale;
+		if (!factor)
+			factor = 1;
+	}
+
+	ca->alpha = 2*factor*((1<<7)-ca->beta);
+	if (!ca->alpha)
+		ca->alpha = ALPHA_BASE;
+}
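+
+/*
+ * Example of the alpha computation above (assuming hz == 1000 and ignoring
+ * the RTT scaling term): with beta at BETA_MIN (64), alpha == 2 * factor *
+ * (128 - 64) == 128 * factor.  Right after a congestion event factor is 1,
+ * so alpha corresponds to roughly one MTU of additive increase per round;
+ * three seconds later diff == 2000 ticks and factor == 1 + (10 * 2000 +
+ * 1000 * 1000 / 1000) / 1000 == 22, matching H-TCP's 1 + 10*dt + (dt/2)^2
+ * growth with dt == 2 seconds.
+ */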
+
+/* After we have the rtt data to calculate beta, we'd still prefer to wait one
+ * rtt before we adjust our beta to ensure we are working from consistent
+ * data.
+ *
+ * This function should be called when we hit a congestion event since only at
+ * that point do we really have a real sense of maxRTT (the queues en route
+ * were getting just too full now).
+ */
+static void
+htcp_param_update(struct sctp_nets *net)
+{
+	uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
+	uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;
+
+	htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
+	htcp_alpha_update(&net->cc_mod.htcp_ca);
+
+	/* add slowly fading memory for maxRTT to accommodate routing changes etc */
+	if (minRTT > 0 && maxRTT > minRTT)
+		net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT-minRTT)*95)/100;
+}
+
+static uint32_t
+htcp_recalc_ssthresh(struct sctp_nets *net)
+{
+	htcp_param_update(net);
+	return (max(((net->cwnd/net->mtu * net->cc_mod.htcp_ca.beta) >> 7)*net->mtu, 2U*net->mtu));
+}
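+
+/*
+ * Example (hypothetical numbers): with cwnd equal to 100 MTUs and beta at
+ * BETA_MAX (102), the recalculated ssthresh is ((100 * 102) >> 7) == 79
+ * MTUs, i.e. roughly a 0.8 backoff, while beta at BETA_MIN (64) gives the
+ * familiar halving; the result is never allowed below 2 MTUs.
+ */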
+
+static void
+htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/*-
+	 * How to handle these functions?
+         *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
+	 *		return;
+	 */
+        if (net->cwnd <= net->ssthresh) {
+		/* We are in slow start */
+		if (net->flight_size + net->net_ack >= net->cwnd) {
+			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
+				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+					sctp_log_cwnd(stcb, net, net->mtu,
+						SCTP_CWND_LOG_FROM_SS);
+				}
+
+			} else {
+				net->cwnd += net->net_ack;
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+					sctp_log_cwnd(stcb, net, net->net_ack,
+						SCTP_CWND_LOG_FROM_SS);
+				}
+
+			}
+			sctp_enforce_cwnd_limit(&stcb->asoc, net);
+		} else {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, net->net_ack,
+					SCTP_CWND_LOG_NOADV_SS);
+			}
+		}
+	} else {
+		measure_rtt(net);
+
+		/* In dangerous area, increase slowly.
+		 * In theory this is net->cwnd += alpha / net->cwnd
+		 */
+		/* What is snd_cwnd_cnt?? */
+		if (((net->partial_bytes_acked/net->mtu * net->cc_mod.htcp_ca.alpha) >> 7)*net->mtu >= net->cwnd) {
+                        /*-
+			 * Does SCTP have a cwnd clamp?
+			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
+			 */
+			net->cwnd += net->mtu;
+			net->partial_bytes_acked = 0;
+			sctp_enforce_cwnd_limit(&stcb->asoc, net);
+			htcp_alpha_update(&net->cc_mod.htcp_ca);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+				sctp_log_cwnd(stcb, net, net->mtu,
+					SCTP_CWND_LOG_FROM_CA);
+			}
+		} else {
+			net->partial_bytes_acked += net->net_ack;
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, net->net_ack,
+					SCTP_CWND_LOG_NOADV_CA);
+			}
+		}
+
+		net->cc_mod.htcp_ca.bytes_acked = net->mtu;
+	}
+}
+
+#ifdef SCTP_NOT_USED
+/* Lower bound on congestion window. */
+static uint32_t
+htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	return (net->ssthresh);
+}
+#endif
+
+static void
+htcp_init(struct sctp_nets *net)
+{
+	memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
+	net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
+	net->cc_mod.htcp_ca.beta = BETA_MIN;
+	net->cc_mod.htcp_ca.bytes_acked = net->mtu;
+	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
+}
+
+static void
+sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/*
+	 * We take the larger of 2 MTUs and the INITIAL_CWND and
+	 * then limit that to 4 MTUs of sending.
+	 */
+	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
+	net->ssthresh = stcb->asoc.peers_rwnd;
+	sctp_enforce_cwnd_limit(&stcb->asoc, net);
+	htcp_init(net);
+
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
+		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
+	}
+}
+
+static void
+sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
+		 struct sctp_association *asoc,
+		 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
+{
+	struct sctp_nets *net;
+
+	/******************************/
+	/* update cwnd and Early FR   */
+	/******************************/
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+
+#ifdef JANA_CMT_FAST_RECOVERY
+		/*
+		 * CMT fast recovery code. Need to debug.
+		 */
+		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
+			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
+			    SCTP_TSN_GE(net->pseudo_cumack,net->fast_recovery_tsn)) {
+				net->will_exit_fast_recovery = 1;
+			}
+		}
+#endif
+		/* if nothing was acked on this destination skip it */
+		if (net->net_ack == 0) {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
+			}
+			continue;
+		}
+#ifdef JANA_CMT_FAST_RECOVERY
+                /* CMT fast recovery code
+		 */
+		/*
+		if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery && net->will_exit_fast_recovery == 0) {
+		    @@@ Do something
+		 }
+		 else if (sctp_cmt_on_off == 0 && asoc->fast_retran_loss_recovery && will_exit == 0) {
+		*/
+#endif
+
+		if (asoc->fast_retran_loss_recovery &&
+		    will_exit == 0 &&
+		    (asoc->sctp_cmt_on_off == 0)) {
+			/*
+			 * If we are in loss recovery we skip any cwnd
+			 * update
+			 */
+			return;
+		}
+		/*
+		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
+		 * moved.
+		 */
+		if (accum_moved ||
+		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
+			htcp_cong_avoid(stcb, net);
+			measure_achieved_throughput(net);
+		} else {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, net->mtu,
+					SCTP_CWND_LOG_NO_CUMACK);
+			}
+		}
+	}
+}
+
+static void
+sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
+		struct sctp_association *asoc)
+{
+	struct sctp_nets *net;
+	/*
+	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
+	 * (net->fast_retran_loss_recovery == 0)))
+	 */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if ((asoc->fast_retran_loss_recovery == 0) ||
+		    (asoc->sctp_cmt_on_off > 0)) {
+			/* out of a RFC2582 Fast recovery window? */
+			if (net->net_ack > 0) {
+				/*
+				 * per section 7.2.3, are there any
+				 * destinations that had a fast retransmit
+				 * to them? If so, what we need to do is
+				 * adjust ssthresh and cwnd.
+				 */
+				struct sctp_tmit_chunk *lchk;
+				int old_cwnd = net->cwnd;
+
+				/* JRS - reset as if state were changed */
+				htcp_reset(&net->cc_mod.htcp_ca);
+				net->ssthresh = htcp_recalc_ssthresh(net);
+				net->cwnd = net->ssthresh;
+				sctp_enforce_cwnd_limit(asoc, net);
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
+						SCTP_CWND_LOG_FROM_FR);
+				}
+				lchk = TAILQ_FIRST(&asoc->send_queue);
+
+				net->partial_bytes_acked = 0;
+				/* Turn on fast recovery window */
+				asoc->fast_retran_loss_recovery = 1;
+				if (lchk == NULL) {
+					/* Mark end of the window */
+					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
+				} else {
+					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+				}
+
+				/*
+				 * CMT fast recovery -- per destination
+				 * recovery variable.
+				 */
+				net->fast_retran_loss_recovery = 1;
+
+				if (lchk == NULL) {
+					/* Mark end of the window */
+					net->fast_recovery_tsn = asoc->sending_seq - 1;
+				} else {
+					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
+				}
+
+				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
+						stcb->sctp_ep, stcb, net,
+				                SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
+				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+						 stcb->sctp_ep, stcb, net);
+			}
+		} else if (net->net_ack > 0) {
+			/*
+			 * Mark a peg that we WOULD have done a cwnd
+			 * reduction but RFC2582 prevented this action.
+			 */
+			SCTP_STAT_INCR(sctps_fastretransinrtt);
+		}
+	}
+}
+
+static void
+sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
+	struct sctp_nets *net)
+{
+	int old_cwnd = net->cwnd;
+
+	/* JRS - reset as if the state were being changed to timeout */
+	htcp_reset(&net->cc_mod.htcp_ca);
+	net->ssthresh = htcp_recalc_ssthresh(net);
+	net->cwnd = net->mtu;
+	net->partial_bytes_acked = 0;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
+	}
+}
+
+static void
+sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
+		struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
+{
+	int old_cwnd;
+	old_cwnd = net->cwnd;
+
+	/* JRS - reset htcp as if state changed */
+	if (in_window == 0) {
+		htcp_reset(&net->cc_mod.htcp_ca);
+		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
+		net->ssthresh = htcp_recalc_ssthresh(net);
+		if (net->ssthresh < net->mtu) {
+			net->ssthresh = net->mtu;
+			/* here back off the timer as well, to slow us down */
+			net->RTO <<= 1;
+		}
+		net->cwnd = net->ssthresh;
+		sctp_enforce_cwnd_limit(&stcb->asoc, net);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
+		}
+	}
+}
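+
+/*
+ * The table below collects the four pluggable congestion control modules in
+ * the order an application selects them (the default RFC 4960 behaviour,
+ * the HS-TCP variant, H-TCP and the RTCC variant); which entry is active is
+ * chosen per association, presumably via the SCTP_PLUGGABLE_CC socket
+ * option defined in sctp.h.
+ */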
+
+const struct sctp_cc_functions sctp_cc_functions[] = {
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_set_initial_cc_param,
+	sctp_cwnd_update_after_sack,
+	sctp_cwnd_update_exit_pf_common,
+	sctp_cwnd_update_after_fr,
+	sctp_cwnd_update_after_timeout,
+	sctp_cwnd_update_after_ecn_echo,
+	sctp_cwnd_update_after_packet_dropped,
+	sctp_cwnd_update_after_output,
+#else
+	.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
+	.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
+	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+	.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
+	.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
+	.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
+	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+#endif
+},
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_set_initial_cc_param,
+	sctp_hs_cwnd_update_after_sack,
+	sctp_cwnd_update_exit_pf_common,
+	sctp_hs_cwnd_update_after_fr,
+	sctp_cwnd_update_after_timeout,
+	sctp_cwnd_update_after_ecn_echo,
+	sctp_cwnd_update_after_packet_dropped,
+	sctp_cwnd_update_after_output,
+#else
+	.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
+	.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
+	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+	.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
+	.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
+	.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
+	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+#endif
+},
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_htcp_set_initial_cc_param,
+	sctp_htcp_cwnd_update_after_sack,
+	sctp_cwnd_update_exit_pf_common,
+	sctp_htcp_cwnd_update_after_fr,
+	sctp_htcp_cwnd_update_after_timeout,
+	sctp_htcp_cwnd_update_after_ecn_echo,
+	sctp_cwnd_update_after_packet_dropped,
+	sctp_cwnd_update_after_output,
+#else
+	.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
+	.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
+	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+	.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
+	.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
+	.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
+	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+#endif
+},
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_set_rtcc_initial_cc_param,
+	sctp_cwnd_update_rtcc_after_sack,
+	sctp_cwnd_update_exit_pf_common,
+	sctp_cwnd_update_after_fr,
+	sctp_cwnd_update_after_timeout,
+	sctp_cwnd_update_rtcc_after_ecn_echo,
+	sctp_cwnd_update_after_packet_dropped,
+	sctp_cwnd_update_after_output,
+	sctp_cwnd_update_rtcc_packet_transmitted,
+	sctp_cwnd_update_rtcc_tsn_acknowledged,
+	sctp_cwnd_new_rtcc_transmission_begins,
+	sctp_cwnd_prepare_rtcc_net_for_sack,
+	sctp_cwnd_rtcc_socket_option,
+	sctp_rtt_rtcc_calculated
+#else
+	.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
+	.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
+	.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
+	.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
+	.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
+	.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
+	.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
+	.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
+	.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
+	.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
+	.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
+	.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
+	.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
+	.sctp_rtt_calculated = sctp_rtt_rtcc_calculated
+#endif
+}
+};
diff --git a/usrsctplib/netinet/sctp_constants.h b/usrsctplib/netinet/sctp_constants.h
new file mode 100755
index 0000000..e275dd9
--- /dev/null
+++ b/usrsctplib/netinet/sctp_constants.h
@@ -0,0 +1,1103 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_constants.h 309682 2016-12-07 19:30:59Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_CONSTANTS_H_
+#define _NETINET_SCTP_CONSTANTS_H_
+
+#if defined(__Userspace_os_Windows)
+extern void getwintimeofday(struct timeval *tv);
+#endif
+
+/* IANA assigned port number for SCTP over UDP encapsulation */
+#define SCTP_OVER_UDP_TUNNELING_PORT 9899
+
+/* Number of packets to get before sack sent by default */
+#define SCTP_DEFAULT_SACK_FREQ 2
+
+/* Address limit - This variable is calculated
+ * based on a 65535 byte max ip packet. We take out 100 bytes
+ * for the cookie, 40 bytes for a v6 header and 32
+ * bytes for the init structure. A second init structure
+ * for the init-ack and then finally a third one for the
+ * embedded init. This yields 100+40+(3 * 32) = 236 bytes.
+ * This leaves 65299 bytes for addresses. We throw out the 299 bytes.
+ * Now whatever we send in the INIT() we need to allow to get back in the
+ * INIT-ACK plus all the values from INIT and INIT-ACK
+ * listed in the cookie. Plus we need some overhead for
+ * maybe copied parameters in the COOKIE. If we
+ * allow 1080 addresses, and each side has 1080 V6 addresses
+ * that will be 21600 bytes. In the INIT-ACK we will
+ * see 21600 bytes of addresses plus 43200 in the cookie. This leaves
+ * about 500 bytes slack for misc things in the cookie.
+ */
+#define SCTP_ADDRESS_LIMIT 1080
+
+/* We need at least 2k of space for ourselves; for inits
+ * larger than that, let's abort.
+ */
+#define SCTP_LARGEST_INIT_ACCEPTED (65535 - 2048)
+
+/* Largest length of a chunk */
+#define SCTP_MAX_CHUNK_LENGTH 0xffff
+/* Largest length of an error cause */
+#define SCTP_MAX_CAUSE_LENGTH 0xffff
+/* Number of addresses where we just skip the counting */
+#define SCTP_COUNT_LIMIT 40
+
+#define SCTP_ZERO_COPY_TICK_DELAY (((100 * hz) + 999) / 1000)
+#define SCTP_ZERO_COPY_SENDQ_TICK_DELAY (((100 * hz) + 999) / 1000)
+
+/* Number of ticks to delay before running
+ * iterator on an address change.
+ */
+#define SCTP_ADDRESS_TICK_DELAY 2
+
+#define SCTP_VERSION_STRING "KAME-BSD 1.1"
+/* #define SCTP_AUDITING_ENABLED 1 used for debug/auditing */
+#define SCTP_AUDIT_SIZE 256
+
+
+#define SCTP_KTRHEAD_NAME "sctp_iterator"
+#define SCTP_KTHREAD_PAGES 0
+
+#define SCTP_MCORE_NAME "sctp_core_worker"
+
+
+/* If you support Multi-VRF, how big to
+ * make the initial array of VRFs.
+ */
+#define SCTP_DEFAULT_VRF_SIZE 4
+
+/* constants for rto calc */
+#define sctp_align_safe_nocopy 0
+#define sctp_align_unsafe_makecopy 1
+
+/* JRS - Values defined for the HTCP algorithm */
+#define ALPHA_BASE	(1<<7)  /* 1.0 with shift << 7 */
+#define BETA_MIN	(1<<6)  /* 0.5 with shift << 7 */
+#define BETA_MAX	102	/* 0.8 with shift << 7 */
+
+/* Places that CWND log can happen from */
+#define SCTP_CWND_LOG_FROM_FR	1
+#define SCTP_CWND_LOG_FROM_RTX	2
+#define SCTP_CWND_LOG_FROM_BRST	3
+#define SCTP_CWND_LOG_FROM_SS	4
+#define SCTP_CWND_LOG_FROM_CA	5
+#define SCTP_CWND_LOG_FROM_SAT	6
+#define SCTP_BLOCK_LOG_INTO_BLK 7
+#define SCTP_BLOCK_LOG_OUTOF_BLK 8
+#define SCTP_BLOCK_LOG_CHECK     9
+#define SCTP_STR_LOG_FROM_INTO_STRD 10
+#define SCTP_STR_LOG_FROM_IMMED_DEL 11
+#define SCTP_STR_LOG_FROM_INSERT_HD 12
+#define SCTP_STR_LOG_FROM_INSERT_MD 13
+#define SCTP_STR_LOG_FROM_INSERT_TL 14
+#define SCTP_STR_LOG_FROM_MARK_TSN  15
+#define SCTP_STR_LOG_FROM_EXPRS_DEL 16
+#define SCTP_FR_LOG_BIGGEST_TSNS    17
+#define SCTP_FR_LOG_STRIKE_TEST     18
+#define SCTP_FR_LOG_STRIKE_CHUNK    19
+#define SCTP_FR_T3_TIMEOUT          20
+#define SCTP_MAP_PREPARE_SLIDE      21
+#define SCTP_MAP_SLIDE_FROM         22
+#define SCTP_MAP_SLIDE_RESULT       23
+#define SCTP_MAP_SLIDE_CLEARED	    24
+#define SCTP_MAP_SLIDE_NONE         25
+#define SCTP_FR_T3_MARK_TIME        26
+#define SCTP_FR_T3_MARKED           27
+#define SCTP_FR_T3_STOPPED          28
+#define SCTP_FR_MARKED              30
+#define SCTP_CWND_LOG_NOADV_SS      31
+#define SCTP_CWND_LOG_NOADV_CA      32
+#define SCTP_MAX_BURST_APPLIED      33
+#define SCTP_MAX_IFP_APPLIED        34
+#define SCTP_MAX_BURST_ERROR_STOP   35
+#define SCTP_INCREASE_PEER_RWND     36
+#define SCTP_DECREASE_PEER_RWND     37
+#define SCTP_SET_PEER_RWND_VIA_SACK 38
+#define SCTP_LOG_MBCNT_INCREASE     39
+#define SCTP_LOG_MBCNT_DECREASE     40
+#define SCTP_LOG_MBCNT_CHKSET       41
+#define SCTP_LOG_NEW_SACK           42
+#define SCTP_LOG_TSN_ACKED          43
+#define SCTP_LOG_TSN_REVOKED        44
+#define SCTP_LOG_LOCK_TCB           45
+#define SCTP_LOG_LOCK_INP           46
+#define SCTP_LOG_LOCK_SOCK          47
+#define SCTP_LOG_LOCK_SOCKBUF_R     48
+#define SCTP_LOG_LOCK_SOCKBUF_S     49
+#define SCTP_LOG_LOCK_CREATE        50
+#define SCTP_LOG_INITIAL_RTT        51
+#define SCTP_LOG_RTTVAR             52
+#define SCTP_LOG_SBALLOC            53
+#define SCTP_LOG_SBFREE             54
+#define SCTP_LOG_SBRESULT           55
+#define SCTP_FR_DUPED               56
+#define SCTP_FR_MARKED_EARLY        57
+#define SCTP_FR_CWND_REPORT         58
+#define SCTP_FR_CWND_REPORT_START   59
+#define SCTP_FR_CWND_REPORT_STOP    60
+#define SCTP_CWND_LOG_FROM_SEND     61
+#define SCTP_CWND_INITIALIZATION    62
+#define SCTP_CWND_LOG_FROM_T3       63
+#define SCTP_CWND_LOG_FROM_SACK     64
+#define SCTP_CWND_LOG_NO_CUMACK     65
+#define SCTP_CWND_LOG_FROM_RESEND   66
+#define SCTP_FR_LOG_CHECK_STRIKE    67
+#define SCTP_SEND_NOW_COMPLETES     68
+#define SCTP_CWND_LOG_FILL_OUTQ_CALLED 69
+#define SCTP_CWND_LOG_FILL_OUTQ_FILLS  70
+#define SCTP_LOG_FREE_SENT             71
+#define SCTP_NAGLE_APPLIED          72
+#define SCTP_NAGLE_SKIPPED          73
+#define SCTP_WAKESND_FROM_SACK      74
+#define SCTP_WAKESND_FROM_FWDTSN    75
+#define SCTP_NOWAKE_FROM_SACK       76
+#define SCTP_CWNDLOG_PRESEND        77
+#define SCTP_CWNDLOG_ENDSEND        78
+#define SCTP_AT_END_OF_SACK         79
+#define SCTP_REASON_FOR_SC          80
+#define SCTP_BLOCK_LOG_INTO_BLKA    81
+#define SCTP_ENTER_USER_RECV        82
+#define SCTP_USER_RECV_SACKS        83
+#define SCTP_SORECV_BLOCKSA         84
+#define SCTP_SORECV_BLOCKSB         85
+#define SCTP_SORECV_DONE            86
+#define SCTP_SACK_RWND_UPDATE       87
+#define SCTP_SORECV_ENTER           88
+#define SCTP_SORECV_ENTERPL         89
+#define SCTP_MBUF_INPUT             90
+#define SCTP_MBUF_IALLOC            91
+#define SCTP_MBUF_IFREE             92
+#define SCTP_MBUF_ICOPY             93
+#define SCTP_MBUF_SPLIT             94
+#define SCTP_SORCV_FREECTL          95
+#define SCTP_SORCV_DOESCPY          96
+#define SCTP_SORCV_DOESLCK          97
+#define SCTP_SORCV_DOESADJ          98
+#define SCTP_SORCV_BOTWHILE         99
+#define SCTP_SORCV_PASSBF          100
+#define SCTP_SORCV_ADJD            101
+#define SCTP_UNKNOWN_MAX           102
+#define SCTP_RANDY_STUFF           103
+#define SCTP_RANDY_STUFF1          104
+#define SCTP_STRMOUT_LOG_ASSIGN	   105
+#define SCTP_STRMOUT_LOG_SEND	   106
+#define SCTP_FLIGHT_LOG_DOWN_CA    107
+#define SCTP_FLIGHT_LOG_UP         108
+#define SCTP_FLIGHT_LOG_DOWN_GAP   109
+#define SCTP_FLIGHT_LOG_DOWN_RSND  110
+#define SCTP_FLIGHT_LOG_UP_RSND    111
+#define SCTP_FLIGHT_LOG_DOWN_RSND_TO    112
+#define SCTP_FLIGHT_LOG_DOWN_WP    113
+#define SCTP_FLIGHT_LOG_UP_REVOKE  114
+#define SCTP_FLIGHT_LOG_DOWN_PDRP  115
+#define SCTP_FLIGHT_LOG_DOWN_PMTU  116
+#define SCTP_SACK_LOG_NORMAL	   117
+#define SCTP_SACK_LOG_EXPRESS	   118
+#define SCTP_MAP_TSN_ENTERS        119
+#define SCTP_THRESHOLD_CLEAR       120
+#define SCTP_THRESHOLD_INCR        121
+#define SCTP_FLIGHT_LOG_DWN_WP_FWD 122
+#define SCTP_FWD_TSN_CHECK         123
+#define SCTP_LOG_MAX_TYPES 124
+/*
+ * To turn on various logging, you must first enable 'options KTR' and
+ * you might want to bump the entries: 'options KTR_ENTRIES=80000'.
+ * To get something to log, you define one of the logging defines
+ * (see LINT).
+ *
+ * This compiles the logging in, but you still need to turn the
+ * logging flag on too in the sysctl (see sctp.h).
+ */
+
+#define SCTP_LOG_EVENT_UNKNOWN 0
+#define SCTP_LOG_EVENT_CWND  1
+#define SCTP_LOG_EVENT_BLOCK 2
+#define SCTP_LOG_EVENT_STRM  3
+#define SCTP_LOG_EVENT_FR    4
+#define SCTP_LOG_EVENT_MAP   5
+#define SCTP_LOG_EVENT_MAXBURST 6
+#define SCTP_LOG_EVENT_RWND  7
+#define SCTP_LOG_EVENT_MBCNT 8
+#define SCTP_LOG_EVENT_SACK  9
+#define SCTP_LOG_LOCK_EVENT 10
+#define SCTP_LOG_EVENT_RTT  11
+#define SCTP_LOG_EVENT_SB   12
+#define SCTP_LOG_EVENT_NAGLE 13
+#define SCTP_LOG_EVENT_WAKE 14
+#define SCTP_LOG_MISC_EVENT 15
+#define SCTP_LOG_EVENT_CLOSE 16
+#define SCTP_LOG_EVENT_MBUF 17
+#define SCTP_LOG_CHUNK_PROC 18
+#define SCTP_LOG_ERROR_RET  19
+
+#define SCTP_LOG_MAX_EVENT 20
+
+#define SCTP_LOCK_UNKNOWN 2
+
+
+/* number of associations by default for zone allocation */
+#define SCTP_MAX_NUM_OF_ASOC	40000
+/* how many addresses per assoc remote and local */
+#define SCTP_SCALE_FOR_ADDR	2
+
+/* default MULTIPLE_ASCONF mode enable(1)/disable(0) value (sysctl) */
+#define SCTP_DEFAULT_MULTIPLE_ASCONFS	0
+
+/*
+ * Threshold for rwnd updates: we have to read (sb_hiwat >>
+ * SCTP_RWND_HIWAT_SHIFT) before we will look to see if we need to send a
+ * window update sack. When we look, we compare the last rwnd we sent vs the
+ * current rwnd; it too must be greater than this value. Using 3 divides the
+ * hiwat by 8, so for a 200k rwnd we need to read 24k. For a 64k rwnd we need
+ * to read 8k. This seems about right.. I hope :-D.. we do set a
+ * minimum of an MTU on it, so if the rwnd is really small we will insist
+ * on a full MTU of 1500 bytes.
+ */
+#define SCTP_RWND_HIWAT_SHIFT 3
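+/* Illustrative sketch (not the exact code): a window update SACK is only
+ * considered once
+ *     bytes_read >= (sb_hiwat >> SCTP_RWND_HIWAT_SHIFT)
+ * e.g. sb_hiwat = 65536 gives a threshold of 65536 >> 3 = 8192 bytes.
+ */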
+
+/* How much of the rwnd must a single
+ * message be taking up to start partial delivery.
+ * We calculate this by shifting hi_water (recv_win)
+ * right by the following value: set to 1, partial delivery
+ * starts when a message holds 1/2 the rwnd; set to 2,
+ * when a message holds 1/4 the rwnd, etc.
+ */
+
+#define SCTP_PARTIAL_DELIVERY_SHIFT 1
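+/* Illustrative sketch (not the exact code): partial delivery is considered
+ * once a single message reaches
+ *     msg_len >= (recv_win >> SCTP_PARTIAL_DELIVERY_SHIFT)
+ * i.e. with the default of 1, a message holding half of the rwnd.
+ */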
+
+/*
+ * default HMAC for cookies, etc... use one of the AUTH HMAC id's
+ * SCTP_HMAC is the HMAC_ID to use
+ * SCTP_SIGNATURE_SIZE is the digest length
+ */
+#define SCTP_HMAC		SCTP_AUTH_HMAC_ID_SHA1
+#define SCTP_SIGNATURE_SIZE	SCTP_AUTH_DIGEST_LEN_SHA1
+#define SCTP_SIGNATURE_ALOC_SIZE SCTP_SIGNATURE_SIZE
+
+/*
+ * The SCTP protocol signature; this includes the version number encoded in
+ * the last 4 bits of the signature.
+ */
+#define PROTO_SIGNATURE_A	0x30000000
+#define SCTP_VERSION_NUMBER	0x3
+
+#define MAX_TSN	0xffffffff
+
+/* how many executions every N ticks */
+#define SCTP_ITERATOR_MAX_AT_ONCE 20
+
+/* number of clock ticks between iterator executions */
+#define SCTP_ITERATOR_TICKS 1
+
+/*
+ * option: If you comment out the following you will receive the old behavior
+ * of obeying cwnd for the fast retransmit algorithm. With this defined a FR
+ * happens right away without waiting for the flightsize to drop below the
+ * cwnd value (which is reduced by the FR to 1/2 the inflight packets).
+ */
+#define SCTP_IGNORE_CWND_ON_FR 1
+
+/*
+ * Adds implementer's guide behavior to only use the newest highest update in
+ * SACK gap acks to figure out if you need to strike a chunk for FR.
+ */
+#define SCTP_NO_FR_UNLESS_SEGMENT_SMALLER 1
+
+/* default max I can burst out after a fast retransmit, 0 disables it */
+#define SCTP_DEF_MAX_BURST 4
+#define SCTP_DEF_HBMAX_BURST 4
+#define SCTP_DEF_FRMAX_BURST 4
+
+/* RTO calculation flag saying whether it
+ * is safe to determine local LAN or not.
+ */
+#define SCTP_RTT_FROM_NON_DATA 0
+#define SCTP_RTT_FROM_DATA     1
+
+#define PR_SCTP_UNORDERED_FLAG 0x0001
+
+/* IP hdr (20/40) + 12+2+2 (enet) + sctp common 12 */
+#define SCTP_FIRST_MBUF_RESV 68
+/* Packet transmit states in the sent field */
+#define SCTP_DATAGRAM_UNSENT		0
+#define SCTP_DATAGRAM_SENT		1
+#define SCTP_DATAGRAM_RESEND1		2	/* not used (in code, but may
+						 * hit this value) */
+#define SCTP_DATAGRAM_RESEND2		3	/* not used (in code, but may
+						 * hit this value) */
+#define SCTP_DATAGRAM_RESEND		4
+#define SCTP_DATAGRAM_ACKED		10010
+#define SCTP_DATAGRAM_MARKED		20010
+#define SCTP_FORWARD_TSN_SKIP		30010
+#define SCTP_DATAGRAM_NR_ACKED		40010
+
+/* chunk output send from locations */
+#define SCTP_OUTPUT_FROM_USR_SEND       0
+#define SCTP_OUTPUT_FROM_T3       	1
+#define SCTP_OUTPUT_FROM_INPUT_ERROR    2
+#define SCTP_OUTPUT_FROM_CONTROL_PROC   3
+#define SCTP_OUTPUT_FROM_SACK_TMR       4
+#define SCTP_OUTPUT_FROM_SHUT_TMR       5
+#define SCTP_OUTPUT_FROM_HB_TMR         6
+#define SCTP_OUTPUT_FROM_SHUT_ACK_TMR   7
+#define SCTP_OUTPUT_FROM_ASCONF_TMR     8
+#define SCTP_OUTPUT_FROM_STRRST_TMR     9
+#define SCTP_OUTPUT_FROM_AUTOCLOSE_TMR  10
+#define SCTP_OUTPUT_FROM_EARLY_FR_TMR   11
+#define SCTP_OUTPUT_FROM_STRRST_REQ     12
+#define SCTP_OUTPUT_FROM_USR_RCVD       13
+#define SCTP_OUTPUT_FROM_COOKIE_ACK     14
+#define SCTP_OUTPUT_FROM_DRAIN          15
+#define SCTP_OUTPUT_FROM_CLOSING        16
+#define SCTP_OUTPUT_FROM_SOCKOPT        17
+
+/* SCTP chunk types have been moved to sctp.h for application (NAT, FW) use */
+
+/* align to 32-bit sizes */
+#define SCTP_SIZE32(x)	((((x) + 3) >> 2) << 2)
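+/* Rounds a length up to the next multiple of 4, e.g.
+ * SCTP_SIZE32(1) == 4, SCTP_SIZE32(4) == 4, SCTP_SIZE32(5) == 8.
+ */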
+
+#define IS_SCTP_CONTROL(a) (((a)->chunk_type != SCTP_DATA) && ((a)->chunk_type != SCTP_IDATA))
+#define IS_SCTP_DATA(a) (((a)->chunk_type == SCTP_DATA) || ((a)->chunk_type == SCTP_IDATA))
+
+
+/* SCTP parameter types */
+/*************0x0000 series*************/
+#define SCTP_HEARTBEAT_INFO		0x0001
+#if defined(__Userspace__)
+#define SCTP_CONN_ADDRESS               0x0004
+#endif
+#define SCTP_IPV4_ADDRESS		0x0005
+#define SCTP_IPV6_ADDRESS		0x0006
+#define SCTP_STATE_COOKIE		0x0007
+#define SCTP_UNRECOG_PARAM		0x0008
+#define SCTP_COOKIE_PRESERVE		0x0009
+#define SCTP_HOSTNAME_ADDRESS		0x000b
+#define SCTP_SUPPORTED_ADDRTYPE		0x000c
+
+/* draft-ietf-stewart-tsvwg-strreset-xxx */
+#define SCTP_STR_RESET_OUT_REQUEST	0x000d
+#define SCTP_STR_RESET_IN_REQUEST	0x000e
+#define SCTP_STR_RESET_TSN_REQUEST	0x000f
+#define SCTP_STR_RESET_RESPONSE		0x0010
+#define SCTP_STR_RESET_ADD_OUT_STREAMS	0x0011
+#define SCTP_STR_RESET_ADD_IN_STREAMS   0x0012
+
+#define SCTP_MAX_RESET_PARAMS 2
+#define SCTP_STREAM_RESET_TSN_DELTA    0x1000
+
+/*************0x4000 series*************/
+
+/*************0x8000 series*************/
+#define SCTP_ECN_CAPABLE		0x8000
+
+/* draft-ietf-tsvwg-auth-xxx */
+#define SCTP_RANDOM			0x8002
+#define SCTP_CHUNK_LIST			0x8003
+#define SCTP_HMAC_LIST			0x8004
+/*
+ * draft-ietf-tsvwg-addip-sctp-xx: param = 0x8008, len = 0xNNNN, followed by
+ * one byte per supported chunk type extension.
+ *
+ * For example, to support all chunks one would have (in hex):
+ *
+ * 80 08 00 09 C0 C1 80 81 82 00 00 00
+ *
+ * C0 = PR-SCTP (RFC3758), C1, 80 = ASCONF (addip draft),
+ * 81 = Packet Drop, 82 = Stream Reset, 83 = Authentication
+ */
+#define SCTP_SUPPORTED_CHUNK_EXT    0x8008
+
+/*************0xC000 series*************/
+#define SCTP_PRSCTP_SUPPORTED		0xc000
+/* draft-ietf-tsvwg-addip-sctp */
+#define SCTP_ADD_IP_ADDRESS		0xc001
+#define SCTP_DEL_IP_ADDRESS		0xc002
+#define SCTP_ERROR_CAUSE_IND		0xc003
+#define SCTP_SET_PRIM_ADDR		0xc004
+#define SCTP_SUCCESS_REPORT		0xc005
+#define SCTP_ULP_ADAPTATION		0xc006
+/* behave-nat-draft */
+#define SCTP_HAS_NAT_SUPPORT            0xc007
+#define SCTP_NAT_VTAGS                  0xc008
+
+/* bits for TOS field */
+#define SCTP_ECT0_BIT		0x02
+#define SCTP_ECT1_BIT		0x01
+#define SCTP_CE_BITS		0x03
+
+/* below turns off above */
+#define SCTP_FLEXIBLE_ADDRESS	0x20
+#define SCTP_NO_HEARTBEAT	0x40
+
+/* mask to get sticky */
+#define SCTP_STICKY_OPTIONS_MASK	0x0c
+
+
+/*
+ * SCTP states for internal state machine
+ */
+#define SCTP_STATE_EMPTY		0x0000
+#define SCTP_STATE_INUSE		0x0001
+#define SCTP_STATE_COOKIE_WAIT		0x0002
+#define SCTP_STATE_COOKIE_ECHOED	0x0004
+#define SCTP_STATE_OPEN			0x0008
+#define SCTP_STATE_SHUTDOWN_SENT	0x0010
+#define SCTP_STATE_SHUTDOWN_RECEIVED	0x0020
+#define SCTP_STATE_SHUTDOWN_ACK_SENT	0x0040
+#define SCTP_STATE_SHUTDOWN_PENDING	0x0080
+#define SCTP_STATE_CLOSED_SOCKET	0x0100
+#define SCTP_STATE_ABOUT_TO_BE_FREED    0x0200
+#define SCTP_STATE_PARTIAL_MSG_LEFT     0x0400
+#define SCTP_STATE_WAS_ABORTED          0x0800
+#define SCTP_STATE_IN_ACCEPT_QUEUE      0x1000
+#define SCTP_STATE_MASK			0x007f
+
+#define SCTP_GET_STATE(asoc)	((asoc)->state & SCTP_STATE_MASK)
+#define SCTP_SET_STATE(asoc, newstate)  ((asoc)->state = ((asoc)->state & ~SCTP_STATE_MASK) |  newstate)
+#define SCTP_CLEAR_SUBSTATE(asoc, substate) ((asoc)->state &= ~substate)
+#define SCTP_ADD_SUBSTATE(asoc, substate) ((asoc)->state |= substate)
+
+/* SCTP reachability state for each address */
+#define SCTP_ADDR_REACHABLE		0x001
+#define SCTP_ADDR_NO_PMTUD              0x002
+#define SCTP_ADDR_NOHB			0x004
+#define SCTP_ADDR_BEING_DELETED		0x008
+#define SCTP_ADDR_NOT_IN_ASSOC		0x010
+#define SCTP_ADDR_OUT_OF_SCOPE		0x080
+#define SCTP_ADDR_UNCONFIRMED		0x200
+#define SCTP_ADDR_REQ_PRIMARY           0x400
+/* JRS 5/13/07 - Added potentially failed state for CMT PF */
+#define SCTP_ADDR_PF                    0x800
+
+/* bound address types (e.g. valid address types to allow) */
+#define SCTP_BOUND_V6		0x01
+#define SCTP_BOUND_V4		0x02
+
+/*
+ * what is the default number of mbufs in a chain I allow before switching to
+ * a cluster
+ */
+#define SCTP_DEFAULT_MBUFS_IN_CHAIN 5
+
+/* How long a cookie lives, in milliseconds */
+#define SCTP_DEFAULT_COOKIE_LIFE	60000
+
+/* Maximum the mapping array will  grow to (TSN mapping array) */
+#define SCTP_MAPPING_ARRAY	512
+
+/* size of the initial malloc on the mapping array */
+#define SCTP_INITIAL_MAPPING_ARRAY  16
+/* how much we grow the mapping array each call */
+#define SCTP_MAPPING_ARRAY_INCR     32
+
+/*
+ * Here we define the timer types used by the implementation as arguments in
+ * the set/get timer type calls.
+ */
+#define SCTP_TIMER_INIT 	0
+#define SCTP_TIMER_RECV 	1
+#define SCTP_TIMER_SEND 	2
+#define SCTP_TIMER_HEARTBEAT	3
+#define SCTP_TIMER_PMTU		4
+#define SCTP_TIMER_MAXSHUTDOWN	5
+#define SCTP_TIMER_SIGNATURE	6
+/*
+ * number of timer types in the base SCTP structure used in the set/get and
+ * has the base default.
+ */
+#define SCTP_NUM_TMRS	7
+
+/* timer types */
+#define SCTP_TIMER_TYPE_NONE		0
+#define SCTP_TIMER_TYPE_SEND		1
+#define SCTP_TIMER_TYPE_INIT		2
+#define SCTP_TIMER_TYPE_RECV		3
+#define SCTP_TIMER_TYPE_SHUTDOWN	4
+#define SCTP_TIMER_TYPE_HEARTBEAT	5
+#define SCTP_TIMER_TYPE_COOKIE		6
+#define SCTP_TIMER_TYPE_NEWCOOKIE	7
+#define SCTP_TIMER_TYPE_PATHMTURAISE	8
+#define SCTP_TIMER_TYPE_SHUTDOWNACK	9
+#define SCTP_TIMER_TYPE_ASCONF		10
+#define SCTP_TIMER_TYPE_SHUTDOWNGUARD	11
+#define SCTP_TIMER_TYPE_AUTOCLOSE	12
+#define SCTP_TIMER_TYPE_EVENTWAKE	13
+#define SCTP_TIMER_TYPE_STRRESET        14
+#define SCTP_TIMER_TYPE_INPKILL         15
+#define SCTP_TIMER_TYPE_ASOCKILL        16
+#define SCTP_TIMER_TYPE_ADDR_WQ         17
+#define SCTP_TIMER_TYPE_ZERO_COPY       18
+#define SCTP_TIMER_TYPE_ZCOPY_SENDQ     19
+#define SCTP_TIMER_TYPE_PRIM_DELETED    20
+/* add new timers here - and increment LAST */
+#define SCTP_TIMER_TYPE_LAST            21
+
+#define SCTP_IS_TIMER_TYPE_VALID(t)	(((t) > SCTP_TIMER_TYPE_NONE) && \
+					 ((t) < SCTP_TIMER_TYPE_LAST))
+
+
+#if defined(__APPLE__)
+/* Number of ticks to run the main timer at in msec */
+#define SCTP_MAIN_TIMER_DEFAULT		10
+#endif
+
+/* max number of TSN's dup'd that I will hold */
+#define SCTP_MAX_DUP_TSNS	20
+
+/*
+ * Here we define the types used when setting the retry amounts.
+ */
+/* How many drop re-attempts we make on  INIT/COOKIE-ECHO */
+#define SCTP_RETRY_DROPPED_THRESH 4
+
+/*
+ * Maximum number of chunks a single association can have on it. Note that
+ * this is a squishy number, since the count can run over this if the user
+ * sends a large message down: the fragmented chunks don't count until
+ * AFTER the message is on the queue, so it would be the next send that blocks
+ * things. This number gets tuned up at boot in sctp_init() using the
+ * number of clusters as a base. This way high-bandwidth environments will
+ * not get impacted by low-bandwidth senders queueing a bunch of 1-byte chunks.
+ */
+#ifdef __Panda__
+#define SCTP_ASOC_MAX_CHUNKS_ON_QUEUE 10240
+#else
+#define SCTP_ASOC_MAX_CHUNKS_ON_QUEUE 512
+#endif
+
+
+/* The conversion from time to ticks and vice versa is done by rounding
+ * upwards. This way we can test in the code the time to be positive and
+ * know that this corresponds to a positive number of ticks.
+ */
+#define MSEC_TO_TICKS(x) ((hz == 1000) ? x : ((((x) * hz) + 999) / 1000))
+#define TICKS_TO_MSEC(x) ((hz == 1000) ? x : ((((x) * 1000) + (hz - 1)) / hz))
+
+#define SEC_TO_TICKS(x) ((x) * hz)
+#define TICKS_TO_SEC(x) (((x) + (hz - 1)) / hz)
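+/* Worked example with hz = 100: MSEC_TO_TICKS(15) = ((15 * 100) + 999) / 1000
+ * = 2 ticks (15 ms rounds up to 20 ms), and TICKS_TO_MSEC(2) =
+ * ((2 * 1000) + 99) / 100 = 20 ms; rounding upwards keeps a positive time
+ * mapping to a positive number of ticks.
+ */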
+
+/*
+ * Basically the minimum amount of time before I do an early FR. Making this
+ * value too low will cause duplicate retransmissions.
+ */
+#define SCTP_MINFR_MSEC_TIMER 250
+/* The floor this value is allowed to fall to when starting a timer. */
+#define SCTP_MINFR_MSEC_FLOOR 20
+
+/* init timer def = 1 sec */
+#define SCTP_INIT_SEC	1
+
+/* send timer def = 1 second */
+#define SCTP_SEND_SEC	1
+
+/* recv timer def = 200ms  */
+#define SCTP_RECV_MSEC	200
+
+/* 30 seconds + RTO (in ms) */
+#define SCTP_HB_DEFAULT_MSEC	30000
+
+/*
+ * This is how long a secret lives, NOT how long a cookie lives: how many
+ * seconds the current secret will remain valid.
+ */
+#define SCTP_DEFAULT_SECRET_LIFE_SEC 3600
+
+#define SCTP_RTO_UPPER_BOUND	(60000)	/* 60 sec in ms */
+#define SCTP_RTO_LOWER_BOUND	(1000)	/* 1 sec in ms */
+#define SCTP_RTO_INITIAL	(3000)	/* 3 sec in ms */
+
+
+#define SCTP_INP_KILL_TIMEOUT 20	/* number of ms to retry kill of inpcb */
+#define SCTP_ASOC_KILL_TIMEOUT 10	/* number of ms to retry kill of an asoc */
+
+#define SCTP_DEF_MAX_INIT		8
+#define SCTP_DEF_MAX_SEND		10
+#define SCTP_DEF_MAX_PATH_RTX		5
+#define SCTP_DEF_PATH_PF_THRESHOLD	SCTP_DEF_MAX_PATH_RTX
+
+#define SCTP_DEF_PMTU_RAISE_SEC	600	/* 10 min between raise attempts */
+
+
+/* How many streams I request initially by default */
+#define SCTP_OSTREAM_INITIAL 10
+#define SCTP_ISTREAM_INITIAL 2048
+
+/*
+ * How many smallest_mtu's need to increase before a window update sack is
+ * sent (should be a power of 2).
+ */
+/* Send window update (incr * this > hiwat). Should be a power of 2 */
+#define SCTP_MINIMAL_RWND		(4096)	/* minimal rwnd */
+
+#define SCTP_ADDRMAX		16
+
+/* SCTP DEBUG Switch parameters */
+#define SCTP_DEBUG_TIMER1	0x00000001
+#define SCTP_DEBUG_TIMER2	0x00000002	/* unused */
+#define SCTP_DEBUG_TIMER3	0x00000004	/* unused */
+#define SCTP_DEBUG_TIMER4	0x00000008
+#define SCTP_DEBUG_OUTPUT1	0x00000010
+#define SCTP_DEBUG_OUTPUT2	0x00000020
+#define SCTP_DEBUG_OUTPUT3	0x00000040
+#define SCTP_DEBUG_OUTPUT4	0x00000080
+#define SCTP_DEBUG_UTIL1	0x00000100
+#define SCTP_DEBUG_UTIL2	0x00000200	/* unused */
+#define SCTP_DEBUG_AUTH1	0x00000400
+#define SCTP_DEBUG_AUTH2	0x00000800	/* unused */
+#define SCTP_DEBUG_INPUT1	0x00001000
+#define SCTP_DEBUG_INPUT2	0x00002000
+#define SCTP_DEBUG_INPUT3	0x00004000
+#define SCTP_DEBUG_INPUT4	0x00008000	/* unused */
+#define SCTP_DEBUG_ASCONF1	0x00010000
+#define SCTP_DEBUG_ASCONF2	0x00020000
+#define SCTP_DEBUG_OUTPUT5	0x00040000	/* unused */
+#define SCTP_DEBUG_XXX		0x00080000	/* unused */
+#define SCTP_DEBUG_PCB1		0x00100000
+#define SCTP_DEBUG_PCB2		0x00200000	/* unused */
+#define SCTP_DEBUG_PCB3		0x00400000
+#define SCTP_DEBUG_PCB4		0x00800000
+#define SCTP_DEBUG_INDATA1	0x01000000
+#define SCTP_DEBUG_INDATA2	0x02000000	/* unused */
+#define SCTP_DEBUG_INDATA3	0x04000000	/* unused */
+#define SCTP_DEBUG_CRCOFFLOAD	0x08000000	/* unused */
+#define SCTP_DEBUG_USRREQ1	0x10000000	/* unused */
+#define SCTP_DEBUG_USRREQ2	0x20000000	/* unused */
+#define SCTP_DEBUG_PEEL1	0x40000000
+#if defined(__Userspace__)
+#define SCTP_DEBUG_USR 		0x80000000
+#else
+#define SCTP_DEBUG_XXXXX	0x80000000	/* unused */
+#endif
+#define SCTP_DEBUG_ALL		0x7ff3ffff
+#define SCTP_DEBUG_NOISY	0x00040000
+
+/* What the sender needs to see to avoid SWS, or we consider the peer's rwnd 0 */
+#define SCTP_SWS_SENDER_DEF	1420
+
+/*
+ * SWS is scaled to the sb_hiwat of the socket. A value of 2 is hiwat/4, 1
+ * would be hiwat/2 etc.
+ */
+/* What the receiver needs to see in the sockbuf, or we tell the peer it is 1 */
+#define SCTP_SWS_RECEIVER_DEF	3000
+
+#define SCTP_INITIAL_CWND 4380
+
+#define SCTP_DEFAULT_MTU 1500 /* emergency default MTU */
+/* amount peer is obligated to have in rwnd or I will abort */
+#define SCTP_MIN_RWND	1500
+
+#define SCTP_DEFAULT_MAXSEGMENT 65535
+
+#define SCTP_CHUNK_BUFFER_SIZE	512
+#define SCTP_PARAM_BUFFER_SIZE	512
+
+/* small chunk store for looking at chunk_list in auth */
+#define SCTP_SMALL_CHUNK_STORE 260
+
+#define SCTP_HOW_MANY_SECRETS	2	/* how many secrets I keep */
+
+#define SCTP_NUMBER_OF_SECRETS	8	/* or 8 * 4 = 32 octets */
+#define SCTP_SECRET_SIZE	32	/* number of octets in 256 bits */
+
+
+/*
+ * SCTP upper layer notifications
+ */
+#define SCTP_NOTIFY_ASSOC_UP                     1
+#define SCTP_NOTIFY_ASSOC_DOWN                   2
+#define SCTP_NOTIFY_INTERFACE_DOWN               3
+#define SCTP_NOTIFY_INTERFACE_UP                 4
+#define SCTP_NOTIFY_SENT_DG_FAIL                 5
+#define SCTP_NOTIFY_UNSENT_DG_FAIL               6
+#define SCTP_NOTIFY_SPECIAL_SP_FAIL              7
+#define SCTP_NOTIFY_ASSOC_LOC_ABORTED            8
+#define SCTP_NOTIFY_ASSOC_REM_ABORTED            9
+#define SCTP_NOTIFY_ASSOC_RESTART               10
+#define SCTP_NOTIFY_PEER_SHUTDOWN               11
+#define SCTP_NOTIFY_ASCONF_ADD_IP               12
+#define SCTP_NOTIFY_ASCONF_DELETE_IP            13
+#define SCTP_NOTIFY_ASCONF_SET_PRIMARY          14
+#define SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION 15
+#define SCTP_NOTIFY_INTERFACE_CONFIRMED         16
+#define SCTP_NOTIFY_STR_RESET_RECV              17
+#define SCTP_NOTIFY_STR_RESET_SEND              18
+#define SCTP_NOTIFY_STR_RESET_FAILED_OUT        19
+#define SCTP_NOTIFY_STR_RESET_FAILED_IN         20
+#define SCTP_NOTIFY_STR_RESET_DENIED_OUT        21
+#define SCTP_NOTIFY_STR_RESET_DENIED_IN         22
+#define SCTP_NOTIFY_AUTH_NEW_KEY                23
+#define SCTP_NOTIFY_AUTH_FREE_KEY               24
+#define SCTP_NOTIFY_NO_PEER_AUTH                25
+#define SCTP_NOTIFY_SENDER_DRY                  26
+#define SCTP_NOTIFY_REMOTE_ERROR                27
+
+/* This is the value, for messages that are NOT completely
+ * copied down, at which we will start to split the message.
+ * So, with our default, we split only if the piece we
+ * want to take will fill up a full MTU (assuming
+ * a 1500 byte MTU).
+ */
+#define SCTP_DEFAULT_SPLIT_POINT_MIN 2904
+
+/* Maximum length of diagnostic information in error causes */
+#define SCTP_DIAG_INFO_LEN 64
+
+/* ABORT CODES and other tell-tale location
+ * codes are generated by adding the below
+ * to the instance id.
+ */
+
+/* File defines */
+#define SCTP_FROM_SCTP_INPUT        0x10000000
+#define SCTP_FROM_SCTP_PCB          0x20000000
+#define SCTP_FROM_SCTP_INDATA       0x30000000
+#define SCTP_FROM_SCTP_TIMER        0x40000000
+#define SCTP_FROM_SCTP_USRREQ       0x50000000
+#define SCTP_FROM_SCTPUTIL          0x60000000
+#define SCTP_FROM_SCTP6_USRREQ      0x70000000
+#define SCTP_FROM_SCTP_ASCONF       0x80000000
+#define SCTP_FROM_SCTP_OUTPUT       0x90000000
+#define SCTP_FROM_SCTP_PEELOFF      0xa0000000
+#define SCTP_FROM_SCTP_PANDA        0xb0000000
+#define SCTP_FROM_SCTP_SYSCTL       0xc0000000
+#define SCTP_FROM_SCTP_CC_FUNCTIONS 0xd0000000
+
+/* Location ID's */
+#define SCTP_LOC_1  0x00000001
+#define SCTP_LOC_2  0x00000002
+#define SCTP_LOC_3  0x00000003
+#define SCTP_LOC_4  0x00000004
+#define SCTP_LOC_5  0x00000005
+#define SCTP_LOC_6  0x00000006
+#define SCTP_LOC_7  0x00000007
+#define SCTP_LOC_8  0x00000008
+#define SCTP_LOC_9  0x00000009
+#define SCTP_LOC_10 0x0000000a
+#define SCTP_LOC_11 0x0000000b
+#define SCTP_LOC_12 0x0000000c
+#define SCTP_LOC_13 0x0000000d
+#define SCTP_LOC_14 0x0000000e
+#define SCTP_LOC_15 0x0000000f
+#define SCTP_LOC_16 0x00000010
+#define SCTP_LOC_17 0x00000011
+#define SCTP_LOC_18 0x00000012
+#define SCTP_LOC_19 0x00000013
+#define SCTP_LOC_20 0x00000014
+#define SCTP_LOC_21 0x00000015
+#define SCTP_LOC_22 0x00000016
+#define SCTP_LOC_23 0x00000017
+#define SCTP_LOC_24 0x00000018
+#define SCTP_LOC_25 0x00000019
+#define SCTP_LOC_26 0x0000001a
+#define SCTP_LOC_27 0x0000001b
+#define SCTP_LOC_28 0x0000001c
+#define SCTP_LOC_29 0x0000001d
+#define SCTP_LOC_30 0x0000001e
+#define SCTP_LOC_31 0x0000001f
+#define SCTP_LOC_32 0x00000020
+#define SCTP_LOC_33 0x00000021
+#define SCTP_LOC_34 0x00000022
+#define SCTP_LOC_35 0x00000023
+
+
+/* Free assoc codes */
+#define SCTP_NORMAL_PROC      0
+#define SCTP_PCBFREE_NOFORCE  1
+#define SCTP_PCBFREE_FORCE    2
+
+/* From codes for adding addresses */
+#define SCTP_ADDR_IS_CONFIRMED 8
+#define SCTP_ADDR_DYNAMIC_ADDED 6
+#define SCTP_IN_COOKIE_PROC 100
+#define SCTP_ALLOC_ASOC  1
+#define SCTP_LOAD_ADDR_2 2
+#define SCTP_LOAD_ADDR_3 3
+#define SCTP_LOAD_ADDR_4 4
+#define SCTP_LOAD_ADDR_5 5
+
+#define SCTP_DONOT_SETSCOPE 0
+#define SCTP_DO_SETSCOPE 1
+
+
+/* This value determines the default for when
+ * we try to add more to the send queue, if
+ * there is room. This prevents us from cycling
+ * into the copy_resume routine too often if
+ * we have not got enough space to add a decently
+ * sized message. Note that if we have enough
+ * space to complete the message copy we will always
+ * add to the message, no matter what the size. It is
+ * only when we reach the point that we have some left
+ * to add and there is only room for part of it that we
+ * will use this threshold. It is also a sysctl.
+ */
+#define SCTP_DEFAULT_ADD_MORE 1452
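+/* For reference: 1452 corresponds to one full data payload on a 1500 byte
+ * MTU link, assuming roughly 48 bytes of IPv4 + SCTP common + DATA chunk
+ * header overhead (1500 - 48 = 1452).
+ */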
+
+#ifndef SCTP_PCBHASHSIZE
+/* default number of association hash buckets in each endpoint */
+#define SCTP_PCBHASHSIZE 256
+#endif
+#ifndef SCTP_TCBHASHSIZE
+#define SCTP_TCBHASHSIZE 1024
+#endif
+
+#ifndef SCTP_CHUNKQUEUE_SCALE
+#define SCTP_CHUNKQUEUE_SCALE 10
+#endif
+
+#ifdef __FreeBSD__
+/* clock variance is 1 ms */
+#define SCTP_CLOCK_GRANULARITY	1
+#else
+/* clock variance is 10 ms */
+#define SCTP_CLOCK_GRANULARITY	10
+#endif
+#define IP_HDR_SIZE 40		/* we use the size of an IPv6 header here; this
+				 * wastes a small amount for IPv4 but it
+				 * simplifies the IPv6 addition */
+
+/* Argument magic number for sctp_inpcb_free() */
+
+/* third argument */
+#define SCTP_CALLED_DIRECTLY_NOCMPSET     0
+#define SCTP_CALLED_AFTER_CMPSET_OFCLOSE  1
+#define SCTP_CALLED_FROM_INPKILL_TIMER    2
+/* second argument */
+#define SCTP_FREE_SHOULD_USE_ABORT          1
+#define SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE 0
+
+#ifndef IPPROTO_SCTP
+#define IPPROTO_SCTP 132	/* the Official IANA number :-) */
+#endif				/* !IPPROTO_SCTP */
+
+#define SCTP_MAX_DATA_BUNDLING		256
+
+/* modular comparison */
+/* See RFC 1982 for details. */
+#define SCTP_UINT16_GT(a, b) (((a < b) && ((uint16_t)(b - a) > (1U<<15))) || \
+                              ((a > b) && ((uint16_t)(a - b) < (1U<<15))))
+#define SCTP_UINT16_GE(a, b) (SCTP_UINT16_GT(a, b) || (a == b))
+#define SCTP_UINT32_GT(a, b) (((a < b) && ((uint32_t)(b - a) > (1U<<31))) || \
+                              ((a > b) && ((uint32_t)(a - b) < (1U<<31))))
+#define SCTP_UINT32_GE(a, b) (SCTP_UINT32_GT(a, b) || (a == b))
+
+#define SCTP_SSN_GT(a, b) SCTP_UINT16_GT(a, b)
+#define SCTP_SSN_GE(a, b) SCTP_UINT16_GE(a, b)
+#define SCTP_TSN_GT(a, b) SCTP_UINT32_GT(a, b)
+#define SCTP_TSN_GE(a, b) SCTP_UINT32_GE(a, b)
+#define SCTP_MID_GT(i, a, b) (((i) == 1) ? SCTP_UINT32_GT(a, b) : SCTP_UINT16_GT((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_GE(i, a, b) (((i) == 1) ? SCTP_UINT32_GE(a, b) : SCTP_UINT16_GE((uint16_t)a, (uint16_t)b))
+#define SCTP_MID_EQ(i, a, b) (((i) == 1) ? a == b : (uint16_t)a == (uint16_t)b)
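+/* Example of the wrap-around handling: SCTP_TSN_GT(0x00000005, 0xFFFFFFF0) is
+ * true, since (uint32_t)(0xFFFFFFF0 - 0x00000005) = 0xFFFFFFEB exceeds 1U<<31,
+ * so TSN 5 counts as newer than 0xFFFFFFF0 after a wrap. Two values exactly
+ * 2^31 apart compare as neither greater, per RFC 1982.
+ */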
+
+/* Mapping array manipulation routines */
+#define SCTP_IS_TSN_PRESENT(arry, gap) ((arry[(gap >> 3)] >> (gap & 0x07)) & 0x01)
+#define SCTP_SET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] |= (0x01 << ((gap & 0x07))))
+#define SCTP_UNSET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] &= ((~(0x01 << ((gap & 0x07)))) & 0xff))
+#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
+	                if (tsn >= mapping_tsn) { \
+						gap = tsn - mapping_tsn; \
+					} else { \
+						gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
+					} \
+                  } while (0)
+
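+/* Usage sketch (assuming 'map' is the mapping array and 'base_tsn' is the
+ * TSN corresponding to bit 0): to test whether a given 'tsn' has arrived,
+ * one would do something like
+ *     SCTP_CALC_TSN_TO_GAP(gap, tsn, base_tsn);
+ *     if (SCTP_IS_TSN_PRESENT(map, gap)) { ... already seen ... }
+ * e.g. base_tsn = 100 and tsn = 103 give gap = 3, which tests bit 3 of map[0].
+ */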
+
+#define SCTP_RETRAN_DONE -1
+#define SCTP_RETRAN_EXIT -2
+
+/*
+ * This value defines the number of vtag block time wait entries per list
+ * element. Each entry will take two 4 byte ints (plus, of course, the overhead
+ * of the next pointer as well). Using 15 as an example will yield ((8 *
+ * 15) + 8) or 128 bytes of overhead for each timewait block that gets
+ * initialized. Increasing it to 31 would yield 256 bytes per block.
+ */
+#define SCTP_NUMBER_IN_VTAG_BLOCK 15
+/*
+ * If we use the STACK option, we have an array of this many head pointers.
+ * This array is mod'd with the size to find which bucket, and then all
+ * entries must be searched to see if the tag is in timed wait. If so, we
+ * reject it.
+ */
+#define SCTP_STACK_VTAG_HASH_SIZE   32
+
+/*
+ * Number of seconds of time wait for a vtag.
+ */
+#define SCTP_TIME_WAIT 60
+
+/* How many microseconds is the cutoff for
+ * local LAN type RTTs:
+ * we allow 900us for the RTT.
+ */
+#define SCTP_LOCAL_LAN_RTT 900
+#define SCTP_LAN_UNKNOWN  0
+#define SCTP_LAN_LOCAL    1
+#define SCTP_LAN_INTERNET 2
+
+#define SCTP_SEND_BUFFER_SPLITTING 0x00000001
+#define SCTP_RECV_BUFFER_SPLITTING 0x00000002
+
+/* The system retains a cache of free chunks so as to
+ * cut down on calls to the memory allocation system. There
+ * is a per-association limit of free items and an overall
+ * system limit. If either one gets hit then the resource
+ * stops being cached.
+ */
+
+#define SCTP_DEF_ASOC_RESC_LIMIT 10
+#define SCTP_DEF_SYSTEM_RESC_LIMIT 1000
+
+/*-
+ * defines for socket lock states.
+ * Used by __APPLE__ and SCTP_SO_LOCK_TESTING
+ */
+#define SCTP_SO_LOCKED		1
+#define SCTP_SO_NOT_LOCKED	0
+
+
+#define SCTP_HOLDS_LOCK 1
+#define SCTP_NOT_LOCKED 0
+
+/*-
+ * For address locks, do we hold the lock?
+ */
+#define SCTP_ADDR_LOCKED 1
+#define SCTP_ADDR_NOT_LOCKED 0
+
+#define IN4_ISPRIVATE_ADDRESS(a) \
+   ((((uint8_t *)&(a)->s_addr)[0] == 10) || \
+    ((((uint8_t *)&(a)->s_addr)[0] == 172) && \
+     (((uint8_t *)&(a)->s_addr)[1] >= 16) && \
+     (((uint8_t *)&(a)->s_addr)[1] <= 31)) || \
+    ((((uint8_t *)&(a)->s_addr)[0] == 192) && \
+     (((uint8_t *)&(a)->s_addr)[1] == 168)))
+
+#define IN4_ISLOOPBACK_ADDRESS(a) \
+    (((uint8_t *)&(a)->s_addr)[0] == 127)
+
+#define IN4_ISLINKLOCAL_ADDRESS(a) \
+    ((((uint8_t *)&(a)->s_addr)[0] == 169) && \
+     (((uint8_t *)&(a)->s_addr)[1] == 254))
+
+#if defined(__Userspace__)
+#if defined(__Userspace_os_Windows)
+#define SCTP_GETTIME_TIMEVAL(x)	getwintimeofday(x)
+#define SCTP_GETPTIME_TIMEVAL(x) getwintimeofday(x) /* this doesn't seem to ever be used.. */
+#else
+#define SCTP_GETTIME_TIMEVAL(x)	gettimeofday(x, NULL)
+#define SCTP_GETPTIME_TIMEVAL(x) gettimeofday(x, NULL)
+#endif
+#endif
+
+#if defined(_KERNEL)
+#define SCTP_GETTIME_TIMEVAL(x) (getmicrouptime(x))
+#define SCTP_GETPTIME_TIMEVAL(x) (microuptime(x))
+#endif
+
+#if defined(_KERNEL) || defined(__Userspace__)
+#define sctp_sowwakeup(inp, so) \
+do { \
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+		inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+	} else { \
+		sowwakeup(so); \
+	} \
+} while (0)
+
+#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__)
+#define sctp_sowwakeup_locked(inp, so) \
+do { \
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+                SOCKBUF_UNLOCK(&((so)->so_snd)); \
+		inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+	} else { \
+		sowwakeup_locked(so); \
+	} \
+} while (0)
+#else
+#define sctp_sowwakeup_locked(inp, so) \
+do { \
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+                SOCKBUF_UNLOCK(&((so)->so_snd)); \
+		inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEOUTPUT; \
+	} else { \
+		sowwakeup(so); \
+	} \
+} while (0)
+#endif
+
+#define sctp_sorwakeup(inp, so) \
+do { \
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+		inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+	} else { \
+		sorwakeup(so); \
+	} \
+} while (0)
+
+#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__)
+#define sctp_sorwakeup_locked(inp, so) \
+do { \
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+		inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+                SOCKBUF_UNLOCK(&((so)->so_rcv)); \
+	} else { \
+		sorwakeup_locked(so); \
+	} \
+} while (0)
+#else
+
+#define sctp_sorwakeup_locked(inp, so) \
+do { \
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) { \
+		inp->sctp_flags |= SCTP_PCB_FLAGS_WAKEINPUT; \
+                SOCKBUF_UNLOCK(&((so)->so_rcv)); \
+	} else { \
+		sorwakeup(so); \
+	} \
+} while (0)
+#endif
+
+#endif				/* _KERNEL || __Userspace__*/
+#endif
diff --git a/usrsctplib/netinet/sctp_crc32.c b/usrsctplib/netinet/sctp_crc32.c
new file mode 100755
index 0000000..27536f1
--- /dev/null
+++ b/usrsctplib/netinet/sctp_crc32.c
@@ -0,0 +1,825 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_crc32.h>
+#include <netinet/sctp_pcb.h>
+
+
+#if !defined(SCTP_WITH_NO_CSUM)
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+#else
+/**
+ *
+ * Routine Description:
+ *
+ * Computes the CRC32c checksum for the specified buffer using the slicing by 8
+ * algorithm over 64 bit quantities.
+ *
+ * Arguments:
+ *
+ *		p_running_crc - pointer to the initial or final remainder value
+ *				used in CRC computations. It should be set to
+ *				non-NULL if the mode argument is equal to CONT or END
+ *		p_buf - the packet buffer where crc computations are being performed
+ *		length - the length of p_buf in bytes
+ *		init_bytes - the number of initial bytes that need to be processed before
+ *					 aligning p_buf to multiples of 4 bytes
+ *		mode - can be any of the following: BEGIN, CONT, END, BODY, ALIGN
+ *
+ * Return value:
+ *
+ *		The computed CRC32c value
+ */
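+
+/*
+ * For reference only (a sketch, not part of this file): a minimal
+ * bit-at-a-time CRC32c implementation produces the same checksum as the
+ * slicing-by-8 tables below, just one byte per iteration. 0x82F63B78 is the
+ * reflected form of the generator polynomial 0x1EDC6F41; callers start with
+ * crc = 0xffffffff and complement the final value.
+ *
+ *	static uint32_t
+ *	crc32c_bitwise(uint32_t crc, const uint8_t *buf, size_t len)
+ *	{
+ *		int i;
+ *
+ *		while (len--) {
+ *			crc ^= *buf++;
+ *			for (i = 0; i < 8; i++)
+ *				crc = (crc >> 1) ^ (0x82F63B78 & (0 - (crc & 1)));
+ *		}
+ *		return (crc);
+ *	}
+ */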
+
+
+/*
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ *
+ * This software program is licensed subject to the BSD License, available at
+ * http://www.opensource.org/licenses/bsd-license.html.
+ *
+ * Abstract:
+ *
+ * Tables for software CRC generation
+ */
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o32[256] =
+{
+	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o32
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o40[256] =
+{
+	0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
+	0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
+	0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
+	0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
+	0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
+	0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
+	0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
+	0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
+	0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
+	0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
+	0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
+	0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
+	0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
+	0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
+	0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
+	0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
+	0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
+	0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
+	0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
+	0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
+	0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
+	0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
+	0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
+	0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
+	0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
+	0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
+	0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
+	0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
+	0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
+	0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
+	0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
+	0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o40
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o48[256] =
+{
+	0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
+	0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
+	0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
+	0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
+	0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
+	0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
+	0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
+	0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
+	0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
+	0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
+	0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
+	0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
+	0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
+	0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
+	0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
+	0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
+	0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
+	0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
+	0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
+	0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
+	0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
+	0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
+	0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
+	0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
+	0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
+	0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
+	0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
+	0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
+	0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
+	0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
+	0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
+	0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o48
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o56[256] =
+{
+	0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
+	0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
+	0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
+	0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
+	0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
+	0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
+	0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
+	0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
+	0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
+	0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
+	0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
+	0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
+	0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
+	0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
+	0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
+	0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
+	0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
+	0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
+	0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
+	0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
+	0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
+	0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
+	0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
+	0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
+	0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
+	0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
+	0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
+	0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
+	0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
+	0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
+	0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
+	0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o56
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o64[256] =
+{
+	0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
+	0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
+	0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
+	0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
+	0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
+	0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
+	0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
+	0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
+	0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
+	0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
+	0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
+	0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
+	0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
+	0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
+	0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
+	0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
+	0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
+	0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
+	0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
+	0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
+	0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
+	0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
+	0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
+	0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
+	0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
+	0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
+	0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
+	0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
+	0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
+	0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
+	0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
+	0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o64
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o72[256] =
+{
+	0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
+	0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
+	0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
+	0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
+	0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
+	0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
+	0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
+	0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
+	0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
+	0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
+	0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
+	0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
+	0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
+	0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
+	0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
+	0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
+	0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
+	0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
+	0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
+	0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
+	0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
+	0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
+	0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
+	0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
+	0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
+	0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
+	0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
+	0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
+	0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
+	0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
+	0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
+	0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o72
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o80[256] =
+{
+	0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
+	0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
+	0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
+	0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
+	0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
+	0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
+	0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
+	0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
+	0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
+	0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
+	0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
+	0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
+	0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
+	0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
+	0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
+	0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
+	0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
+	0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
+	0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
+	0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
+	0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
+	0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
+	0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
+	0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
+	0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
+	0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
+	0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
+	0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
+	0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
+	0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
+	0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
+	0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o80
+ */
+
+
+
+/*
+ * The following CRC lookup table was generated automagically using the
+ * following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ * Directory Name = ....................... .\
+ * File Name = ............................ 8x256_tables.c
+ */
+
+static const uint32_t sctp_crc_tableil8_o88[256] =
+{
+	0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
+	0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
+	0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
+	0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
+	0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
+	0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
+	0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
+	0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
+	0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
+	0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
+	0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
+	0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
+	0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
+	0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
+	0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
+	0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
+	0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
+	0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
+	0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
+	0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
+	0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
+	0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
+	0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
+	0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
+	0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
+	0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
+	0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
+	0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
+	0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
+	0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
+	0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
+	0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
+};
+
+/*
+ * end of the CRC lookup table crc_tableil8_o88
+ */
+
+
+static uint32_t
+sctp_crc32c_sb8_64_bit(uint32_t crc,
+                       const unsigned char *p_buf,
+                       uint32_t length,
+                       uint32_t init_bytes)
+{
+	uint32_t li;
+	uint32_t term1, term2;
+	uint32_t running_length;
+	uint32_t end_bytes;
+
+	running_length = ((length - init_bytes) / 8) * 8;
+	end_bytes = length - init_bytes - running_length;
+
+	for (li = 0; li < init_bytes; li++)
+		crc = sctp_crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^
+		    (crc >> 8);
+	for (li = 0; li < running_length / 8; li++) {
+#if BYTE_ORDER == BIG_ENDIAN
+		crc ^= *p_buf++;
+		crc ^= (*p_buf++) << 8;
+		crc ^= (*p_buf++) << 16;
+		crc ^= (*p_buf++) << 24;
+#else
+		crc ^= *(const uint32_t *) p_buf;
+		p_buf += 4;
+#endif
+		term1 = sctp_crc_tableil8_o88[crc & 0x000000FF] ^
+		    sctp_crc_tableil8_o80[(crc >> 8) & 0x000000FF];
+		term2 = crc >> 16;
+		crc = term1 ^
+		    sctp_crc_tableil8_o72[term2 & 0x000000FF] ^
+		    sctp_crc_tableil8_o64[(term2 >> 8) & 0x000000FF];
+
+#if BYTE_ORDER == BIG_ENDIAN
+		crc ^= sctp_crc_tableil8_o56[*p_buf++];
+		crc ^= sctp_crc_tableil8_o48[*p_buf++];
+		crc ^= sctp_crc_tableil8_o40[*p_buf++];
+		crc ^= sctp_crc_tableil8_o32[*p_buf++];
+#else
+		term1 = sctp_crc_tableil8_o56[(*(const uint32_t *) p_buf) & 0x000000FF] ^
+		    sctp_crc_tableil8_o48[((*(const uint32_t *) p_buf) >> 8) & 0x000000FF];
+
+		term2 = (*(const uint32_t *) p_buf) >> 16;
+		crc = crc ^
+		    term1 ^
+		    sctp_crc_tableil8_o40[term2 & 0x000000FF] ^
+		    sctp_crc_tableil8_o32[(term2 >> 8) & 0x000000FF];
+		p_buf += 4;
+#endif
+	}
+	for (li = 0; li < end_bytes; li++)
+		crc = sctp_crc_tableil8_o32[(crc ^ *p_buf++) & 0x000000FF] ^
+		    (crc >> 8);
+	return (crc);
+}
+
+
+/**
+ *
+ * Routine Description:
+ *
+ * Computes the CRC32c of a buffer with the slicing-by-8 tables,
+ * stepping byte-wise up to a 4-byte boundary first.
+ *
+ * Arguments:
+ *
+ *		crc32c - the running CRC value
+ *		buffer - the data to checksum
+ *		length - the number of bytes
+ *
+ * Return value:
+ *
+ *		the updated CRC32c value
+ */
+static uint32_t
+multitable_crc32c(uint32_t crc32c,
+                  const unsigned char *buffer,
+                  unsigned int length)
+{
+	uint32_t to_even_word;
+
+	if (length == 0) {
+		return (crc32c);
+	}
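+	/*
+	 * Step byte-wise up to the next 4-byte boundary. Note that for an
+	 * already aligned buffer this yields 4, so up to four leading bytes
+	 * are always fed through the byte-at-a-time prologue.
+	 */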
+	to_even_word = (4 - (((uintptr_t) buffer) & 0x3));
+	return (sctp_crc32c_sb8_64_bit(crc32c, buffer, length, to_even_word));
+}
+
+static const uint32_t sctp_crc_c[256] = {
+	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
+	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
+	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
+	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
+	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
+	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
+	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
+	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
+	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
+	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
+	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
+	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
+	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
+	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
+	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
+	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
+	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
+	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
+	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
+	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
+	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
+	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
+	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
+	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
+	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
+	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
+	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
+	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
+	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
+	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
+	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
+	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
+	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
+};
+
+
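+/*
+ * Classic byte-at-a-time update for the reflected CRC32c polynomial: fold
+ * the next data byte into the low byte of the register, then shift and
+ * substitute through the single lookup table.
+ */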
+#define SCTP_CRC32C(c,d) (c=(c>>8)^sctp_crc_c[(c^(d))&0xFF])
+
+static uint32_t
+singletable_crc32c(uint32_t crc32c,
+                   const unsigned char *buffer,
+                   unsigned int length)
+{
+	unsigned int i;
+
+	for (i = 0; i < length; i++) {
+		SCTP_CRC32C(crc32c, buffer[i]);
+	}
+	return (crc32c);
+}
+
+#if defined(__Userspace__)
+uint32_t
+#else
+static uint32_t
+#endif
+calculate_crc32c(uint32_t crc32c,
+                 const unsigned char *buffer,
+                 unsigned int length)
+{
+	if (length < 4) {
+		return (singletable_crc32c(crc32c, buffer, length));
+	} else {
+		return (multitable_crc32c(crc32c, buffer, length));
+	}
+}
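+
+/*
+ * Illustrative self-check, deliberately compiled out: the well-known
+ * CRC32c check value for the ASCII string "123456789" is 0xE3069283,
+ * which the single-table and slicing-by-8 paths above must both produce.
+ * The helper name below is hypothetical and exists only as a debugging
+ * sketch.
+ */
+#if 0
+#include <assert.h>
+
+static void
+sctp_crc32c_selftest(void)
+{
+	const unsigned char vec[] = "123456789";
+	uint32_t crc;
+
+	/* Seed with all ones, run the tables, invert at the end. */
+	crc = calculate_crc32c(0xffffffff, vec, 9);
+	assert(~crc == 0xe3069283);
+	/* The short-input path must agree with the sliced path. */
+	assert(singletable_crc32c(0xffffffff, vec, 9) == crc);
+}
+#endif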
+#endif /* FreeBSD < 80000 || other OS */
+
+#if defined(__Userspace__)
+uint32_t
+#else
+static uint32_t
+#endif
+sctp_finalize_crc32c(uint32_t crc32c)
+{
+	uint32_t result;
+
+#if BYTE_ORDER == BIG_ENDIAN
+	uint8_t byte0, byte1, byte2, byte3;
+
+#endif
+	/* Complement the result */
+	result = ~crc32c;
+#if BYTE_ORDER == BIG_ENDIAN
+	/*
+	 * On big-endian (Motorola byte order) machines the result comes
+	 * out in little-endian form, so the bytes must be swapped by hand;
+	 * htonl() on such machines is a no-op, so no further conversion is
+	 * needed.
+	 */
+	byte0 = result & 0x000000ff;
+	byte1 = (result >> 8) & 0x000000ff;
+	byte2 = (result >> 16) & 0x000000ff;
+	byte3 = (result >> 24) & 0x000000ff;
+	crc32c = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
+#else
+	/*
+	 * On little-endian (Intel) platforms the bytes of the result are
+	 * already in network order when stored, so neither htonl() nor the
+	 * manual swap above is needed.
+	 */
+	crc32c = result;
+#endif
+	return (crc32c);
+}
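+
+/*
+ * Sketch of how the helpers above combine for a flat (non-mbuf) packet
+ * buffer; sctp_calculate_cksum() below performs the same steps while
+ * walking an mbuf chain. The helper is hypothetical and compiled out.
+ */
+#if 0
+static uint32_t
+sctp_cksum_flat(const unsigned char *packet, unsigned int length)
+{
+	uint32_t crc = 0xffffffff;	/* seed with all ones */
+
+	/* The checksum field inside the packet must already be zeroed. */
+	crc = calculate_crc32c(crc, packet, length);
+	/* Complement and convert to wire byte order. */
+	return (sctp_finalize_crc32c(crc));
+}
+#endif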
+
+uint32_t
+sctp_calculate_cksum(struct mbuf *m, uint32_t offset)
+{
+	/*
+	 * Given an mbuf chain with a packet header, where 'offset' points
+	 * at an sctphdr (whose checksum field has been set to 0), walk the
+	 * chain of SCTP_BUF_NEXT()'s and compute the SCTP CRC32c checksum.
+	 * Note: if offset is greater than the total mbuf length, no data
+	 * is checksummed and the finalized seed value is simply returned
+	 * (there is no explicit error code).
+	 */
+	uint32_t base = 0xffffffff;
+	struct mbuf *at;
+
+	at = m;
+	/* find the correct mbuf and offset into mbuf */
+	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
+		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
+						 * left */
+		at = SCTP_BUF_NEXT(at);
+	}
+	while (at != NULL) {
+		if ((SCTP_BUF_LEN(at) - offset) > 0) {
+			base = calculate_crc32c(base,
+			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
+			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
+		}
+		if (offset) {
+			/* we only offset once into the first mbuf */
+			if (offset < (uint32_t) SCTP_BUF_LEN(at))
+				offset = 0;
+			else
+				offset -= SCTP_BUF_LEN(at);
+		}
+		at = SCTP_BUF_NEXT(at);
+	}
+	base = sctp_finalize_crc32c(base);
+	return (base);
+}
+#endif				/* !defined(SCTP_WITH_NO_CSUM) */
+
+
+#if defined(__FreeBSD__)
+void
+sctp_delayed_cksum(struct mbuf *m, uint32_t offset)
+{
+#if defined(SCTP_WITH_NO_CSUM)
+#ifdef INVARIANTS
+	panic("sctp_delayed_cksum() called when using no SCTP CRC.");
+#endif
+#else
+	uint32_t checksum;
+
+	checksum = sctp_calculate_cksum(m, offset);
+	SCTP_STAT_DECR(sctps_sendhwcrc);
+	SCTP_STAT_INCR(sctps_sendswcrc);
+	offset += offsetof(struct sctphdr, checksum);
+
+	if (offset + sizeof(uint32_t) > (uint32_t) (m->m_len)) {
+		SCTP_PRINTF("sctp_delayed_cksum(): m->len: %d,  off: %d.\n",
+		            (uint32_t) m->m_len, offset);
+		/*
+		 * XXX this shouldn't happen, but if it does, the correct
+		 * behavior may be to insert the checksum in the appropriate
+		 * next mbuf in the chain.
+		 */
+		return;
+	}
+	*(uint32_t *) (m->m_data + offset) = checksum;
+#endif
+}
+#endif
+
diff --git a/usrsctplib/netinet/sctp_crc32.h b/usrsctplib/netinet/sctp_crc32.h
new file mode 100755
index 0000000..0b1cbbb
--- /dev/null
+++ b/usrsctplib/netinet/sctp_crc32.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_crc32.h 309607 2016-12-06 10:21:25Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_CRC32_H_
+#define _NETINET_SCTP_CRC32_H_
+
+#if defined(_KERNEL)
+#if !defined(SCTP_WITH_NO_CSUM)
+uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
+#endif
+#if defined(__FreeBSD__)
+void sctp_delayed_cksum(struct mbuf *, uint32_t offset);
+#endif
+#endif				/* _KERNEL */
+#if defined(__Userspace__)
+#if !defined(SCTP_WITH_NO_CSUM)
+uint32_t calculate_crc32c(uint32_t, const unsigned char *, unsigned int);
+uint32_t sctp_finalize_crc32c(uint32_t);
+uint32_t sctp_calculate_cksum(struct mbuf *, uint32_t);
+#endif
+#endif
+#endif				/* _NETINET_SCTP_CRC32_H_ */
diff --git a/usrsctplib/netinet/sctp_header.h b/usrsctplib/netinet/sctp_header.h
new file mode 100755
index 0000000..1019ee2
--- /dev/null
+++ b/usrsctplib/netinet/sctp_header.h
@@ -0,0 +1,609 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_header.h 309682 2016-12-07 19:30:59Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_HEADER_H_
+#define _NETINET_SCTP_HEADER_H_
+
+#if defined(__Windows__) && !defined(__Userspace_os_Windows)
+#include <packon.h>
+#endif
+#if !defined(__Userspace_os_Windows)
+#include <sys/time.h>
+#endif
+#include <netinet/sctp.h>
+#include <netinet/sctp_constants.h>
+
+#if !defined(__Userspace_os_Windows)
+#define SCTP_PACKED __attribute__((packed))
+#else
+#pragma pack (push, 1)
+#define SCTP_PACKED
+#endif
+
+/*
+ * Parameter structures
+ */
+struct sctp_ipv4addr_param {
+	struct sctp_paramhdr ph;/* type=SCTP_IPV4_PARAM_TYPE, len=8 */
+	uint32_t addr;		/* IPV4 address */
+} SCTP_PACKED;
+
+#define SCTP_V6_ADDR_BYTES 16
+
+
+struct sctp_ipv6addr_param {
+	struct sctp_paramhdr ph;/* type=SCTP_IPV6_PARAM_TYPE, len=20 */
+	uint8_t addr[SCTP_V6_ADDR_BYTES];	/* IPV6 address */
+} SCTP_PACKED;
+
+/* Cookie Preservative */
+struct sctp_cookie_perserve_param {
+	struct sctp_paramhdr ph;/* type=SCTP_COOKIE_PRESERVE, len=8 */
+	uint32_t time;		/* time in ms to extend cookie */
+} SCTP_PACKED;
+
+#define SCTP_ARRAY_MIN_LEN 1
+/* Host Name Address */
+struct sctp_host_name_param {
+	struct sctp_paramhdr ph;/* type=SCTP_HOSTNAME_ADDRESS */
+	char name[SCTP_ARRAY_MIN_LEN];		/* host name */
+} SCTP_PACKED;
+
+/*
+ * This is the maximum padded size of a supported-address-types parameter:
+ * param header (4 bytes) + 3 address types (6 bytes) + 2 bytes of pad = 12
+ */
+#define SCTP_MAX_ADDR_PARAMS_SIZE 12
+/* supported address type */
+struct sctp_supported_addr_param {
+	struct sctp_paramhdr ph;/* type=SCTP_SUPPORTED_ADDRTYPE */
+	uint16_t addr_type[2];	/* array of supported address types */
+} SCTP_PACKED;
+
+/* heartbeat info parameter */
+struct sctp_heartbeat_info_param {
+	struct sctp_paramhdr ph;
+	uint32_t time_value_1;
+	uint32_t time_value_2;
+	uint32_t random_value1;
+	uint32_t random_value2;
+	uint8_t addr_family;
+	uint8_t addr_len;
+	/* make sure that this structure is 4 byte aligned */
+	uint8_t padding[2];
+	char address[SCTP_ADDRMAX];
+} SCTP_PACKED;
+
+
+/* draft-ietf-tsvwg-prsctp */
+/* PR-SCTP supported parameter */
+struct sctp_prsctp_supported_param {
+	struct sctp_paramhdr ph;
+} SCTP_PACKED;
+
+
+/* draft-ietf-tsvwg-addip-sctp */
+struct sctp_asconf_paramhdr {	/* an ASCONF "parameter" */
+	struct sctp_paramhdr ph;/* a SCTP parameter header */
+	uint32_t correlation_id;/* correlation id for this param */
+} SCTP_PACKED;
+
+struct sctp_asconf_addr_param {	/* an ASCONF address parameter */
+	struct sctp_asconf_paramhdr aph;	/* asconf "parameter" */
+	struct sctp_ipv6addr_param addrp;	/* max storage size */
+} SCTP_PACKED;
+
+
+struct sctp_asconf_tag_param {	/* an ASCONF NAT-Vtag parameter */
+	struct sctp_asconf_paramhdr aph;	/* asconf "parameter" */
+        uint32_t local_vtag;
+        uint32_t remote_vtag;
+} SCTP_PACKED;
+
+
+struct sctp_asconf_addrv4_param {	/* an ASCONF address (v4) parameter */
+	struct sctp_asconf_paramhdr aph;	/* asconf "parameter" */
+	struct sctp_ipv4addr_param addrp;	/* max storage size */
+} SCTP_PACKED;
+
+#define SCTP_MAX_SUPPORTED_EXT 256
+
+struct sctp_supported_chunk_types_param {
+	struct sctp_paramhdr ph;/* type = 0x8008  len = x */
+	uint8_t chunk_types[];
+} SCTP_PACKED;
+
+
+/*
+ * Structures for DATA chunks
+ */
+struct sctp_data {
+	uint32_t tsn;
+	uint16_t sid;
+	uint16_t ssn;
+	uint32_t ppid;
+	/* user data follows */
+} SCTP_PACKED;
+
+struct sctp_data_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_data dp;
+} SCTP_PACKED;
+
+struct sctp_idata {
+	uint32_t tsn;
+	uint16_t sid;
+	uint16_t reserved;	/* Where does the SSN go? */
+	uint32_t mid;
+	union {
+		uint32_t ppid;
+		uint32_t fsn;	/* Fragment Sequence Number */
+	} ppid_fsn;
+	/* user data follows */
+} SCTP_PACKED;
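+
+/*
+ * In the I-DATA extension the PPID/FSN field carries the payload protocol
+ * identifier in the first fragment of a user message and the fragment
+ * sequence number in every later fragment, hence the union above.
+ */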
+
+struct sctp_idata_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_idata dp;
+} SCTP_PACKED;
+
+/*
+ * Structures for the control chunks
+ */
+
+/* Initiate (INIT)/Initiate Ack (INIT ACK) */
+struct sctp_init {
+	uint32_t initiate_tag;	/* initiate tag */
+	uint32_t a_rwnd;	/* a_rwnd */
+	uint16_t num_outbound_streams;	/* OS */
+	uint16_t num_inbound_streams;	/* MIS */
+	uint32_t initial_tsn;	/* I-TSN */
+	/* optional param's follow */
+} SCTP_PACKED;
+#define SCTP_IDENTIFICATION_SIZE 16
+#define SCTP_ADDRESS_SIZE 4
+#if defined(__Userspace__)
+#define SCTP_RESERVE_SPACE 5
+#else
+#define SCTP_RESERVE_SPACE 6
+#endif
+/* state cookie header */
+struct sctp_state_cookie {	/* this is our definition... */
+	uint8_t identification[SCTP_IDENTIFICATION_SIZE];/* id of who we are */
+	struct timeval time_entered;	/* the time I built cookie */
+	uint32_t cookie_life;	/* life I will award this cookie */
+	uint32_t tie_tag_my_vtag;	/* my tag in old association */
+
+	uint32_t tie_tag_peer_vtag;	/* peers tag in old association */
+	uint32_t peers_vtag;	/* peers tag in INIT (for quick ref) */
+
+	uint32_t my_vtag;	/* my tag in INIT-ACK (for quick ref) */
+	uint32_t address[SCTP_ADDRESS_SIZE];	/* 4 ints/128 bits */
+	uint32_t addr_type;	/* address type */
+	uint32_t laddress[SCTP_ADDRESS_SIZE];	/* my local from address */
+	uint32_t laddr_type;	/* my local from address type */
+	uint32_t scope_id;	/* v6 scope id for link-locals */
+
+	uint16_t peerport;	/* port address of the peer in the INIT */
+	uint16_t myport;	/* my port address used in the INIT */
+	uint8_t ipv4_addr_legal;/* Are V4 addr legal? */
+	uint8_t ipv6_addr_legal;/* Are V6 addr legal? */
+#if defined(__Userspace__)
+	uint8_t conn_addr_legal;
+#endif
+	uint8_t local_scope;	/* IPv6 local scope flag */
+	uint8_t site_scope;	/* IPv6 site scope flag */
+
+	uint8_t ipv4_scope;	/* IPv4 private addr scope */
+	uint8_t loopback_scope;	/* loopback scope information */
+	uint8_t reserved[SCTP_RESERVE_SPACE];    /* Align to 64 bits */
+	/*
+	 * Tacked on at the end are the INIT chunk and the INIT-ACK chunk
+	 * (minus the cookie).
+	 */
+} SCTP_PACKED;
+
+/* state cookie parameter */
+struct sctp_state_cookie_param {
+	struct sctp_paramhdr ph;
+	struct sctp_state_cookie cookie;
+} SCTP_PACKED;
+
+struct sctp_init_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_init init;
+} SCTP_PACKED;
+
+struct sctp_init_msg {
+	struct sctphdr sh;
+	struct sctp_init_chunk msg;
+} SCTP_PACKED;
+
+/* ... used for both INIT and INIT ACK */
+#define sctp_init_ack		sctp_init
+#define sctp_init_ack_chunk	sctp_init_chunk
+#define sctp_init_ack_msg	sctp_init_msg
+
+
+/* Selective Ack (SACK) */
+struct sctp_gap_ack_block {
+	uint16_t start;		/* Gap Ack block start */
+	uint16_t end;		/* Gap Ack block end */
+} SCTP_PACKED;
+
+struct sctp_sack {
+	uint32_t cum_tsn_ack;	/* cumulative TSN Ack */
+	uint32_t a_rwnd;	/* updated a_rwnd of sender */
+	uint16_t num_gap_ack_blks;	/* number of Gap Ack blocks */
+	uint16_t num_dup_tsns;	/* number of duplicate TSNs */
+	/* struct sctp_gap_ack_block's follow */
+	/* uint32_t duplicate_tsn's follow */
+} SCTP_PACKED;
+
+struct sctp_sack_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_sack sack;
+} SCTP_PACKED;
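+
+/*
+ * A SACK chunk is variable length: num_gap_ack_blks gap blocks and then
+ * num_dup_tsns duplicate TSNs trail the fixed header. A compiled-out
+ * sketch of the resulting size, for a chunk still in wire byte order
+ * (the helper name is hypothetical):
+ */
+#if 0
+static uint16_t
+sctp_sack_chunk_size(const struct sctp_sack_chunk *sc)
+{
+	uint16_t gaps, dups;
+
+	gaps = ntohs(sc->sack.num_gap_ack_blks);
+	dups = ntohs(sc->sack.num_dup_tsns);
+	return ((uint16_t)(sizeof(struct sctp_sack_chunk) +
+	    gaps * sizeof(struct sctp_gap_ack_block) +
+	    dups * sizeof(uint32_t)));
+}
+#endif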
+
+struct sctp_nr_sack {
+	uint32_t cum_tsn_ack;	/* cumulative TSN Ack */
+	uint32_t a_rwnd;	/* updated a_rwnd of sender */
+	uint16_t num_gap_ack_blks;	/* number of Gap Ack blocks */
+	uint16_t num_nr_gap_ack_blks;	/* number of NR Gap Ack blocks */
+	uint16_t num_dup_tsns;	/* number of duplicate TSNs */
+	uint16_t reserved;	/* not currently used*/
+	/* struct sctp_gap_ack_block's follow */
+	/* uint32_t duplicate_tsn's follow */
+} SCTP_PACKED;
+
+struct sctp_nr_sack_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_nr_sack nr_sack;
+} SCTP_PACKED;
+
+
+/* Heartbeat Request (HEARTBEAT) */
+struct sctp_heartbeat {
+	struct sctp_heartbeat_info_param hb_info;
+} SCTP_PACKED;
+
+struct sctp_heartbeat_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_heartbeat heartbeat;
+} SCTP_PACKED;
+
+/* ... used for Heartbeat Ack (HEARTBEAT ACK) */
+#define sctp_heartbeat_ack		sctp_heartbeat
+#define sctp_heartbeat_ack_chunk	sctp_heartbeat_chunk
+
+
+/* Abort Association (ABORT) */
+struct sctp_abort_chunk {
+	struct sctp_chunkhdr ch;
+	/* optional error cause may follow */
+} SCTP_PACKED;
+
+struct sctp_abort_msg {
+	struct sctphdr sh;
+	struct sctp_abort_chunk msg;
+} SCTP_PACKED;
+
+
+/* Shutdown Association (SHUTDOWN) */
+struct sctp_shutdown_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t cumulative_tsn_ack;
+} SCTP_PACKED;
+
+
+/* Shutdown Acknowledgment (SHUTDOWN ACK) */
+struct sctp_shutdown_ack_chunk {
+	struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+
+/* Operation Error (ERROR) */
+struct sctp_error_chunk {
+	struct sctp_chunkhdr ch;
+	/* optional error causes follow */
+} SCTP_PACKED;
+
+
+/* Cookie Echo (COOKIE ECHO) */
+struct sctp_cookie_echo_chunk {
+	struct sctp_chunkhdr ch;
+	struct sctp_state_cookie cookie;
+} SCTP_PACKED;
+
+/* Cookie Acknowledgment (COOKIE ACK) */
+struct sctp_cookie_ack_chunk {
+	struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+/* Explicit Congestion Notification Echo (ECNE) */
+struct old_sctp_ecne_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t tsn;
+} SCTP_PACKED;
+
+struct sctp_ecne_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t tsn;
+	uint32_t num_pkts_since_cwr;
+} SCTP_PACKED;
+
+/* Congestion Window Reduced (CWR) */
+struct sctp_cwr_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t tsn;
+} SCTP_PACKED;
+
+/* Shutdown Complete (SHUTDOWN COMPLETE) */
+struct sctp_shutdown_complete_chunk {
+	struct sctp_chunkhdr ch;
+} SCTP_PACKED;
+
+struct sctp_adaptation_layer_indication {
+	struct sctp_paramhdr ph;
+	uint32_t indication;
+} SCTP_PACKED;
+
+/*
+ * draft-ietf-tsvwg-addip-sctp
+ */
+/* Address/Stream Configuration Change (ASCONF) */
+struct sctp_asconf_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t serial_number;
+	/* lookup address parameter (mandatory) */
+	/* asconf parameters follow */
+} SCTP_PACKED;
+
+/* Address/Stream Configuration Acknowledge (ASCONF ACK) */
+struct sctp_asconf_ack_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t serial_number;
+	/* asconf parameters follow */
+} SCTP_PACKED;
+
+/* draft-ietf-tsvwg-prsctp */
+/* Forward Cumulative TSN (FORWARD TSN) */
+struct sctp_forward_tsn_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t new_cumulative_tsn;
+	/* stream/sequence pairs (sctp_strseq) follow */
+} SCTP_PACKED;
+
+struct sctp_strseq {
+	uint16_t sid;
+	uint16_t ssn;
+} SCTP_PACKED;
+
+struct sctp_strseq_mid {
+	uint16_t sid;
+	uint16_t flags;
+	uint32_t mid;
+};
+
+struct sctp_forward_tsn_msg {
+	struct sctphdr sh;
+	struct sctp_forward_tsn_chunk msg;
+} SCTP_PACKED;
+
+/* should be one less than a multiple of 4, i.e. 3/7/11 etc. */
+
+#define SCTP_NUM_DB_TO_VERIFY 31
+
+struct sctp_chunk_desc {
+	uint8_t chunk_type;
+	uint8_t data_bytes[SCTP_NUM_DB_TO_VERIFY];
+	uint32_t tsn_ifany;
+} SCTP_PACKED;
+
+
+struct sctp_pktdrop_chunk {
+	struct sctp_chunkhdr ch;
+	uint32_t bottle_bw;
+	uint32_t current_onq;
+	uint16_t trunc_len;
+	uint16_t reserved;
+	uint8_t data[];
+} SCTP_PACKED;
+
+/**********STREAM RESET STUFF ******************/
+
+struct sctp_stream_reset_request {
+	struct sctp_paramhdr ph;
+	uint32_t request_seq;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_out_request {
+	struct sctp_paramhdr ph;
+	uint32_t request_seq;	/* monotonically increasing seq no */
+	uint32_t response_seq;	/* if a response, the resp seq no */
+	uint32_t send_reset_at_tsn;	/* last TSN I assigned outbound */
+	uint16_t list_of_streams[];	/* if not all list of streams */
+} SCTP_PACKED;
+
+struct sctp_stream_reset_in_request {
+	struct sctp_paramhdr ph;
+	uint32_t request_seq;
+	uint16_t list_of_streams[];	/* if not all list of streams */
+} SCTP_PACKED;
+
+struct sctp_stream_reset_tsn_request {
+	struct sctp_paramhdr ph;
+	uint32_t request_seq;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_response {
+	struct sctp_paramhdr ph;
+	uint32_t response_seq;	/* if a response, the resp seq no */
+	uint32_t result;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_response_tsn {
+	struct sctp_paramhdr ph;
+	uint32_t response_seq;	/* if a response, the resp seq no */
+	uint32_t result;
+	uint32_t senders_next_tsn;
+	uint32_t receivers_next_tsn;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_add_strm {
+  struct sctp_paramhdr ph;
+  uint32_t request_seq;
+  uint16_t number_of_streams;
+  uint16_t reserved;
+} SCTP_PACKED;
+
+#define SCTP_STREAM_RESET_RESULT_NOTHING_TO_DO   0x00000000 /* XXX: unused */
+#define SCTP_STREAM_RESET_RESULT_PERFORMED       0x00000001
+#define SCTP_STREAM_RESET_RESULT_DENIED          0x00000002
+#define SCTP_STREAM_RESET_RESULT_ERR__WRONG_SSN  0x00000003 /* XXX: unused */
+#define SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS 0x00000004
+#define SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO   0x00000005
+#define SCTP_STREAM_RESET_RESULT_IN_PROGRESS     0x00000006 /* XXX: unused */
+
+/*
+ * convenience structures; note that if you are making a request for
+ * specific streams, the request will need to be an overlay structure.
+ */
+
+struct sctp_stream_reset_tsn_req {
+	struct sctp_chunkhdr ch;
+	struct sctp_stream_reset_tsn_request sr_req;
+} SCTP_PACKED;
+
+struct sctp_stream_reset_resp {
+	struct sctp_chunkhdr ch;
+	struct sctp_stream_reset_response sr_resp;
+} SCTP_PACKED;
+
+/* response only valid with a TSN request */
+struct sctp_stream_reset_resp_tsn {
+	struct sctp_chunkhdr ch;
+	struct sctp_stream_reset_response_tsn sr_resp;
+} SCTP_PACKED;
+
+/****************************************************/
+
+/*
+ * Authenticated chunks support draft-ietf-tsvwg-sctp-auth
+ */
+
+/* Should we make the max be 32? */
+#define SCTP_RANDOM_MAX_SIZE 256
+struct sctp_auth_random {
+	struct sctp_paramhdr ph;/* type = 0x8002 */
+	uint8_t random_data[];
+} SCTP_PACKED;
+
+struct sctp_auth_chunk_list {
+	struct sctp_paramhdr ph;/* type = 0x8003 */
+	uint8_t chunk_types[];
+} SCTP_PACKED;
+
+struct sctp_auth_hmac_algo {
+	struct sctp_paramhdr ph;/* type = 0x8004 */
+	uint16_t hmac_ids[];
+} SCTP_PACKED;
+
+struct sctp_auth_chunk {
+	struct sctp_chunkhdr ch;
+	uint16_t shared_key_id;
+	uint16_t hmac_id;
+	uint8_t hmac[];
+} SCTP_PACKED;
+
+/*
+ * We pre-reserve enough room for an ECNE or CWR AND a SACK with no missing
+ * pieces. If the ECNE is absent we could fit a couple of gap blocks instead.
+ * This way we can most likely bundle a SACK/ECNE with even the smallest data
+ * chunk we will split into. Throughput could be increased slightly by taking
+ * these two out, but the 24 (SACK) + 8 (CWR) = 32 bytes pre-reserved here
+ * seem worth it for now.
+ */
+#ifndef SCTP_MAX_OVERHEAD
+#ifdef INET6
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+			   sizeof(struct sctphdr) + \
+			   sizeof(struct sctp_ecne_chunk) + \
+			   sizeof(struct sctp_sack_chunk) + \
+			   sizeof(struct ip6_hdr))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+			   sizeof(struct sctphdr) + \
+			   sizeof(struct ip6_hdr))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip6_hdr) + \
+			   sizeof(struct sctphdr))
+
+#else
+#define SCTP_MAX_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+			   sizeof(struct sctphdr) + \
+			   sizeof(struct sctp_ecne_chunk) + \
+			   sizeof(struct sctp_sack_chunk) + \
+			   sizeof(struct ip))
+
+#define SCTP_MED_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+			   sizeof(struct sctphdr) + \
+			   sizeof(struct ip))
+
+
+#define SCTP_MIN_OVERHEAD (sizeof(struct ip) + \
+			   sizeof(struct sctphdr))
+
+#endif /* INET6 */
+#endif /* !SCTP_MAX_OVERHEAD */
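+
+/*
+ * With the packed definitions above, the IPv4 case works out to: DATA
+ * chunk header (16) + sctphdr (12) + ECNE (12) + SACK (16) + a 20-byte
+ * IPv4 header = 76 bytes for SCTP_MAX_OVERHEAD, and 20 + 12 = 32 bytes
+ * for SCTP_MIN_OVERHEAD (assuming no IP options).
+ */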
+
+#define SCTP_MED_V4_OVERHEAD (sizeof(struct sctp_data_chunk) + \
+			      sizeof(struct sctphdr) + \
+			      sizeof(struct ip))
+
+#define SCTP_MIN_V4_OVERHEAD (sizeof(struct ip) + \
+			      sizeof(struct sctphdr))
+
+#if defined(__Windows__)
+#include <packoff.h>
+#endif
+#if defined(__Userspace_os_Windows)
+#pragma pack ()
+#endif
+#undef SCTP_PACKED
+#endif				/* !_NETINET_SCTP_HEADER_H_ */
diff --git a/usrsctplib/netinet/sctp_indata.c b/usrsctplib/netinet/sctp_indata.c
new file mode 100755
index 0000000..d11475a
--- /dev/null
+++ b/usrsctplib/netinet/sctp_indata.c
@@ -0,0 +1,5633 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#ifdef __FreeBSD__
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_crc32.h>
+#ifdef __FreeBSD__
+#include <netinet/sctp_lock_bsd.h>
+#endif
+/*
+ * NOTES: On the outbound side we need to check the SACK timer to see if a
+ * SACK should be generated into the chunk queue (that is, if we have data
+ * to send and will be sending it anyway, bundle the SACK with it).
+ *
+ * The callback in sctp_usrreq.c gets called when the socket is read from;
+ * this causes sctp_service_queues() to be called on the top entry in the
+ * list.
+ */
+static void
+sctp_add_chk_to_control(struct sctp_queued_to_read *control,
+			struct sctp_stream_in *strm,
+			struct sctp_tcb *stcb,
+			struct sctp_association *asoc,
+			struct sctp_tmit_chunk *chk, int lock_held);
+
+
+void
+sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
+}
+
+/* Calculate what the rwnd would be */
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	uint32_t calc = 0;
+
+	/*
+	 * This is really set wrong with respect to a 1-to-many socket,
+	 * since sb_cc is the count that every association has put up. When
+	 * sctp_soreceive is rewritten we will fix this so that ONLY this
+	 * association's data is taken into account.
+	 */
+	if (stcb->sctp_socket == NULL) {
+		return (calc);
+	}
+
+	if (stcb->asoc.sb_cc == 0 &&
+	    asoc->size_on_reasm_queue == 0 &&
+	    asoc->size_on_all_streams == 0) {
+		/* Full rwnd granted */
+		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
+		return (calc);
+	}
+	/* get actual space */
+	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
+	/*
+	 * Subtract what has NOT yet been put on the socket queue but that
+	 * we still hold for delivery.
+	 */
+	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
+	                                         asoc->cnt_on_reasm_queue * MSIZE));
+	calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
+	                                         asoc->cnt_on_all_streams * MSIZE));
+	if (calc == 0) {
+		/* out of space */
+		return (calc);
+	}
+
+	/* what is the overhead of all these rwnd's */
+	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
+	/*
+	 * If the window gets too small because of control overhead, reduce
+	 * it to 1 even if it would be 0: silly window syndrome (SWS)
+	 * avoidance is engaged.
+	 */
+	if (calc < stcb->asoc.my_rwnd_control_len) {
+		calc = 1;
+	}
+	return (calc);
+}
+
+
+
+/*
+ * Build out our readq entry based on the incoming packet.
+ */
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+    struct sctp_nets *net,
+    uint32_t tsn, uint32_t ppid,
+    uint32_t context, uint16_t sid,
+    uint32_t mid, uint8_t flags,
+    struct mbuf *dm)
+{
+	struct sctp_queued_to_read *read_queue_e = NULL;
+
+	sctp_alloc_a_readq(stcb, read_queue_e);
+	if (read_queue_e == NULL) {
+		goto failed_build;
+	}
+	memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
+	read_queue_e->sinfo_stream = sid;
+	read_queue_e->sinfo_flags = (flags << 8);
+	read_queue_e->sinfo_ppid = ppid;
+	read_queue_e->sinfo_context = context;
+	read_queue_e->sinfo_tsn = tsn;
+	read_queue_e->sinfo_cumtsn = tsn;
+	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
+	read_queue_e->mid = mid;
+	read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
+	TAILQ_INIT(&read_queue_e->reasm);
+	read_queue_e->whoFrom = net;
+	atomic_add_int(&net->ref_count, 1);
+	read_queue_e->data = dm;
+	read_queue_e->stcb = stcb;
+	read_queue_e->port_from = stcb->rport;
+failed_build:
+	return (read_queue_e);
+}
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
+{
+	struct sctp_extrcvinfo *seinfo;
+	struct sctp_sndrcvinfo *outinfo;
+	struct sctp_rcvinfo *rcvinfo;
+	struct sctp_nxtinfo *nxtinfo;
+#if defined(__Userspace_os_Windows)
+	WSACMSGHDR *cmh;
+#else
+	struct cmsghdr *cmh;
+#endif
+	struct mbuf *ret;
+	int len;
+	int use_extended;
+	int provide_nxt;
+
+	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
+	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
+	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
+		/* user does not want any ancillary data */
+		return (NULL);
+	}
+
+	len = 0;
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
+		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
+	}
+	seinfo = (struct sctp_extrcvinfo *)sinfo;
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
+	    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
+		provide_nxt = 1;
+		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
+	} else {
+		provide_nxt = 0;
+	}
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
+			use_extended = 1;
+			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
+		} else {
+			use_extended = 0;
+			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
+		}
+	} else {
+		use_extended = 0;
+	}
+
+	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+	if (ret == NULL) {
+		/* No space */
+		return (ret);
+	}
+	SCTP_BUF_LEN(ret) = 0;
+
+	/* We need a CMSG header followed by the struct */
+#if defined(__Userspace_os_Windows)
+	cmh = mtod(ret, WSACMSGHDR *);
+#else
+	cmh = mtod(ret, struct cmsghdr *);
+#endif
+	/*
+	 * Make sure that there is no un-initialized padding between
+	 * the cmsg header and cmsg data and after the cmsg data.
+	 */
+	memset(cmh, 0, len);
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
+		cmh->cmsg_level = IPPROTO_SCTP;
+		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
+		cmh->cmsg_type = SCTP_RCVINFO;
+		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
+		rcvinfo->rcv_sid = sinfo->sinfo_stream;
+		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
+		rcvinfo->rcv_flags = sinfo->sinfo_flags;
+		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
+		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
+		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
+		rcvinfo->rcv_context = sinfo->sinfo_context;
+		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
+#if defined(__Userspace_os_Windows)
+		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
+#else
+		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
+#endif
+		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
+	}
+	if (provide_nxt) {
+		cmh->cmsg_level = IPPROTO_SCTP;
+		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
+		cmh->cmsg_type = SCTP_NXTINFO;
+		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
+		nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
+		nxtinfo->nxt_flags = 0;
+		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
+			nxtinfo->nxt_flags |= SCTP_UNORDERED;
+		}
+		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
+			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
+		}
+		if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
+			nxtinfo->nxt_flags |= SCTP_COMPLETE;
+		}
+		nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
+		nxtinfo->nxt_length = seinfo->serinfo_next_length;
+		nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
+#if defined(__Userspace_os_Windows)
+		cmh = (WSACMSGHDR *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
+#else
+		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
+#endif
+		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
+	}
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
+		cmh->cmsg_level = IPPROTO_SCTP;
+		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
+		if (use_extended) {
+			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
+			cmh->cmsg_type = SCTP_EXTRCV;
+			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
+			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
+		} else {
+			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+			cmh->cmsg_type = SCTP_SNDRCV;
+			*outinfo = *sinfo;
+			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
+		}
+	}
+	return (ret);
+}
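+
+/*
+ * Sketch of the consumer side: a userland receiver walking the ancillary
+ * data built above with the standard CMSG macros. The function name is
+ * hypothetical and the block is compiled out; only the cmsg level and
+ * type come from this stack.
+ */
+#if 0
+#include <string.h>
+
+static void
+handle_ancillary(struct msghdr *msg)
+{
+	struct cmsghdr *cmh;
+	struct sctp_rcvinfo rcv;
+
+	for (cmh = CMSG_FIRSTHDR(msg); cmh != NULL;
+	     cmh = CMSG_NXTHDR(msg, cmh)) {
+		if (cmh->cmsg_level == IPPROTO_SCTP &&
+		    cmh->cmsg_type == SCTP_RCVINFO) {
+			/* Copy out; the cmsg payload may be unaligned. */
+			memcpy(&rcv, CMSG_DATA(cmh), sizeof(rcv));
+			/* rcv.rcv_sid, rcv.rcv_tsn, etc. are now usable. */
+		}
+	}
+}
+#endif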
+
+
+static void
+sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
+{
+	uint32_t gap, i, cumackp1;
+	int fnd = 0;
+	int in_r=0, in_nr=0;
+	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+		return;
+	}
+	cumackp1 = asoc->cumulative_tsn + 1;
+	if (SCTP_TSN_GT(cumackp1, tsn)) {
+		/*
+		 * This TSN is behind the cumulative ack, so we don't need
+		 * to worry about it being moved from one mapping array to
+		 * the other.
+		 */
+		return;
+	}
+	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+	in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
+	in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
+	if ((in_r == 0) && (in_nr == 0)) {
+#ifdef INVARIANTS
+		panic("Things are really messed up now");
+#else
+		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
+		sctp_print_mapping_array(asoc);
+#endif
+	}
+	if (in_nr == 0)
+		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+	if (in_r)
+		SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+		asoc->highest_tsn_inside_nr_map = tsn;
+	}
+	if (tsn == asoc->highest_tsn_inside_map) {
+		/* We must back down to see what the new highest is */
+		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
+			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
+			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+				asoc->highest_tsn_inside_map = i;
+				fnd = 1;
+				break;
+			}
+		}
+		if (!fnd) {
+			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
+		}
+	}
+}
+
+static int
+sctp_place_control_in_stream(struct sctp_stream_in *strm,
+			     struct sctp_association *asoc,
+			     struct sctp_queued_to_read *control)
+{
+	struct sctp_queued_to_read *at;
+	struct sctp_readhead *q;
+	uint8_t flags, unordered;
+
+	flags = (control->sinfo_flags >> 8);
+	unordered = flags & SCTP_DATA_UNORDERED;
+	if (unordered) {
+		q = &strm->uno_inqueue;
+		if (asoc->idata_supported == 0) {
+			if (!TAILQ_EMPTY(q)) {
+				/* Only one message can be here in old style -- abort */
+				return (-1);
+			}
+			TAILQ_INSERT_TAIL(q, control, next_instrm);
+			control->on_strm_q = SCTP_ON_UNORDERED;
+			return (0);
+		}
+	} else {
+		q = &strm->inqueue;
+	}
+	if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+		control->end_added = 1;
+		control->first_frag_seen = 1;
+		control->last_frag_seen = 1;
+	}
+	if (TAILQ_EMPTY(q)) {
+		/* Empty queue */
+		TAILQ_INSERT_HEAD(q, control, next_instrm);
+		if (unordered) {
+			control->on_strm_q = SCTP_ON_UNORDERED;
+		} else {
+			control->on_strm_q = SCTP_ON_ORDERED;
+		}
+		return (0);
+	} else {
+		TAILQ_FOREACH(at, q, next_instrm) {
+			if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
+				/*
+				 * one in queue is bigger than the
+				 * new one, insert before this one
+				 */
+				TAILQ_INSERT_BEFORE(at, control, next_instrm);
+				if (unordered) {
+					control->on_strm_q = SCTP_ON_UNORDERED;
+				} else {
+					control->on_strm_q = SCTP_ON_ORDERED ;
+				}
+				break;
+			} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
+				/*
+				 * The peer sent a duplicate message
+				 * id number; return -1 to abort.
+				 */
+				return (-1);
+			} else {
+				if (TAILQ_NEXT(at, next_instrm) == NULL) {
+					/*
+					 * We are at the end, insert
+					 * it after this one
+					 */
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+						sctp_log_strm_del(control, at,
+								  SCTP_STR_LOG_FROM_INSERT_TL);
+					}
+					TAILQ_INSERT_AFTER(q, at, control, next_instrm);
+					if (unordered) {
+						control->on_strm_q = SCTP_ON_UNORDERED ;
+					} else {
+						control->on_strm_q = SCTP_ON_ORDERED ;
+					}
+					break;
+				}
+			}
+		}
+	}
+	return (0);
+}
+
+static void
+sctp_abort_in_reasm(struct sctp_tcb *stcb,
+                    struct sctp_queued_to_read *control,
+                    struct sctp_tmit_chunk *chk,
+                    int *abort_flag, int opspot)
+{
+	char msg[SCTP_DIAG_INFO_LEN];
+	struct mbuf *oper;
+
+	if (stcb->asoc.idata_supported) {
+		snprintf(msg, sizeof(msg),
+			 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
+			 opspot,
+			 control->fsn_included,
+			 chk->rec.data.tsn,
+			 chk->rec.data.sid,
+			 chk->rec.data.fsn, chk->rec.data.mid);
+	} else {
+		snprintf(msg, sizeof(msg),
+			 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
+			 opspot,
+			 control->fsn_included,
+			 chk->rec.data.tsn,
+			 chk->rec.data.sid,
+			 chk->rec.data.fsn,
+			 (uint16_t)chk->rec.data.mid);
+	}
+	oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+	sctp_m_freem(chk->data);
+	chk->data = NULL;
+	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+	stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
+	sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
+	*abort_flag = 1;
+}
+
+static void
+sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
+{
+	/*
+	 * The control could not be placed and must be cleaned up.
+	 */
+	struct sctp_tmit_chunk *chk, *nchk;
+	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+		if (chk->data)
+			sctp_m_freem(chk->data);
+		chk->data = NULL;
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+	}
+	sctp_free_a_readq(stcb, control);	
+}
+
+/*
+ * Queue the chunk either right into the socket buffer, if it is the next one
+ * to go, OR put it in the correct place in the delivery queue. If we do
+ * append to the so_buf, keep doing so until we hit one that is out of order,
+ * as long as the controls entered are non-fragmented.
+ */
+static void
+sctp_queue_data_to_stream(struct sctp_tcb *stcb,
+    struct sctp_association *asoc,
+    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
+{
+	/*
+	 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
+	 * all the data in one stream this could happen quite rapidly. One
+	 * could use the TSN to keep track of things, but this scheme breaks
+	 * down in the other type of stream usage that could occur. Send a
+	 * single msg to stream 0, send 4 billion messages to stream 1, now
+	 * send a message to stream 0. You have a situation where the TSN
+	 * has wrapped but not in the stream. Is this worth worrying about,
+	 * or should we just change our queue sort at the bottom to be by
+	 * TSN?
+	 *
+	 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
+	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
+	 * assignment this could happen... and I don't see how this would be
+	 * a violation. So for now I am undecided and will leave the sort by
+	 * SSN alone. Maybe a hybrid approach is the answer.
+	 *
+	 */
+	struct sctp_queued_to_read *at;
+	int queue_needed;
+	uint32_t nxt_todel;
+	struct mbuf *op_err;
+	struct sctp_stream_in *strm;
+	char msg[SCTP_DIAG_INFO_LEN];
+
+	strm = &asoc->strmin[control->sinfo_stream];
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
+	}
+	if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
+		/* The incoming sseq is behind where we last delivered? */
+		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
+			strm->last_mid_delivered, control->mid);
+		/*
+		 * throw it in the stream so it gets cleaned up in
+		 * association destruction
+		 */
+		TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
+		if (asoc->idata_supported) {
+			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+			         strm->last_mid_delivered, control->sinfo_tsn,
+			         control->sinfo_stream, control->mid);
+		} else {
+			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+			         (uint16_t)strm->last_mid_delivered,
+			         control->sinfo_tsn,
+			         control->sinfo_stream,
+			         (uint16_t)control->mid);
+		}
+		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_flag = 1;
+		return;
+
+	}
+	queue_needed = 1;
+	asoc->size_on_all_streams += control->length;
+	sctp_ucount_incr(asoc->cnt_on_all_streams);
+	nxt_todel = strm->last_mid_delivered + 1;
+	if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+#endif
+		/* can be delivered right away? */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
+		}
+		/* EY it won't be queued if it could be delivered directly */
+		queue_needed = 0;
+		asoc->size_on_all_streams -= control->length;
+		sctp_ucount_decr(asoc->cnt_on_all_streams);
+		strm->last_mid_delivered++;
+		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+		sctp_add_to_readq(stcb->sctp_ep, stcb,
+		                  control,
+		                  &stcb->sctp_socket->so_rcv, 1,
+		                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
+		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
+			/* all delivered */
+			nxt_todel = strm->last_mid_delivered + 1;
+			if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
+			    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
+				asoc->size_on_all_streams -= control->length;
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				if (control->on_strm_q == SCTP_ON_ORDERED) {
+					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+#ifdef INVARIANTS
+				} else {
+					panic("Huh control: %p is on_strm_q: %d",
+					      control, control->on_strm_q);
+#endif
+				}
+				control->on_strm_q = 0;
+				strm->last_mid_delivered++;
+				/*
+				 * We ignore the return of deliver_data here
+				 * since we can always hold the chunk on the
+				 * d-queue. And we have a finite number that
+				 * can be delivered from the strq.
+				 */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+					sctp_log_strm_del(control, NULL,
+							  SCTP_STR_LOG_FROM_IMMED_DEL);
+				}
+				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+				                  control,
+				                  &stcb->sctp_socket->so_rcv, 1,
+				                  SCTP_READ_LOCK_NOT_HELD,
+				                  SCTP_SO_LOCKED);
+				continue;
+			} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
+				*need_reasm = 1;
+			}
+			break;
+		}
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	}
+	if (queue_needed) {
+		/*
+		 * Ok, we did not deliver this guy, find the correct place
+		 * to put it on the queue.
+		 */
+		if (sctp_place_control_in_stream(strm, asoc, control)) {
+			snprintf(msg, sizeof(msg),
+				 "Queue to str MID: %u duplicate",
+				 control->mid);
+			sctp_clean_up_control(stcb, control);
+			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
+			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+			*abort_flag = 1;
+		}
+	}
+}
+
+
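+/*
+ * Walk the control's mbuf chain, dropping zero-length mbufs, recomputing
+ * control->length and leaving control->tail_mbuf pointing at the last
+ * mbuf so that later appends are cheap. When the control is already on
+ * the read queue, the socket-buffer accounting is updated as well.
+ */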
+static void
+sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
+{
+	struct mbuf *m, *prev = NULL;
+	struct sctp_tcb *stcb;
+
+	stcb = control->stcb;
+	control->held_length = 0;
+	control->length = 0;
+	m = control->data;
+	while (m) {
+		if (SCTP_BUF_LEN(m) == 0) {
+			/* Skip mbufs with NO length */
+			if (prev == NULL) {
+				/* First one */
+				control->data = sctp_m_free(m);
+				m = control->data;
+			} else {
+				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+				m = SCTP_BUF_NEXT(prev);
+			}
+			if (m == NULL) {
+				control->tail_mbuf = prev;
+			}
+			continue;
+		}
+		prev = m;
+		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+		if (control->on_read_q) {
+			/*
+			 * On the read queue, so we must increment the
+			 * SB accounting; we assume the caller holds any SB locks.
+			 */
+			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
+		}
+		m = SCTP_BUF_NEXT(m);
+	}
+	if (prev) {
+		control->tail_mbuf = prev;
+	}
+}
+
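+/*
+ * Append an mbuf chain to a control using the cached tail pointer,
+ * again skipping zero-length mbufs, updating control->length and the
+ * socket-buffer accounting when the control sits on the read queue.
+ */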
+static void
+sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m)
+{
+	struct mbuf *prev = NULL;
+	struct sctp_tcb *stcb;
+
+	stcb = control->stcb;
+	if (stcb == NULL) {
+#ifdef INVARIANTS
+		panic("Control broken");
+#else
+		return;
+#endif
+	}
+	if (control->tail_mbuf == NULL) {
+		/* TSNH */
+		control->data = m;
+		sctp_setup_tail_pointer(control);
+		return;
+	}
+	control->tail_mbuf->m_next = m;
+	while (m) {
+		if (SCTP_BUF_LEN(m) == 0) {
+			/* Skip mbufs with NO length */
+			if (prev == NULL) {
+				/* First one */
+				control->tail_mbuf->m_next = sctp_m_free(m);
+				m = control->tail_mbuf->m_next;
+			} else {
+				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+				m = SCTP_BUF_NEXT(prev);
+			}
+			if (m == NULL) {
+				control->tail_mbuf = prev;
+			}
+			continue;
+		}
+		prev = m;
+		if (control->on_read_q) {
+			/*
+			 * On the read queue, so we must increment the
+			 * SB accounting; we assume the caller holds any SB locks.
+			 */
+			sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
+		}
+		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+		m = SCTP_BUF_NEXT(m);
+	}
+	if (prev) {
+		control->tail_mbuf = prev;
+	}
+}
+
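+/*
+ * Copy the per-message bookkeeping (stream, MID, sinfo_* fields, source
+ * net and port) from an existing control into a freshly allocated one,
+ * taking a reference on whoFrom. fsn_included starts at 0xffffffff,
+ * which appears to mean "no fragment merged yet".
+ */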
+static void 
+sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
+{
+	memset(nc, 0, sizeof(struct sctp_queued_to_read));
+	nc->sinfo_stream = control->sinfo_stream;
+	nc->mid = control->mid;
+	TAILQ_INIT(&nc->reasm);
+	nc->top_fsn = control->top_fsn;
+	nc->mid = control->mid;
+	nc->sinfo_flags = control->sinfo_flags;
+	nc->sinfo_ppid = control->sinfo_ppid;
+	nc->sinfo_context = control->sinfo_context;
+	nc->fsn_included = 0xffffffff;
+	nc->sinfo_tsn = control->sinfo_tsn;
+	nc->sinfo_cumtsn = control->sinfo_cumtsn;
+	nc->sinfo_assoc_id = control->sinfo_assoc_id;
+	nc->whoFrom = control->whoFrom;
+	atomic_add_int(&nc->whoFrom->ref_count, 1);
+	nc->stcb = control->stcb;
+	nc->port_from = control->port_from;
+}
+
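+/*
+ * Reset a control's fsn_included to the given TSN and, if the control
+ * was already queued on the endpoint's read queue, pull it back off.
+ */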
+static void 
+sctp_reset_a_control(struct sctp_queued_to_read *control,
+                     struct sctp_inpcb *inp, uint32_t tsn)
+{
+	control->fsn_included = tsn;
+	if (control->on_read_q) {
+		/* 
+		 * We have to purge it from there,
+		 * hopefully this will work :-)
+		 */
+		TAILQ_REMOVE(&inp->read_queue, control, next);
+		control->on_read_q = 0;
+	}
+}
+
+static int
+sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
+                               struct sctp_association *asoc,
+                               struct sctp_stream_in *strm,
+                               struct sctp_queued_to_read *control,
+                               uint32_t pd_point,
+                               int inp_read_lock_held)
+{
+	/* Special handling for the old un-ordered data chunk.
+	 * All the chunks/TSN's go to mid 0. So
+	 * we have to do the old style watching to see
+	 * if we have it all. If we return 1, no other
+	 * control entries on the un-ordered queue will
+	 * be looked at. In theory there should be no other
+	 * entries in reality, unless the guy is sending both
+	 * unordered NDATA and unordered DATA...
+	 */
+	struct sctp_tmit_chunk *chk, *lchk, *tchk;
+	uint32_t fsn;
+	struct sctp_queued_to_read *nc;
+	int cnt_added;
+
+	if (control->first_frag_seen == 0) {
+		/* Nothing we can do, we have not seen the first piece yet */
+		return (1);
+	}
+	/* Collapse any we can */
+	cnt_added = 0;
+restart:
+	fsn = control->fsn_included + 1;
+	/* Now what can we add? */
+	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
+		if (chk->rec.data.fsn == fsn) {
+			/* Ok, let's add it */
+			sctp_alloc_a_readq(stcb, nc);
+			if (nc == NULL) {
+				break;
+			}
+			memset(nc, 0, sizeof(struct sctp_queued_to_read));
+			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+			sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD);
+			fsn++;
+			cnt_added++;
+			chk = NULL;
+			if (control->end_added) {
+				/* We are done */
+				if (!TAILQ_EMPTY(&control->reasm)) {
+					/* 
+					 * Ok we have to move anything left on
+					 * the control queue to a new control.
+					 */
+					sctp_build_readq_entry_from_ctl(nc, control);
+					tchk = TAILQ_FIRST(&control->reasm);
+					if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
+						asoc->size_on_reasm_queue -= tchk->send_size;
+						sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+						nc->first_frag_seen = 1;
+						nc->fsn_included = tchk->rec.data.fsn;
+						nc->data = tchk->data;
+						nc->sinfo_ppid = tchk->rec.data.ppid;
+						nc->sinfo_tsn = tchk->rec.data.tsn;
+						sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
+						tchk->data = NULL;
+						sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
+						sctp_setup_tail_pointer(nc);
+						tchk = TAILQ_FIRST(&control->reasm);
+					}
+					/* Spin the rest onto the queue */
+					while (tchk) {
+						TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
+						TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
+						tchk = TAILQ_FIRST(&control->reasm);
+					}
+					/* Now let's add it to the queue after removing control */
+					TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
+					nc->on_strm_q = SCTP_ON_UNORDERED;
+					if (control->on_strm_q) {
+						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+						control->on_strm_q = 0;
+					}
+				}
+				if (control->pdapi_started) {
+					strm->pd_api_started = 0;
+					control->pdapi_started = 0;
+				}
+				if (control->on_strm_q) {
+					TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+					control->on_strm_q = 0;
+					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+				}
+				if (control->on_read_q == 0) {
+					sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+							  &stcb->sctp_socket->so_rcv, control->end_added,
+							  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+#if defined(__Userspace__)
+				} else {
+					sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, inp_read_lock_held);
+#endif
+				}
+				sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+				if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
+					/* Switch to the new guy and continue */
+					control = nc;
+					goto restart;
+				} else {
+					if (nc->on_strm_q == 0) {
+						sctp_free_a_readq(stcb, nc);
+					}
+				}
+				return (1);
+			} else {
+				sctp_free_a_readq(stcb, nc);
+			}
+		} else {
+			/* Can't add more */
+			break;
+		}
+	}
+	if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
+		strm->pd_api_started = 1;
+		control->pdapi_started = 1;
+		sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+		                  &stcb->sctp_socket->so_rcv, control->end_added,
+		                  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+		return (0);
+	} else {
+		return (1);
+	}
+}
+
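+/*
+ * For old-style (non I-DATA) unordered fragments, which all share MID 0,
+ * several messages can end up interleaved on one control. A FIRST
+ * fragment either initializes the control, swaps with the currently
+ * included data when it carries a lower TSN, or the chunk is sorted
+ * into the reassembly list; duplicate FSNs abort the association.
+ */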
+static void
+sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
+                               struct sctp_association *asoc,
+                               struct sctp_queued_to_read *control,
+                               struct sctp_tmit_chunk *chk,
+                               int *abort_flag)
+{
+	struct sctp_tmit_chunk *at;
+	int inserted;
+	/*
+	 * Here we need to place the chunk into the control structure
+	 * sorted in the correct order.
+	 */
+	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		/* It's the very first one. */
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"chunk is a first fsn: %u becomes fsn_included\n",
+			chk->rec.data.fsn);
+		if (control->first_frag_seen) {
+			/*
+			 * In old un-ordered we can reassemble multiple
+			 * messages on one control, as long as the next
+			 * FIRST is greater than the old first (TSN,
+			 * i.e. FSN wise).
+			 */
+			struct mbuf *tdata;
+			uint32_t tmp;
+
+			if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
+				/* Easy way the start of a new guy beyond the lowest */
+				goto place_chunk;
+			}
+			if ((chk->rec.data.fsn == control->fsn_included) ||
+			    (control->pdapi_started)) {
+				/* 
+				 * Ok, this should not happen; if it does,
+				 * we started the pd-api on the higher TSN (since
+				 * the equals case is a TSN failure, it must be that).
+				 *
+				 * We are completely hosed in that case, since I have
+				 * no way to recover. This really will only happen
+				 * if we can get more TSN's higher before the pd-api point.
+				 */
+				sctp_abort_in_reasm(stcb, control, chk,
+						    abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
+
+				return;
+			}
+			/*
+			 * Ok we have two firsts and the one we just got
+			 * is smaller than the one we previously placed... yuck!
+			 * We must swap them out.
+			 */
+			/* swap the mbufs */
+			tdata = control->data;
+			control->data = chk->data;
+			chk->data = tdata;
+			/* Save the lengths */
+			chk->send_size = control->length;
+			/* Recompute length of control and tail pointer */
+			sctp_setup_tail_pointer(control);
+			/* Fix the FSN included */
+			tmp = control->fsn_included;
+			control->fsn_included = chk->rec.data.fsn;
+			chk->rec.data.fsn = tmp;
+			/* Fix the TSN included */
+			tmp = control->sinfo_tsn;
+			control->sinfo_tsn = chk->rec.data.tsn;
+			chk->rec.data.tsn = tmp;
+			/* Fix the PPID included */
+			tmp = control->sinfo_ppid;
+			control->sinfo_ppid = chk->rec.data.ppid;
+			chk->rec.data.ppid = tmp;
+			/* Fix tail pointer */
+			goto place_chunk;
+		}
+		control->first_frag_seen = 1;
+		control->fsn_included = chk->rec.data.fsn;
+		control->top_fsn = chk->rec.data.fsn;
+		control->sinfo_tsn = chk->rec.data.tsn;
+		control->sinfo_ppid = chk->rec.data.ppid;
+		control->data = chk->data;
+		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+		chk->data = NULL;
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		sctp_setup_tail_pointer(control);
+		return;
+	}
+place_chunk:
+	inserted = 0;
+	TAILQ_FOREACH(at, &control->reasm, sctp_next) {
+		if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
+			/*
+			 * This one in queue is bigger than the new one, insert
+			 * the new one before at.
+			 */
+			asoc->size_on_reasm_queue += chk->send_size;
+			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+			inserted = 1;
+			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+			break;
+		} else if (at->rec.data.fsn == chk->rec.data.fsn) {
+			/* 
+			 * They sent a duplicate fsn number. This
+			 * really should not happen since the FSN is
+			 * a TSN and it should have been dropped earlier.
+			 */
+			sctp_abort_in_reasm(stcb, control, chk,
+			                    abort_flag,
+			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
+			return;
+		}
+
+	}
+	if (inserted == 0) {
+		/* It's at the end */
+		asoc->size_on_reasm_queue += chk->send_size;
+		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+		control->top_fsn = chk->rec.data.fsn;
+		TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
+	}
+}
+
+static int
+sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                         struct sctp_stream_in *strm, int inp_read_lock_held)
+{
+	/*
+	 * Given a stream, strm, see if any of
+	 * the SSN's on it that are fragmented
+	 * are ready to deliver. If so, go ahead
+	 * and place them on the read queue. When
+	 * placing one whose end has been reached, we
+	 * also remove it from the stream's queue.
+	 */
+	struct sctp_queued_to_read *control, *nctl = NULL;
+	uint32_t next_to_del;
+	uint32_t pd_point;
+	int ret = 0;
+
+	if (stcb->sctp_socket) {
+		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
+			       stcb->sctp_ep->partial_delivery_point);
+	} else {
+		pd_point = stcb->sctp_ep->partial_delivery_point;
+	}
+	control = TAILQ_FIRST(&strm->uno_inqueue);
+
+	if ((control != NULL) &&
+	    (asoc->idata_supported == 0)) {
+		/* Special handling needed for "old" data format */
+		if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
+			goto done_un;
+		}
+	}
+	if (strm->pd_api_started) {
+		/* Can't add more */
+		return (0);
+	}
+	while (control) {
+		SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
+			control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
+		nctl = TAILQ_NEXT(control, next_instrm);
+		if (control->end_added) {
+			/* We just put the last bit on */
+			if (control->on_strm_q) {
+#ifdef INVARIANTS
+				if (control->on_strm_q != SCTP_ON_UNORDERED) {
+					panic("Huh control: %p on_q: %d -- not unordered?",
+					      control, control->on_strm_q);
+				}
+#endif
+				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+				control->on_strm_q = 0;
+			}
+			if (control->on_read_q == 0) {
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+			}
+		} else {
+			/* Can we do a PD-API for this un-ordered guy? */
+			if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
+				strm->pd_api_started = 1;
+				control->pdapi_started = 1;
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+
+				break;
+			}
+		}
+		control = nctl;
+	}
+done_un:
+	control = TAILQ_FIRST(&strm->inqueue);
+	if (strm->pd_api_started) {
+		/* Can't add more */
+		return (0);
+	}
+	if (control == NULL) {
+		return (ret);
+	}
+	if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
+		/* Ok, the guy at the top that was being partially delivered
+		 * has completed, so we remove it. Note that
+		 * the pd_api flag was taken off when the
+		 * chunk was merged on in sctp_queue_data_for_reasm below.
+		 */
+		nctl = TAILQ_NEXT(control, next_instrm);
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
+			control, control->end_added, control->mid,
+			control->top_fsn, control->fsn_included,
+			strm->last_mid_delivered);
+		if (control->end_added) {
+			if (control->on_strm_q) {
+#ifdef INVARIANTS
+				if (control->on_strm_q != SCTP_ON_ORDERED) {
+					panic("Huh control: %p on_q: %d -- not ordered?",
+					      control, control->on_strm_q);
+				}
+#endif
+				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+				control->on_strm_q = 0;
+			}
+			if (strm->pd_api_started && control->pdapi_started) {
+				control->pdapi_started = 0;
+				strm->pd_api_started = 0;
+			}
+			if (control->on_read_q == 0) {
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+			}
+			control = nctl;
+		}
+	}
+	if (strm->pd_api_started) {
+		/* Can't add more; we must have gotten an un-ordered message above that is being partially delivered. */
+		return (0);
+	}
+deliver_more:
+	next_to_del = strm->last_mid_delivered + 1;
+	if (control) {
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
+			control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
+			next_to_del);
+		nctl = TAILQ_NEXT(control, next_instrm);
+		if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
+		    (control->first_frag_seen)) {
+			int done;
+
+			/* Ok we can deliver it onto the stream. */
+			if (control->end_added) {
+				/* We are done with it afterwards */
+				if (control->on_strm_q) {
+#ifdef INVARIANTS
+					if (control->on_strm_q != SCTP_ON_ORDERED) {
+						panic("Huh control: %p on_q: %d -- not ordered?",
+						      control, control->on_strm_q);
+					}
+#endif
+					SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
+					TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+					control->on_strm_q = 0;
+				}
+				ret++;
+			}
+			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+				/* A singleton now slipping through - mark it non-revokable too */
+				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+			} else if (control->end_added == 0) {
+				/* Check if we can defer adding until it's all there */
+				if ((control->length < pd_point) || (strm->pd_api_started)) {
+					/* Don't need it or cannot add more (one being delivered that way) */
+					goto out;
+				}
+			}
+			done = (control->end_added) && (control->last_frag_seen);
+			if (control->on_read_q == 0) {
+				sctp_add_to_readq(stcb->sctp_ep, stcb,
+						  control,
+						  &stcb->sctp_socket->so_rcv, control->end_added,
+						  inp_read_lock_held, SCTP_SO_NOT_LOCKED);
+			}
+			strm->last_mid_delivered = next_to_del;
+			if (done) {
+				control = nctl;
+				goto deliver_more;
+			} else {
+				/* We are now doing PD API */
+				strm->pd_api_started = 1;
+				control->pdapi_started = 1;
+			}
+		}
+	}
+out:
+	return (ret);
+}
+
+
+void
+sctp_add_chk_to_control(struct sctp_queued_to_read *control,
+			struct sctp_stream_in *strm,
+			struct sctp_tcb *stcb, struct sctp_association *asoc,
+			struct sctp_tmit_chunk *chk, int hold_rlock)
+{
+	/*
+	 * Given a control and a chunk, merge the
+	 * data from the chk onto the control and free
+	 * up the chunk resources.
+	 */
+	int i_locked = 0;
+
+	if (control->on_read_q && (hold_rlock == 0)) {
+		/*
+		 * It's being pd-api'd, so we must
+		 * take the read lock.
+		 */
+		SCTP_INP_READ_LOCK(stcb->sctp_ep);
+		i_locked = 1;
+	}
+	if (control->data == NULL) {
+		control->data = chk->data;
+		sctp_setup_tail_pointer(control);
+	} else {
+		sctp_add_to_tail_pointer(control, chk->data);
+	}
+	control->fsn_included = chk->rec.data.fsn;
+	asoc->size_on_reasm_queue -= chk->send_size;
+	sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+	sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+	chk->data = NULL;
+	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		control->first_frag_seen = 1;
+		control->sinfo_tsn = chk->rec.data.tsn;
+		control->sinfo_ppid = chk->rec.data.ppid;
+	}
+	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+		/* It's complete */
+		if ((control->on_strm_q) && (control->on_read_q)) {
+			if (control->pdapi_started) {
+				control->pdapi_started = 0;
+				strm->pd_api_started = 0;
+			}
+			if (control->on_strm_q == SCTP_ON_UNORDERED) {
+				/* Unordered */
+				TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+				control->on_strm_q = 0;
+			} else if (control->on_strm_q == SCTP_ON_ORDERED) {
+				/* Ordered */
+				TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+				control->on_strm_q = 0;
+#ifdef INVARIANTS
+			} else if (control->on_strm_q) {
+				panic("Unknown state on ctrl: %p on_strm_q: %d", control,
+				      control->on_strm_q);
+#endif
+			}
+		}
+		control->end_added = 1;
+		control->last_frag_seen = 1;
+	}
+	if (i_locked) {
+		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+	}
+	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+}
+
+/*
+ * Dump onto the re-assembly queue, in its proper place. After dumping on the
+ * queue, see if anything can be delivered. If so, pull it off (or as much as
+ * we can). If we run out of space, then we must dump what we can and set the
+ * appropriate flag to say we queued what we could.
+ */
+static void
+sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
+			  struct sctp_queued_to_read *control,
+			  struct sctp_tmit_chunk *chk,
+			  int created_control,
+			  int *abort_flag, uint32_t tsn)
+{
+	uint32_t next_fsn;
+	struct sctp_tmit_chunk *at, *nat;
+	struct sctp_stream_in *strm;
+	int do_wakeup, unordered;
+
+	strm = &asoc->strmin[control->sinfo_stream];
+	/*
+	 * For old un-ordered data chunks.
+	 */
+	if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
+		unordered = 1;
+	} else {
+		unordered = 0;
+	}
+	/* Must be added to the stream-in queue */
+	if (created_control) {
+		if (sctp_place_control_in_stream(strm, asoc, control)) {
+			/* Duplicate SSN? */
+			sctp_clean_up_control(stcb, control);
+			sctp_abort_in_reasm(stcb, control, chk,
+					    abort_flag,
+					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
+			return;
+		}
+		if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
+			/* Ok, we created this control; now
+			 * let's validate that it's legal, i.e. there
+			 * is a B bit set. If not, and we have
+			 * up to the cum-ack, then it's invalid.
+			 */
+			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
+				sctp_abort_in_reasm(stcb, control, chk,
+				                    abort_flag,
+				                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
+				return;
+			}
+		}
+	}
+	if ((asoc->idata_supported == 0) && (unordered == 1)) {
+		sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
+		return;
+	}
+	/*
+	 * Ok, we must queue the chunk into the reassembly portion:
+	 *  o if it's the first, it goes into the control's mbuf.
+	 *  o if it's not first but the next in sequence, it goes to the control,
+	 *    and each succeeding one in order also goes.
+	 *  o if it's not in order, we place it on the list in its place.
+	 */
+	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+		/* It's the very first one. */
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"chunk is a first fsn: %u becomes fsn_included\n",
+			chk->rec.data.fsn);
+		if (control->first_frag_seen) {
+			/*
+			 * Error on the sender's part: they either
+			 * sent us two data chunks with FIRST,
+			 * or they sent two un-ordered chunks that
+			 * were fragmented at the same time in the same stream.
+			 */
+			sctp_abort_in_reasm(stcb, control, chk,
+			                    abort_flag,
+			                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
+			return;
+		}
+		control->first_frag_seen = 1;
+		control->sinfo_ppid = chk->rec.data.ppid;
+		control->sinfo_tsn = chk->rec.data.tsn;
+		control->fsn_included = chk->rec.data.fsn;
+		control->data = chk->data;
+		sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
+		chk->data = NULL;
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		sctp_setup_tail_pointer(control);
+	} else {
+		/* Place the chunk in our list */
+		int inserted = 0;
+		if (control->last_frag_seen == 0) {
+			/* Still willing to raise highest FSN seen */
+			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"We have a new top_fsn: %u\n",
+					chk->rec.data.fsn);
+				control->top_fsn = chk->rec.data.fsn;
+			}
+			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"The last fsn is now in place fsn: %u\n",
+					chk->rec.data.fsn);
+				control->last_frag_seen = 1;
+			}
+			if (asoc->idata_supported || control->first_frag_seen) {
+				/* 
+				 * For IDATA we always check since we know that
+				 * the first fragment is 0. For old DATA we have
+				 * to receive the first before we know the first FSN
+				 * (which is the TSN).
+				 */
+				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+					/* We have already delivered up to this so its a dup */
+					sctp_abort_in_reasm(stcb, control, chk,
+							    abort_flag,
+							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
+					return;
+				}
+			}
+		} else {
+			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+				/* Second last? huh? */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Duplicate last fsn: %u (top: %u) -- abort\n",
+					chk->rec.data.fsn, control->top_fsn);
+				sctp_abort_in_reasm(stcb, control,
+						    chk, abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
+				return;
+			}
+			if (asoc->idata_supported || control->first_frag_seen) {
+				/* 
+				 * For IDATA we always check since we know that
+				 * the first fragment is 0. For old DATA we have
+				 * to receive the first before we know the first FSN
+				 * (which is the TSN).
+				 */
+
+				if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
+					/* We have already delivered up to this so its a dup */
+					SCTPDBG(SCTP_DEBUG_XXX,
+						"New fsn: %u is already seen in included_fsn: %u -- abort\n",
+						chk->rec.data.fsn, control->fsn_included);
+					sctp_abort_in_reasm(stcb, control, chk,
+							    abort_flag,
+							    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
+					return;
+				}
+			}
+			/* validate not beyond top FSN if we have seen last one */
+			if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"New fsn: %u is beyond or at top_fsn: %u -- abort\n",
+					chk->rec.data.fsn,
+					control->top_fsn);
+				sctp_abort_in_reasm(stcb, control, chk,
+						    abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
+				return;
+			}
+		}
+		/*
+		 * If we reach here, we need to place the
+		 * new chunk in the reassembly for this 
+		 * control.
+		 */
+		SCTPDBG(SCTP_DEBUG_XXX,
+			"chunk is a not first fsn: %u needs to be inserted\n",
+			chk->rec.data.fsn);
+		TAILQ_FOREACH(at, &control->reasm, sctp_next) {
+			if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
+				/*
+				 * This one in queue is bigger than the new one, insert
+				 * the new one before at.
+				 */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Insert it before fsn: %u\n",
+					at->rec.data.fsn);
+				asoc->size_on_reasm_queue += chk->send_size;
+				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+				TAILQ_INSERT_BEFORE(at, chk, sctp_next);
+				inserted = 1;
+				break;
+			} else if (at->rec.data.fsn == chk->rec.data.fsn) {
+				/* Gak, he sent me a duplicate str seq number */
+				/*
+				 * foo bar, I guess I will just free this new guy,
+				 * should we abort too? FIX ME MAYBE? Or it COULD be
+				 * that the SSN's have wrapped. Maybe I should
+				 * compare to TSN somehow... sigh for now just blow
+				 * away the chunk!
+				 */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Duplicate to fsn: %u -- abort\n",
+					at->rec.data.fsn);
+				sctp_abort_in_reasm(stcb, control,
+						    chk, abort_flag,
+						    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
+				return;
+			}
+		}
+		if (inserted == 0) {
+			/* Goes on the end */
+			SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
+				chk->rec.data.fsn);
+			asoc->size_on_reasm_queue += chk->send_size;
+			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
+			TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
+		}
+	}
+	/*
+	 * Ok, let's see if we can suck any up into the control
+	 * structure that are in seq if it makes sense.
+	 */
+	do_wakeup = 0;
+	/*
+	 * If the first fragment has not been
+	 * seen there is no sense in looking.
+	 */
+	if (control->first_frag_seen) {
+		next_fsn = control->fsn_included + 1;
+		TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
+			if (at->rec.data.fsn == next_fsn) {
+				/* We can add this one now to the control */
+				SCTPDBG(SCTP_DEBUG_XXX,
+					"Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
+					control, at,
+					at->rec.data.fsn,
+					next_fsn, control->fsn_included);
+				TAILQ_REMOVE(&control->reasm, at, sctp_next);
+				sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
+				if (control->on_read_q) {
+					do_wakeup = 1;
+				}
+				next_fsn++;
+				if (control->end_added && control->pdapi_started) {
+					if (strm->pd_api_started) {
+						strm->pd_api_started = 0;
+						control->pdapi_started = 0;
+					}
+					if (control->on_read_q == 0) {
+						sctp_add_to_readq(stcb->sctp_ep, stcb,
+								  control,
+								  &stcb->sctp_socket->so_rcv, control->end_added,
+								  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+						do_wakeup = 1;
+					}
+					break;
+				}
+			} else {
+				break;
+			}
+		}
+	}
+	if (do_wakeup) {
+#if defined(__Userspace__)
+		sctp_invoke_recv_callback(stcb->sctp_ep, stcb, control, SCTP_READ_LOCK_NOT_HELD);
+#endif
+		/* Need to wakeup the reader */
+		sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+	}
+}
+
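+/*
+ * Find the reassembly control for a given MID on a stream's ordered or
+ * unordered in-queue. For old DATA (no I-DATA support) all unordered
+ * traffic is tracked on the first entry of the unordered queue.
+ */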
+static struct sctp_queued_to_read *
+sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
+{
+	struct sctp_queued_to_read *control;
+
+	if (ordered) {
+		TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
+			if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
+				break;
+			}
+		}
+	} else {
+		if (idata_supported) {
+			TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
+				if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
+					break;
+				}
+			}
+		} else {
+			control = TAILQ_FIRST(&strm->uno_inqueue);
+		}
+	}
+	return (control);
+}
+
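+/*
+ * Process a single DATA or I-DATA chunk: parse the header fields, filter
+ * duplicate and out-of-window TSNs, allocate or locate the read-queue
+ * control, and then either express-deliver a complete in-order message
+ * or hand the fragment to the reassembly code. Returns 1 if the TSN was
+ * marked as received, 0 otherwise; *abort_flag is set whenever the
+ * association had to be aborted.
+ */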
+static int
+sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
+			  struct mbuf **m, int offset,  int chk_length,
+			  struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
+			  int *break_flag, int last_chunk, uint8_t chk_type)
+{
+	/* Process a data chunk */
+	/* struct sctp_tmit_chunk *chk; */
+	struct sctp_tmit_chunk *chk = NULL;
+	uint32_t tsn, fsn, gap, mid;
+	struct mbuf *dmbuf;
+	int the_len;
+	int need_reasm_check = 0;
+	uint16_t sid;
+	struct mbuf *op_err;
+	char msg[SCTP_DIAG_INFO_LEN];
+	struct sctp_queued_to_read *control = NULL;
+	uint32_t ppid;
+	uint8_t chk_flags;
+	struct sctp_stream_reset_list *liste;
+	int ordered;
+	size_t clen;
+	int created_control = 0;
+
+	if (chk_type == SCTP_IDATA) {
+		struct sctp_idata_chunk *chunk, chunk_buf;
+
+		chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
+		                                                 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
+		chk_flags = chunk->ch.chunk_flags;
+		clen = sizeof(struct sctp_idata_chunk);
+		tsn = ntohl(chunk->dp.tsn);
+		sid = ntohs(chunk->dp.sid);
+		mid = ntohl(chunk->dp.mid);
+		if (chk_flags & SCTP_DATA_FIRST_FRAG) {
+			fsn = 0;
+			ppid = chunk->dp.ppid_fsn.ppid;
+		} else {
+			fsn = ntohl(chunk->dp.ppid_fsn.fsn);
+			ppid = 0xffffffff; /* Use as an invalid value. */
+		}
+	} else {
+		struct sctp_data_chunk *chunk, chunk_buf;
+
+		chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
+		                                                sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
+		chk_flags = chunk->ch.chunk_flags;
+		clen = sizeof(struct sctp_data_chunk);
+		tsn = ntohl(chunk->dp.tsn);
+		sid = ntohs(chunk->dp.sid);
+		mid = (uint32_t)(ntohs(chunk->dp.ssn));
+		fsn = tsn;
+		ppid = chunk->dp.ppid;
+	}
+	if ((size_t)chk_length == clen) {
+		/*
+		 * Need to send an abort since we had an
+		 * empty data chunk.
+		 */
+		op_err = sctp_generate_no_user_data_cause(tsn);
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_flag = 1;
+		return (0);
+	}
+	if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
+		asoc->send_sack = 1;
+	}
+	ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
+	}
+	if (stcb == NULL) {
+		return (0);
+	}
+	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn);
+	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
+		/* It is a duplicate */
+		SCTP_STAT_INCR(sctps_recvdupdata);
+		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+			/* Record a dup for the next outbound sack */
+			asoc->dup_tsns[asoc->numduptsns] = tsn;
+			asoc->numduptsns++;
+		}
+		asoc->send_sack = 1;
+		return (0);
+	}
+	/* Calculate the number of TSN's between the base and this TSN */
+	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
+	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
+		/* Can't hold the bit in the mapping at max array, toss it */
+		return (0);
+	}
+	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
+		SCTP_TCB_LOCK_ASSERT(stcb);
+		if (sctp_expand_mapping_array(asoc, gap)) {
+			/* Can't expand, drop it */
+			return (0);
+		}
+	}
+	if (SCTP_TSN_GT(tsn, *high_tsn)) {
+		*high_tsn = tsn;
+	}
+	/* See if we have received this one already */
+	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
+	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
+		SCTP_STAT_INCR(sctps_recvdupdata);
+		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
+			/* Record a dup for the next outbound sack */
+			asoc->dup_tsns[asoc->numduptsns] = tsn;
+			asoc->numduptsns++;
+		}
+		asoc->send_sack = 1;
+		return (0);
+	}
+	/*
+	 * Check to see about the GONE flag, duplicates would cause a sack
+	 * to be sent up above
+	 */
+	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	     (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
+		/*
+		 * wait a minute, this guy is gone, there is no longer a
+		 * receiver. Send peer an ABORT!
+		 */
+		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_flag = 1;
+		return (0);
+	}
+	/*
+	 * Now before going further we see if there is room. If NOT then we
+	 * MAY let one through only IF this TSN is the one we are waiting
+	 * for on a partial delivery API.
+	 */
+
+	/* Is the stream valid? */
+	if (sid >= asoc->streamincnt) {
+		struct sctp_error_invalid_stream *cause;
+
+		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
+		                               0, M_NOWAIT, 1, MT_DATA);
+		if (op_err != NULL) {
+			/* add some space up front so prepend will work well */
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+			cause = mtod(op_err, struct sctp_error_invalid_stream *);
+			/*
+			 * Error causes are just params, and this one has
+			 * two back-to-back phdrs: one with the error type
+			 * and size, the other with the stream id and a reserved field.
+			 */
+			SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
+			cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
+			cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
+			cause->stream_id = htons(sid);
+			cause->reserved = htons(0);
+			sctp_queue_op_err(stcb, op_err);
+		}
+		SCTP_STAT_INCR(sctps_badsid);
+		SCTP_TCB_LOCK_ASSERT(stcb);
+		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+			asoc->highest_tsn_inside_nr_map = tsn;
+		}
+		if (tsn == (asoc->cumulative_tsn + 1)) {
+			/* Update cum-ack */
+			asoc->cumulative_tsn = tsn;
+		}
+		return (0);
+	}
+	/*
+	 * If it's a fragmented message, let's see if we can
+	 * find the control on the reassembly queues.
+	 */
+	if ((chk_type == SCTP_IDATA) &&
+	    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
+	    (fsn == 0)) {
+		/* 
+		 *  The first *must* be fsn 0, and other 
+		 *  (middle/end) pieces can *not* be fsn 0.
+		 * XXX: This can happen in case of a wrap around.
+		 *      Ignore it for now.
+		 */
+		snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x",
+		         mid, chk_flags);
+		goto err_out;
+	}
+	control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
+	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
+		chk_flags, control);
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+		/* See if we can find the re-assembly entity */
+		if (control != NULL) {
+			/* We found something, does it belong? */
+			if (ordered && (mid != control->mid)) {
+				snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
+			err_out:
+				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
+				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+				*abort_flag = 1;
+				return (0);
+			}
+			if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
+				/* We can't have a switched order with an unordered chunk */
+				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
+					 tsn);
+				goto err_out;
+			}
+			if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
+				/* We can't have a switched unordered with an ordered chunk */
+				snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
+					 tsn);
+				goto err_out;
+			}
+		}
+	} else {
+		/* It's a complete segment. Let's validate we
+		 * don't have a re-assembly going on with
+		 * the same Stream/Seq (for ordered) or in
+		 * the same Stream for unordered.
+		 */
+		if (control != NULL) {
+			if (ordered || asoc->idata_supported) {
+				SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
+					chk_flags, mid);
+				snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
+				goto err_out;
+			} else {
+				if ((tsn == control->fsn_included + 1) &&
+				    (control->end_added == 0)) {
+					snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included);
+					goto err_out;
+				} else {
+					control = NULL;
+				}
+			}
+		}
+	}
+	/* now do the tests */
+	if (((asoc->cnt_on_all_streams +
+	      asoc->cnt_on_reasm_queue +
+	      asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
+	    (((int)asoc->my_rwnd) <= 0)) {
+		/*
+		 * When we have NO room in the rwnd we check to make sure
+		 * the reader is doing its job...
+		 */
+		if (stcb->sctp_socket->so_rcv.sb_cc) {
+			/* some to read, wake-up */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			struct socket *so;
+
+			so = SCTP_INP_SO(stcb->sctp_ep);
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 1);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+				/* assoc was freed while we were unlocked */
+				SCTP_SOCKET_UNLOCK(so, 1);
+				return (0);
+			}
+#endif
+			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		}
+		/* now is it in the mapping array of what we have accepted? */
+		if (chk_type == SCTP_DATA) {
+			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
+			    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+				/* Nope not in the valid range dump it */
+			dump_packet:
+				sctp_set_rwnd(stcb, asoc);
+				if ((asoc->cnt_on_all_streams +
+				     asoc->cnt_on_reasm_queue +
+				     asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
+					SCTP_STAT_INCR(sctps_datadropchklmt);
+				} else {
+					SCTP_STAT_INCR(sctps_datadroprwnd);
+				}
+				*break_flag = 1;
+				return (0);
+			}
+		} else {
+			if (control == NULL) {
+				goto dump_packet;
+			}
+			if (SCTP_TSN_GT(fsn, control->top_fsn)) {
+				goto dump_packet;
+			}
+		}
+	}
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
+		asoc->tsn_in_at = 0;
+		asoc->tsn_in_wrapped = 1;
+	}
+	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
+	asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
+	asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
+	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
+	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
+	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
+	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
+	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
+	asoc->tsn_in_at++;
+#endif
+	/*
+	 * Before we continue lets validate that we are not being fooled by
+	 * an evil attacker. We can only have Nk chunks based on our TSN
+	 * spread allowed by the mapping array N * 8 bits, so there is no
+	 * way our stream sequence numbers could have wrapped. We of course
+	 * only validate the FIRST fragment so the bit must be set.
+	 */
+	if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
+	    (TAILQ_EMPTY(&asoc->resetHead)) &&
+	    (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
+	    SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
+		/* The incoming sseq is behind where we last delivered? */
+		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
+			mid, asoc->strmin[sid].last_mid_delivered);
+
+		if (asoc->idata_supported) {
+			snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
+			         asoc->strmin[sid].last_mid_delivered,
+			         tsn,
+			         sid,
+			         mid);
+		} else {
+			snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
+			         (uint16_t)asoc->strmin[sid].last_mid_delivered,
+			         tsn,
+			         sid,
+			         (uint16_t)mid);
+		}
+		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_flag = 1;
+		return (0);
+	}
+	if (chk_type == SCTP_IDATA) {
+		the_len = (chk_length - sizeof(struct sctp_idata_chunk));
+	} else {
+		the_len = (chk_length - sizeof(struct sctp_data_chunk));
+	}
+	if (last_chunk == 0) {
+		if (chk_type == SCTP_IDATA) {
+			dmbuf = SCTP_M_COPYM(*m,
+					     (offset + sizeof(struct sctp_idata_chunk)),
+					     the_len, M_NOWAIT);
+		} else {
+			dmbuf = SCTP_M_COPYM(*m,
+					     (offset + sizeof(struct sctp_data_chunk)),
+					     the_len, M_NOWAIT);
+		}
+#ifdef SCTP_MBUF_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
+		}
+#endif
+	} else {
+		/* We can steal the last chunk */
+		int l_len;
+		dmbuf = *m;
+		/* lop off the top part */
+		if (chk_type == SCTP_IDATA) {
+			m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
+		} else {
+			m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
+		}
+		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
+			l_len = SCTP_BUF_LEN(dmbuf);
+		} else {
+			/* need to count up the size; hopefully
+			 * we do not hit this too often :-0
+			 */
+			struct mbuf *lat;
+
+			l_len = 0;
+			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
+				l_len += SCTP_BUF_LEN(lat);
+			}
+		}
+		if (l_len > the_len) {
+			/* Trim the extra bytes off the end too */
+			m_adj(dmbuf, -(l_len - the_len));
+		}
+	}
+	if (dmbuf == NULL) {
+		SCTP_STAT_INCR(sctps_nomem);
+		return (0);
+	}
+	/*
+	 * Now no matter what, we need a control; get one
+	 * if we don't have one (we may have gotten it
+	 * above when we found the message was fragmented).
+	 */
+	if (control == NULL) {
+		sctp_alloc_a_readq(stcb, control);
+		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
+					   ppid,
+					   sid,
+					   chk_flags,
+					   NULL, fsn, mid);
+		if (control == NULL) {
+			SCTP_STAT_INCR(sctps_nomem);
+			return (0);
+		}
+		if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+			control->data = dmbuf;
+			control->tail_mbuf = NULL;
+			control->end_added = 1;
+			control->last_frag_seen = 1;
+			control->first_frag_seen = 1;
+			control->fsn_included = fsn;
+			control->top_fsn = fsn;
+		}
+		created_control = 1;
+	}
+	SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
+		chk_flags, ordered, mid, control);
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
+	    TAILQ_EMPTY(&asoc->resetHead) &&
+	    ((ordered == 0) ||
+	     (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
+	      TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
+		/* Candidate for express delivery */
+		/*
+		 * It's not fragmented, no PD-API is up, nothing is in the
+		 * delivery queue, it's un-ordered OR ordered and the next to
+		 * deliver AND nothing else is stuck on the stream queue,
+		 * and there is room for it in the socket buffer. Let's just
+		 * stuff it up the buffer....
+		 */
+		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+			asoc->highest_tsn_inside_nr_map = tsn;
+		}
+		SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
+			control, mid);
+
+		sctp_add_to_readq(stcb->sctp_ep, stcb,
+		                  control, &stcb->sctp_socket->so_rcv,
+		                  1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+
+		if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
+			/* for ordered, bump what we delivered */
+			asoc->strmin[sid].last_mid_delivered++;
+		}
+		SCTP_STAT_INCR(sctps_recvexpress);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+			sctp_log_strm_del_alt(stcb, tsn, mid, sid,
+					      SCTP_STR_LOG_FROM_EXPRS_DEL);
+		}
+		control = NULL;
+		goto finish_express_del;
+	}
+
+	/* Now will we need a chunk too? */
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
+		sctp_alloc_a_chunk(stcb, chk);
+		if (chk == NULL) {
+			/* No memory so we drop the chunk */
+			SCTP_STAT_INCR(sctps_nomem);
+			if (last_chunk == 0) {
+				/* we copied it, free the copy */
+				sctp_m_freem(dmbuf);
+			}
+			return (0);
+		}
+		chk->rec.data.tsn = tsn;
+		chk->no_fr_allowed = 0;
+		chk->rec.data.fsn = fsn;
+		chk->rec.data.mid = mid;
+		chk->rec.data.sid = sid;
+		chk->rec.data.ppid = ppid;
+		chk->rec.data.context = stcb->asoc.context;
+		chk->rec.data.doing_fast_retransmit = 0;
+		chk->rec.data.rcv_flags = chk_flags;
+		chk->asoc = asoc;
+		chk->send_size = the_len;
+		chk->whoTo = net;
+		SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n",
+			chk,
+			control, mid);
+		atomic_add_int(&net->ref_count, 1);
+		chk->data = dmbuf;
+	}
+	/* Set the appropriate TSN mark */
+	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
+			asoc->highest_tsn_inside_nr_map = tsn;
+		}
+	} else {
+		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
+			asoc->highest_tsn_inside_map = tsn;
+		}
+	}
+	/* Now is it complete (i.e. not fragmented)? */
+	if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+		/*
+		 * Special check for when streams are resetting. We
+		 * could be smarter about this and check the
+		 * actual stream to see if it is not being reset...
+		 * that way we would not create a HOLB when amongst
+		 * streams being reset and those not being reset.
+		 *
+		 */
+		if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+		    SCTP_TSN_GT(tsn, liste->tsn)) {
+			/*
+			 * yep its past where we need to reset... go
+			 * ahead and queue it.
+			 */
+			if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
+				/* first one on */
+				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+			} else {
+				struct sctp_queued_to_read *ctlOn, *nctlOn;
+				unsigned char inserted = 0;
+				TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
+					if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
+
+						continue;
+					} else {
+						/* found it */
+						TAILQ_INSERT_BEFORE(ctlOn, control, next);
+						inserted = 1;
+						break;
+					}
+				}
+				if (inserted == 0) {
+					/*
+					 * must be put at the end of the
+					 * pending_reply_queue.
+					 */
+					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
+				}
+			}
+			goto finish_express_del;
+		}
+		if (chk_flags & SCTP_DATA_UNORDERED) {
+			/* queue directly into socket buffer */
+			SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
+				control, mid);
+			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
+			sctp_add_to_readq(stcb->sctp_ep, stcb,
+			                  control,
+			                  &stcb->sctp_socket->so_rcv, 1,
+			                  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+
+		} else {
+			SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
+				mid);
+			sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
+			if (*abort_flag) {
+				if (last_chunk) {
+					*m = NULL;
+				}
+				return (0);
+			}
+		}
+		goto finish_express_del;
+	}
+	/* If we reach here its a reassembly */
+	need_reasm_check = 1;
+	SCTPDBG(SCTP_DEBUG_XXX,
+		"Queue data to stream for reasm control: %p MID: %u\n",
+		control, mid);
+	sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
+	if (*abort_flag) {
+		/*
+		 * the assoc is now gone and chk was put onto the
+		 * reasm queue, which has all been freed.
+		 */
+		if (last_chunk) {
+			*m = NULL;
+		}
+		return (0);
+	}
+finish_express_del:
+	/* Here we tidy up things */
+	if (tsn == (asoc->cumulative_tsn + 1)) {
+		/* Update cum-ack */
+		asoc->cumulative_tsn = tsn;
+	}
+	if (last_chunk) {
+		*m = NULL;
+	}
+	if (ordered) {
+		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
+	} else {
+		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
+	}
+	SCTP_STAT_INCR(sctps_recvdata);
+	/* Set it present please */
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
+		sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
+			     asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
+	}
+	if (need_reasm_check) {
+		(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
+		need_reasm_check = 0;
+	}
+	/* check the special flag for stream resets */
+	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
+	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
+		/*
+		 * we have finished working through the backlogged TSN's now
+		 * time to reset streams. 1: call reset function. 2: free
+		 * pending_reply space 3: distribute any chunks in
+		 * pending_reply_queue.
+		 */
+		struct sctp_queued_to_read *ctl, *nctl;
+
+		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
+		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
+		sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
+		SCTP_FREE(liste, SCTP_M_STRESET);
+		/*sa_ignore FREED_MEMORY*/
+		liste = TAILQ_FIRST(&asoc->resetHead);
+		if (TAILQ_EMPTY(&asoc->resetHead)) {
+			/* All can be removed */
+			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
+				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
+				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
+				if (*abort_flag) {
+					return (0);
+				}
+				if (need_reasm_check) {
+					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
+					need_reasm_check = 0;
+				}
+			}
+		} else {
+			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
+				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
+					break;
+				}
+				/*
+				 * if ctl->sinfo_tsn is <= liste->tsn we can
+				 * process it which is the NOT of
+				 * ctl->sinfo_tsn > liste->tsn
+				 */
+				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
+				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag, &need_reasm_check);
+				if (*abort_flag) {
+					return (0);
+				}
+				if (need_reasm_check) {
+					(void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[ctl->sinfo_stream], SCTP_READ_LOCK_NOT_HELD);
+					need_reasm_check = 0;
+				}
+			}
+		}
+	}
+	return (1);
+}
+
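+/*
+ * Lookup table used when sliding the mapping arrays: for a byte value v,
+ * sctp_map_lookup_tab[v] is the number of consecutive 1-bits starting at
+ * bit 0, i.e. the bit index of the first hole (e.g. 0x07 -> 3, 0x0b -> 2).
+ * The all-ones byte (0xff) is special-cased by the caller, which keeps
+ * scanning the next byte.
+ */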
+static const int8_t sctp_map_lookup_tab[256] = {
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 5,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 6,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 5,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 7,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 5,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 6,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 5,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 4,
+  0, 1, 0, 2, 0, 1, 0, 3,
+  0, 1, 0, 2, 0, 1, 0, 8
+};
+
+
+void
+sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
+{
+	/*
+	 * Now we also need to check the mapping array in a couple of ways.
+	 * 1) Did we move the cum-ack point?
+	 *
+	 * When you first glance at this you might think
+	 * that all entries that make up the position
+	 * of the cum-ack would be in the nr-mapping array
+	 * only.. i.e. things up to the cum-ack are always
+	 * deliverable. That's true with one exception: when
+	 * it's a fragmented message we may not deliver the data
+	 * until some threshold (or all of it) is in place. So
+	 * we must OR the nr_mapping_array and mapping_array to
+	 * get a true picture of the cum-ack.
+	 */
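+	/*
+	 * Note: mapping_array tracks TSNs that have arrived but are still
+	 * renegable (reported in Gap Ack Blocks), while nr_mapping_array
+	 * tracks TSNs marked non-renegable (e.g. already delivered to the
+	 * application).  Only the OR of the two gives the full picture of
+	 * what has been received.
+	 */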
+	struct sctp_association *asoc;
+	int at;
+	uint8_t val;
+	int slide_from, slide_end, lgap, distance;
+	uint32_t old_cumack, old_base, old_highest, highest_tsn;
+
+	asoc = &stcb->asoc;
+
+	old_cumack = asoc->cumulative_tsn;
+	old_base = asoc->mapping_array_base_tsn;
+	old_highest = asoc->highest_tsn_inside_map;
+	/*
+	 * We could probably improve this a small bit by calculating the
+	 * offset of the current cum-ack as the starting point.
+	 */
+	at = 0;
+	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
+		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
+		if (val == 0xff) {
+			at += 8;
+		} else {
+			/* there is a 0 bit */
+			at += sctp_map_lookup_tab[val];
+			break;
+		}
+	}
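+	/*
+	 * 'at' now counts the consecutive received TSNs starting at the
+	 * base TSN; e.g. with a base TSN of 1000 and ORed map bytes
+	 * 0xff, 0x07, at = 8 + 3 = 11, so the cum-ack below becomes 1010.
+	 */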
+	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at-1);
+
+	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
+            SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
+#ifdef INVARIANTS
+		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
+		      asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
+#else
+		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
+			    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
+		sctp_print_mapping_array(asoc);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+		}
+		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
+#endif
+	}
+	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
+		highest_tsn = asoc->highest_tsn_inside_nr_map;
+	} else {
+		highest_tsn = asoc->highest_tsn_inside_map;
+	}
+	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
+		/* The complete array was completed by a single FR */
+		/* highest becomes the cum-ack */
+		int clr;
+#ifdef INVARIANTS
+		unsigned int i;
+#endif
+
+		/* clear the array */
+		clr = ((at+7) >> 3);
+		if (clr > asoc->mapping_array_size) {
+			clr = asoc->mapping_array_size;
+		}
+		memset(asoc->mapping_array, 0, clr);
+		memset(asoc->nr_mapping_array, 0, clr);
+#ifdef INVARIANTS
+		for (i = 0; i < asoc->mapping_array_size; i++) {
+			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
+				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
+				sctp_print_mapping_array(asoc);
+			}
+		}
+#endif
+		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
+		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
+	} else if (at >= 8) {
+		/* we can slide the mapping array down */
+		/* slide_from holds where we hit the first NON 0xff byte */
+
+		/*
+		 * now calculate the ceiling of the move using our highest
+		 * TSN value
+		 */
+		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
+		slide_end = (lgap >> 3);
+		if (slide_end < slide_from) {
+			sctp_print_mapping_array(asoc);
+#ifdef INVARIANTS
+			panic("impossible slide");
+#else
+			SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
+			            lgap, slide_end, slide_from, at);
+			return;
+#endif
+		}
+		if (slide_end > asoc->mapping_array_size) {
+#ifdef INVARIANTS
+			panic("would overrun buffer");
+#else
+			SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
+			            asoc->mapping_array_size, slide_end);
+			slide_end = asoc->mapping_array_size;
+#endif
+		}
+		distance = (slide_end - slide_from) + 1;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+			sctp_log_map(old_base, old_cumack, old_highest,
+				     SCTP_MAP_PREPARE_SLIDE);
+			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
+				     (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
+		}
+		if (distance + slide_from > asoc->mapping_array_size ||
+		    distance < 0) {
+			/*
+			 * Here we do NOT slide forward the array so that
+			 * hopefully when more data comes in to fill it up
+			 * we will be able to slide it forward. Really I
+			 * don't think this should happen :-0
+			 */
+
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
+					     (uint32_t) asoc->mapping_array_size,
+					     SCTP_MAP_SLIDE_NONE);
+			}
+		} else {
+			int ii;
+
+			for (ii = 0; ii < distance; ii++) {
+				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
+				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
+
+			}
+			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
+				asoc->mapping_array[ii] = 0;
+				asoc->nr_mapping_array[ii] = 0;
+			}
+			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
+				asoc->highest_tsn_inside_map += (slide_from << 3);
+			}
+			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
+				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
+			}
+			asoc->mapping_array_base_tsn += (slide_from << 3);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+				sctp_log_map(asoc->mapping_array_base_tsn,
+					     asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
+					     SCTP_MAP_SLIDE_RESULT);
+			}
+		}
+	}
+}
+
+void
+sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
+{
+	struct sctp_association *asoc;
+	uint32_t highest_tsn;
+	int is_a_gap;
+
+	sctp_slide_mapping_arrays(stcb);
+	asoc = &stcb->asoc;
+	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
+		highest_tsn = asoc->highest_tsn_inside_nr_map;
+	} else {
+		highest_tsn = asoc->highest_tsn_inside_map;
+	}
+	/* Is there a gap now? */
+	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
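+	/*
+	 * A gap exists when the highest TSN seen (in either map) is beyond
+	 * the cumulative ack, i.e. there are holes in the received TSN
+	 * sequence that a SACK would have to report.
+	 */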
+
+	/*
+	 * Now we need to see if we need to queue a sack or just start the
+	 * timer (if allowed).
+	 */
+	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+		/*
+		 * Ok, special case: in the SHUTDOWN-SENT state we
+		 * make sure the SACK timer is off and instead send a
+		 * SHUTDOWN and a SACK.
+		 */
+		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+			                stcb->sctp_ep, stcb, NULL,
+			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
+		}
+		sctp_send_shutdown(stcb,
+		                   ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
+		if (is_a_gap) {
+			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+		}
+	} else {
+		/*
+		 * CMT DAC algorithm: increase number of packets
+		 * received since last ack
+		 */
+		stcb->asoc.cmt_dac_pkts_rcvd++;
+
+		if ((stcb->asoc.send_sack == 1) ||      /* We need to send a SACK */
+		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
+		                                         * longer is one */
+		    (stcb->asoc.numduptsns) ||          /* we have dup's */
+		    (is_a_gap) ||                       /* is still a gap */
+		    (stcb->asoc.delayed_ack == 0) ||    /* Delayed sack disabled */
+		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
+			) {
+
+			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
+			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
+			    (stcb->asoc.send_sack == 0) &&
+			    (stcb->asoc.numduptsns == 0) &&
+			    (stcb->asoc.delayed_ack) &&
+			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
+
+				/*
+				 * CMT DAC algorithm: with CMT, delay
+				 * acks even in the face of reordering.
+				 * Acks that do not have to be sent for
+				 * the above reasons will therefore be
+				 * delayed; that is, acks that would
+				 * have been sent due to gap reports
+				 * are delayed with DAC. Start the
+				 * delayed ack timer.
+				 */
+				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+				                 stcb->sctp_ep, stcb, NULL);
+			} else {
+				/*
+				 * Ok we must build a SACK since the
+				 * timer is pending, we got our
+				 * first packet OR there are gaps or
+				 * duplicates.
+				 */
+				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
+				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+			}
+		} else {
+			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+				                 stcb->sctp_ep, stcb, NULL);
+			}
+		}
+	}
+}
+
+int
+sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
+                  struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                  struct sctp_nets *net, uint32_t *high_tsn)
+{
+	struct sctp_chunkhdr *ch, chunk_buf;
+	struct sctp_association *asoc;
+	int num_chunks = 0;	/* number of control chunks processed */
+	int stop_proc = 0;
+	int chk_length, break_flag, last_chunk;
+	int abort_flag = 0, was_a_gap;
+	struct mbuf *m;
+	uint32_t highest_tsn;
+
+	/* set the rwnd */
+	sctp_set_rwnd(stcb, &stcb->asoc);
+
+	m = *mm;
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	asoc = &stcb->asoc;
+	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
+		highest_tsn = asoc->highest_tsn_inside_nr_map;
+	} else {
+		highest_tsn = asoc->highest_tsn_inside_map;
+	}
+	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
+	/*
+	 * setup where we got the last DATA packet from for any SACK that
+	 * may need to go out. Don't bump the net. This is done ONLY when a
+	 * chunk is assigned.
+	 */
+	asoc->last_data_chunk_from = net;
+
+#ifndef __Panda__
+	/*-
+	 * Now before we proceed we must figure out if this is a wasted
+	 * cluster... i.e. it is a small packet sent in and yet the driver
+	 * underneath allocated a full cluster for it. If so we must copy it
+	 * to a smaller mbuf and free up the cluster mbuf. This will help
+	 * with cluster starvation. Note for __Panda__ we don't do this
+	 * since it has clusters all the way down to 64 bytes.
+	 */
+	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
+		/* we only handle mbufs that are singletons.. not chains */
+		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
+		if (m) {
+			/* ok lets see if we can copy the data up */
+			caddr_t *from, *to;
+			/* get the pointers and copy */
+			to = mtod(m, caddr_t *);
+			from = mtod((*mm), caddr_t *);
+			memcpy(to, from, SCTP_BUF_LEN((*mm)));
+			/* copy the length and free up the old */
+			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
+			sctp_m_freem(*mm);
+			/* success, back copy */
+			*mm = m;
+		} else {
+			/* We are in trouble in the mbuf world .. yikes */
+			m = *mm;
+		}
+	}
+#endif
+	/* get pointer to the first chunk header */
+	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+						     sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
+	if (ch == NULL) {
+		return (1);
+	}
+	/*
+	 * process all DATA chunks...
+	 */
+	*high_tsn = asoc->cumulative_tsn;
+	break_flag = 0;
+	asoc->data_pkts_seen++;
+	while (stop_proc == 0) {
+		/* validate chunk length */
+		chk_length = ntohs(ch->chunk_length);
+		if (length - *offset < chk_length) {
+			/* all done, mutilated chunk */
+			stop_proc = 1;
+			continue;
+		}
+		if ((asoc->idata_supported == 1) &&
+		    (ch->chunk_type == SCTP_DATA)) {
+			struct mbuf *op_err;
+			char msg[SCTP_DIAG_INFO_LEN];
+
+			snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
+			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
+			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+			return (2);
+		}
+		if ((asoc->idata_supported == 0) &&
+		    (ch->chunk_type == SCTP_IDATA)) {
+			struct mbuf *op_err;
+			char msg[SCTP_DIAG_INFO_LEN];
+
+			snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
+			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
+			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+			return (2);
+		}
+		if ((ch->chunk_type == SCTP_DATA) ||
+		    (ch->chunk_type == SCTP_IDATA)) {
+			int clen;
+
+			if (ch->chunk_type == SCTP_DATA) {
+				clen = sizeof(struct sctp_data_chunk);
+			} else {
+				clen = sizeof(struct sctp_idata_chunk);
+			}
+			if (chk_length < clen) {
+				/*
+				 * Need to send an abort since we had an
+				 * invalid data chunk.
+				 */
+				struct mbuf *op_err;
+				char msg[SCTP_DIAG_INFO_LEN];
+
+				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
+				         chk_length);
+				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20;
+				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+				return (2);
+			}
+#ifdef SCTP_AUDITING_ENABLED
+			sctp_audit_log(0xB1, 0);
+#endif
+			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
+				last_chunk = 1;
+			} else {
+				last_chunk = 0;
+			}
+			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 
+						      chk_length, net, high_tsn, &abort_flag, &break_flag,
+						      last_chunk, ch->chunk_type)) {
+				num_chunks++;
+			}
+			if (abort_flag)
+				return (2);
+
+			if (break_flag) {
+				/*
+				 * Set because we ran out of rwnd space and
+				 * have no drop report space left.
+				 */
+				stop_proc = 1;
+				continue;
+			}
+		} else {
+			/* not a data chunk in the data region */
+			switch (ch->chunk_type) {
+			case SCTP_INITIATION:
+			case SCTP_INITIATION_ACK:
+			case SCTP_SELECTIVE_ACK:
+			case SCTP_NR_SELECTIVE_ACK:
+			case SCTP_HEARTBEAT_REQUEST:
+			case SCTP_HEARTBEAT_ACK:
+			case SCTP_ABORT_ASSOCIATION:
+			case SCTP_SHUTDOWN:
+			case SCTP_SHUTDOWN_ACK:
+			case SCTP_OPERATION_ERROR:
+			case SCTP_COOKIE_ECHO:
+			case SCTP_COOKIE_ACK:
+			case SCTP_ECN_ECHO:
+			case SCTP_ECN_CWR:
+			case SCTP_SHUTDOWN_COMPLETE:
+			case SCTP_AUTHENTICATION:
+			case SCTP_ASCONF_ACK:
+			case SCTP_PACKET_DROPPED:
+			case SCTP_STREAM_RESET:
+			case SCTP_FORWARD_CUM_TSN:
+			case SCTP_ASCONF:
+			{
+				/*
+				 * Now, what do we do with KNOWN chunks that
+				 * are NOT in the right place?
+				 *
+				 * For now, I do nothing but ignore them. We
+				 * may later want to add sysctl stuff to
+				 * switch out and do either an ABORT() or
+				 * possibly process them.
+				 */
+				struct mbuf *op_err;
+				char msg[SCTP_DIAG_INFO_LEN];
+
+				snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
+				         ch->chunk_type);
+				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+				sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+				return (2);
+			}
+			default:
+				/* unknown chunk type, use bit rules */
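+				/*
+				 * Per RFC 4960, the two high-order bits of
+				 * an unrecognized chunk type say what to do:
+				 * 0x40 set means report it in an ERROR
+				 * chunk, 0x80 set means skip it and keep
+				 * processing; otherwise we stop here.
+				 */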
+				if (ch->chunk_type & 0x40) {
+					/* Add a error report to the queue */
+					struct mbuf *op_err;
+					struct sctp_gen_error_cause *cause;
+
+					op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
+					                               0, M_NOWAIT, 1, MT_DATA);
+					if (op_err != NULL) {
+						cause  = mtod(op_err, struct sctp_gen_error_cause *);
+						cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
+						cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
+						SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
+						SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
+						if (SCTP_BUF_NEXT(op_err) != NULL) {
+							sctp_queue_op_err(stcb, op_err);
+						} else {
+							sctp_m_freem(op_err);
+						}
+					}
+				}
+				if ((ch->chunk_type & 0x80) == 0) {
+					/* discard the rest of this packet */
+					stop_proc = 1;
+				}	/* else skip this bad chunk and
+					 * continue... */
+				break;
+			}	/* switch of chunk type */
+		}
+		*offset += SCTP_SIZE32(chk_length);
+		if ((*offset >= length) || stop_proc) {
+			/* no more data left in the mbuf chain */
+			stop_proc = 1;
+			continue;
+		}
+		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+							     sizeof(struct sctp_chunkhdr), (uint8_t *) & chunk_buf);
+		if (ch == NULL) {
+			*offset = length;
+			stop_proc = 1;
+			continue;
+		}
+	}
+	if (break_flag) {
+		/*
+		 * we need to report rwnd overrun drops.
+		 */
+		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
+	}
+	if (num_chunks) {
+		/*
+		 * Did we get data? If so, update the time for auto-close and
+		 * give peer credit for being alive.
+		 */
+		SCTP_STAT_INCR(sctps_recvpktwithdata);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+				       stcb->asoc.overall_error_count,
+				       0,
+				       SCTP_FROM_SCTP_INDATA,
+				       __LINE__);
+		}
+		stcb->asoc.overall_error_count = 0;
+		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
+	}
+	/* now service all of the reassm queue if needed */
+	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+		/* Assure that we ack right away */
+		stcb->asoc.send_sack = 1;
+	}
+	/* Start a sack timer or QUEUE a SACK for sending */
+	sctp_sack_check(stcb, was_a_gap);
+	return (0);
+}
+
+static int
+sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
+			   uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
+			   int *num_frs,
+			   uint32_t *biggest_newly_acked_tsn,
+			   uint32_t  *this_sack_lowest_newack,
+			   int *rto_ok)
+{
+	struct sctp_tmit_chunk *tp1;
+	unsigned int theTSN;
+	int j, wake_him = 0, circled = 0;
+
+	/* Recover the tp1 we last saw */
+	tp1 = *p_tp1;
+	if (tp1 == NULL) {
+		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+	}
+	for (j = frag_strt; j <= frag_end; j++) {
+		theTSN = j + last_tsn;
+		while (tp1) {
+			if (tp1->rec.data.doing_fast_retransmit)
+				(*num_frs) += 1;
+
+			/*-
+			 * CMT: CUCv2 algorithm. For each TSN being
+			 * processed from the sent queue, track the
+			 * next expected pseudo-cumack, or
+			 * rtx_pseudo_cumack, if required. Separate
+			 * cumack trackers for first transmissions,
+			 * and retransmissions.
+			 */
+			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
+			    (tp1->whoTo->find_pseudo_cumack == 1) &&
+			    (tp1->snd_count == 1)) {
+				tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
+				tp1->whoTo->find_pseudo_cumack = 0;
+			}
+			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
+			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
+			    (tp1->snd_count > 1)) {
+				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
+				tp1->whoTo->find_rtx_pseudo_cumack = 0;
+			}
+			if (tp1->rec.data.tsn == theTSN) {
+				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+					/*-
+					 * must be held until
+					 * cum-ack passes
+					 */
+					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+						/*-
+						 * If it is less than RESEND, it is
+						 * now no-longer in flight.
+						 * Higher values may already be set
+						 * via previous Gap Ack Blocks...
+						 * i.e. ACKED or RESEND.
+						 */
+						if (SCTP_TSN_GT(tp1->rec.data.tsn,
+						                *biggest_newly_acked_tsn)) {
+							*biggest_newly_acked_tsn = tp1->rec.data.tsn;
+						}
+						/*-
+						 * CMT: SFR algo (and HTNA) - set
+						 * saw_newack to 1 for dest being
+						 * newly acked. update
+						 * this_sack_highest_newack if
+						 * appropriate.
+						 */
+						if (tp1->rec.data.chunk_was_revoked == 0)
+							tp1->whoTo->saw_newack = 1;
+
+						if (SCTP_TSN_GT(tp1->rec.data.tsn,
+						                tp1->whoTo->this_sack_highest_newack)) {
+							tp1->whoTo->this_sack_highest_newack =
+								tp1->rec.data.tsn;
+						}
+						/*-
+						 * CMT DAC algo: also update
+						 * this_sack_lowest_newack
+						 */
+						if (*this_sack_lowest_newack == 0) {
+							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+								sctp_log_sack(*this_sack_lowest_newack,
+									      last_tsn,
+									      tp1->rec.data.tsn,
+									      0,
+									      0,
+									      SCTP_LOG_TSN_ACKED);
+							}
+							*this_sack_lowest_newack = tp1->rec.data.tsn;
+						}
+						/*-
+						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
+						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
+						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
+						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
+						 * Separate pseudo_cumack trackers for first transmissions and
+						 * retransmissions.
+						 */
+						if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
+							if (tp1->rec.data.chunk_was_revoked == 0) {
+								tp1->whoTo->new_pseudo_cumack = 1;
+							}
+							tp1->whoTo->find_pseudo_cumack = 1;
+						}
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
+						}
+						if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
+							if (tp1->rec.data.chunk_was_revoked == 0) {
+								tp1->whoTo->new_pseudo_cumack = 1;
+							}
+							tp1->whoTo->find_rtx_pseudo_cumack = 1;
+						}
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+							sctp_log_sack(*biggest_newly_acked_tsn,
+								      last_tsn,
+								      tp1->rec.data.tsn,
+								      frag_strt,
+								      frag_end,
+								      SCTP_LOG_TSN_ACKED);
+						}
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
+								       tp1->whoTo->flight_size,
+								       tp1->book_size,
+								       (uint32_t)(uintptr_t)tp1->whoTo,
+								       tp1->rec.data.tsn);
+						}
+						sctp_flight_size_decrease(tp1);
+						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+														     tp1);
+						}
+						sctp_total_flight_decrease(stcb, tp1);
+
+						tp1->whoTo->net_ack += tp1->send_size;
+						if (tp1->snd_count < 2) {
+							/*-
+							 * True non-retransmitted chunk
+							 */
+							tp1->whoTo->net_ack2 += tp1->send_size;
+
+							/*-
+							 * update RTO too ?
+							 */
+							if (tp1->do_rtt) {
+								if (*rto_ok) {
+									tp1->whoTo->RTO =
+										sctp_calculate_rto(stcb,
+												   &stcb->asoc,
+												   tp1->whoTo,
+												   &tp1->sent_rcv_time,
+												   sctp_align_safe_nocopy,
+												   SCTP_RTT_FROM_DATA);
+									*rto_ok = 0;
+								}
+								if (tp1->whoTo->rto_needed == 0) {
+									tp1->whoTo->rto_needed = 1;
+								}
+								tp1->do_rtt = 0;
+							}
+						}
+
+					}
+					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
+						if (SCTP_TSN_GT(tp1->rec.data.tsn,
+						                stcb->asoc.this_sack_highest_gap)) {
+							stcb->asoc.this_sack_highest_gap =
+								tp1->rec.data.tsn;
+						}
+						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+							sctp_audit_log(0xB2,
+								       (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
+#endif
+						}
+					}
+					/*-
+					 * All chunks NOT UNSENT fall through here and are marked
+					 * (leave PR-SCTP ones that are to skip alone though)
+					 */
+					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
+					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
+						tp1->sent = SCTP_DATAGRAM_MARKED;
+					}
+					if (tp1->rec.data.chunk_was_revoked) {
+						/* deflate the cwnd */
+						tp1->whoTo->cwnd -= tp1->book_size;
+						tp1->rec.data.chunk_was_revoked = 0;
+					}
+					/* NR Sack code here */
+					if (nr_sacking &&
+					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
+						if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+							stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+						} else {
+							panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
+#endif
+						}
+						if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+						    (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+						    TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
+							stcb->asoc.trigger_reset = 1;
+						}
+						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
+						if (tp1->data) {
+							/* sa_ignore NO_NULL_CHK */
+							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+							sctp_m_freem(tp1->data);
+							tp1->data = NULL;
+						}
+						wake_him++;
+					}
+				}
+				break;
+			}	/* if (tp1->tsn == theTSN) */
+			if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
+				break;
+			}
+			tp1 = TAILQ_NEXT(tp1, sctp_next);
+			if ((tp1 == NULL) && (circled == 0)) {
+				circled++;
+				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+			}
+		}	/* end while (tp1) */
+		if (tp1 == NULL) {
+			circled = 0;
+			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
+		}
+		/* In case the fragments were not in order we must reset */
+	} /* end for (j = fragStart */
+	*p_tp1 = tp1;
+	return (wake_him);	/* Return value only used for nr-sack */
+}
+
+
+static int
+sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
+		uint32_t last_tsn, uint32_t *biggest_tsn_acked,
+		uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
+		int num_seg, int num_nr_seg, int *rto_ok)
+{
+	struct sctp_gap_ack_block *frag, block;
+	struct sctp_tmit_chunk *tp1;
+	int i;
+	int num_frs = 0;
+	int chunk_freed;
+	int non_revocable;
+	uint16_t frag_strt, frag_end, prev_frag_end;
+
+	tp1 = TAILQ_FIRST(&asoc->sent_queue);
+	prev_frag_end = 0;
+	chunk_freed = 0;
+
+	for (i = 0; i < (num_seg + num_nr_seg); i++) {
+		if (i == num_seg) {
+			prev_frag_end = 0;
+			tp1 = TAILQ_FIRST(&asoc->sent_queue);
+		}
+		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
+		                                                  sizeof(struct sctp_gap_ack_block), (uint8_t *) &block);
+		*offset += sizeof(block);
+		if (frag == NULL) {
+			return (chunk_freed);
+		}
+		frag_strt = ntohs(frag->start);
+		frag_end = ntohs(frag->end);
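+		/*
+		 * Gap Ack Block start/end offsets are relative to the
+		 * Cumulative TSN Ack (last_tsn); e.g. with last_tsn 100 and
+		 * a block of start 2, end 3, TSNs 102 and 103 are being
+		 * acked.
+		 */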
+
+		if (frag_strt > frag_end) {
+			/* This gap report is malformed, skip it. */
+			continue;
+		}
+		if (frag_strt <= prev_frag_end) {
+			/* This gap report is not in order, so restart. */
+			 tp1 = TAILQ_FIRST(&asoc->sent_queue);
+		}
+		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
+			*biggest_tsn_acked = last_tsn + frag_end;
+		}
+		if (i < num_seg) {
+			non_revocable = 0;
+		} else {
+			non_revocable = 1;
+		}
+		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
+		                               non_revocable, &num_frs, biggest_newly_acked_tsn,
+		                               this_sack_lowest_newack, rto_ok)) {
+			chunk_freed = 1;
+		}
+		prev_frag_end = frag_end;
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+		if (num_frs)
+			sctp_log_fr(*biggest_tsn_acked,
+			            *biggest_newly_acked_tsn,
+			            last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
+	}
+	return (chunk_freed);
+}
+
+static void
+sctp_check_for_revoked(struct sctp_tcb *stcb,
+		       struct sctp_association *asoc, uint32_t cumack,
+		       uint32_t biggest_tsn_acked)
+{
+	struct sctp_tmit_chunk *tp1;
+
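+	/*
+	 * A TSN that was reported in an earlier SACK's gap blocks but is
+	 * not covered by this one has been revoked (renegged) by the peer
+	 * and must be put back in flight below.
+	 */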
+	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+		if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
+			/*
+			 * ok this guy is either ACKED or MARKED. If it is
+			 * ACKED it has been previously acked but not this
+			 * time, i.e. revoked. If it is MARKED it was ACK'ed
+			 * again.
+			 */
+			if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
+				break;
+			}
+			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
+				/* it has been revoked */
+				tp1->sent = SCTP_DATAGRAM_SENT;
+				tp1->rec.data.chunk_was_revoked = 1;
+				/* We must add this stuff back in to
+				 * assure timers and such get started.
+				 */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+						       tp1->whoTo->flight_size,
+						       tp1->book_size,
+						       (uint32_t)(uintptr_t)tp1->whoTo,
+						       tp1->rec.data.tsn);
+				}
+				sctp_flight_size_increase(tp1);
+				sctp_total_flight_increase(stcb, tp1);
+				/* We inflate the cwnd to compensate for our
+				 * artificial inflation of the flight_size.
+				 */
+				tp1->whoTo->cwnd += tp1->book_size;
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+					sctp_log_sack(asoc->last_acked_seq,
+						      cumack,
+						      tp1->rec.data.tsn,
+						      0,
+						      0,
+						      SCTP_LOG_TSN_REVOKED);
+				}
+			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
+				/* it has been re-acked in this SACK */
+				tp1->sent = SCTP_DATAGRAM_ACKED;
+			}
+		}
+		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
+			break;
+	}
+}
+
+
+static void
+sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
+			   uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
+{
+	struct sctp_tmit_chunk *tp1;
+	int strike_flag = 0;
+	struct timeval now;
+	int tot_retrans = 0;
+	uint32_t sending_seq;
+	struct sctp_nets *net;
+	int num_dests_sacked = 0;
+
+	/*
+	 * select the sending_seq, this is either the next thing ready to be
+	 * sent but not transmitted, OR, the next seq we assign.
+	 */
+	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
+	if (tp1 == NULL) {
+		sending_seq = asoc->sending_seq;
+	} else {
+		sending_seq = tp1->rec.data.tsn;
+	}
+
+	/* CMT DAC algo: finding out if SACK is a mixed SACK */
+	if ((asoc->sctp_cmt_on_off > 0) &&
+	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			if (net->saw_newack)
+				num_dests_sacked++;
+		}
+	}
+	if (stcb->asoc.prsctp_supported) {
+		(void)SCTP_GETTIME_TIMEVAL(&now);
+	}
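+	/*
+	 * Walk the sent queue and give each missing chunk a "strike";
+	 * tp1->sent is bumped once per qualifying SACK, and once it reaches
+	 * SCTP_DATAGRAM_RESEND the chunk is queued for fast retransmission
+	 * (possibly to an alternate destination) further down.
+	 */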
+	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+		strike_flag = 0;
+		if (tp1->no_fr_allowed) {
+			/* this one had a timeout or something */
+			continue;
+		}
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+			if (tp1->sent < SCTP_DATAGRAM_RESEND)
+				sctp_log_fr(biggest_tsn_newly_acked,
+					    tp1->rec.data.tsn,
+					    tp1->sent,
+					    SCTP_FR_LOG_CHECK_STRIKE);
+		}
+		if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
+		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
+			/* done */
+			break;
+		}
+		if (stcb->asoc.prsctp_supported) {
+			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
+				/* Is it expired? */
+#ifndef __FreeBSD__
+				if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
+#else
+				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+#endif
+					/* Yes so drop it */
+					if (tp1->data != NULL) {
+						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
+										 SCTP_SO_NOT_LOCKED);
+					}
+					continue;
+				}
+			}
+
+		}
+		if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap)) {
+			/* we are beyond the tsn in the sack  */
+			break;
+		}
+		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
+			/* either a RESEND, ACKED, or MARKED */
+			/* skip */
+			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+				/* Continue striking FWD-TSN chunks */
+				tp1->rec.data.fwd_tsn_cnt++;
+			}
+			continue;
+		}
+		/*
+		 * CMT : SFR algo (covers part of DAC and HTNA as well)
+		 */
+		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
+			/*
+			 * No new acks were received for data sent to this
+			 * dest. Therefore, according to the SFR algo for
+			 * CMT, no data sent to this dest can be marked for
+			 * FR using this SACK.
+			 */
+			continue;
+		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.tsn,
+		                                     tp1->whoTo->this_sack_highest_newack)) {
+			/*
+			 * CMT: New acks were received for data sent to
+			 * this dest. But no new acks were seen for data
+			 * sent after tp1. Therefore, according to the SFR
+			 * algo for CMT, tp1 cannot be marked for FR using
+			 * this SACK. This step covers part of the DAC algo
+			 * and the HTNA algo as well.
+			 */
+			continue;
+		}
+		/*
+		 * Here we check to see if we have already done a FR
+		 * and if so we see if the biggest TSN we saw in the sack is
+		 * smaller than the recovery point. If so we don't strike
+		 * the tsn... otherwise we CAN strike the TSN.
+		 */
+		/*
+		 * @@@ JRI: Check for CMT
+		 * if (accum_moved && asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 0)) {
+		 */
+		if (accum_moved && asoc->fast_retran_loss_recovery) {
+			/*
+			 * Strike the TSN if in fast-recovery and cum-ack
+			 * moved.
+			 */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+				sctp_log_fr(biggest_tsn_newly_acked,
+					    tp1->rec.data.tsn,
+					    tp1->sent,
+					    SCTP_FR_LOG_STRIKE_CHUNK);
+			}
+			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+				tp1->sent++;
+			}
+			if ((asoc->sctp_cmt_on_off > 0) &&
+			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+				/*
+				 * CMT DAC algorithm: If SACK flag is set to
+				 * 0, then lowest_newack test will not pass
+				 * because it would have been set to the
+				 * cumack earlier. If it is not already to
+				 * be rtx'd, this is not a mixed sack, and
+				 * tp1 is not between two sacked TSNs, then
+				 * mark by one more.
+				 * NOTE that we mark by one additional time
+				 * since the SACK DAC flag indicates that
+				 * two packets have been received after
+				 * this missing TSN.
+				 */
+				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+						sctp_log_fr(16 + num_dests_sacked,
+							    tp1->rec.data.tsn,
+							    tp1->sent,
+							    SCTP_FR_LOG_STRIKE_CHUNK);
+					}
+					tp1->sent++;
+				}
+			}
+		} else if ((tp1->rec.data.doing_fast_retransmit) &&
+		           (asoc->sctp_cmt_on_off == 0)) {
+			/*
+			 * For those that have done a FR we must take
+			 * special consideration if we strike. I.e. the
+			 * biggest_newly_acked must be higher than the
+			 * sending_seq at the time we did the FR.
+			 */
+			if (
+#ifdef SCTP_FR_TO_ALTERNATE
+				/*
+				 * If FR's go to new networks, then we must only do
+				 * this for singly homed asoc's. However if the FR's
+				 * go to the same network (Armando's work) then it's
+				 * ok to FR multiple times.
+				 */
+				(asoc->numnets < 2)
+#else
+				(1)
+#endif
+				) {
+
+				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
+				                tp1->rec.data.fast_retran_tsn)) {
+					/*
+					 * Strike the TSN, since this ack is
+					 * beyond where things were when we
+					 * did a FR.
+					 */
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+						sctp_log_fr(biggest_tsn_newly_acked,
+							    tp1->rec.data.tsn,
+							    tp1->sent,
+							    SCTP_FR_LOG_STRIKE_CHUNK);
+					}
+					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+						tp1->sent++;
+					}
+					strike_flag = 1;
+					if ((asoc->sctp_cmt_on_off > 0) &&
+					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+						/*
+						 * CMT DAC algorithm: If
+						 * SACK flag is set to 0,
+						 * then lowest_newack test
+						 * will not pass because it
+						 * would have been set to
+						 * the cumack earlier. If it
+						 * is not already to be
+						 * rtx'd, this is not a
+						 * mixed sack, and tp1 is
+						 * not between two sacked
+						 * TSNs, then mark by one
+						 * more.
+						 * NOTE that we mark by one
+						 * additional time since the
+						 * SACK DAC flag indicates
+						 * that two packets have
+						 * been received after this
+						 * missing TSN.
+						 */
+						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
+						    (num_dests_sacked == 1) &&
+						    SCTP_TSN_GT(this_sack_lowest_newack,
+						                tp1->rec.data.tsn)) {
+							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+								sctp_log_fr(32 + num_dests_sacked,
+									    tp1->rec.data.tsn,
+									    tp1->sent,
+									    SCTP_FR_LOG_STRIKE_CHUNK);
+							}
+							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+								tp1->sent++;
+							}
+						}
+					}
+				}
+			}
+			/*
+			 * JRI: TODO: remove code for HTNA algo. CMT's
+			 * SFR algo covers HTNA.
+			 */
+		} else if (SCTP_TSN_GT(tp1->rec.data.tsn,
+		                       biggest_tsn_newly_acked)) {
+			/*
+			 * We don't strike these: this is the HTNA
+			 * algorithm, i.e. we don't strike if our TSN is
+			 * larger than the Highest TSN Newly Acked.
+			 */
+			;
+		} else {
+			/* Strike the TSN */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+				sctp_log_fr(biggest_tsn_newly_acked,
+					    tp1->rec.data.tsn,
+					    tp1->sent,
+					    SCTP_FR_LOG_STRIKE_CHUNK);
+			}
+			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+				tp1->sent++;
+			}
+			if ((asoc->sctp_cmt_on_off > 0) &&
+			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+				/*
+				 * CMT DAC algorithm: If SACK flag is set to
+				 * 0, then lowest_newack test will not pass
+				 * because it would have been set to the
+				 * cumack earlier. If it is not already to
+				 * be rtx'd, this is not a mixed sack, and
+				 * tp1 is not between two sacked TSNs, then
+				 * mark by one more.
+				 * NOTE that we mark by one additional time
+				 * since the SACK DAC flag indicates that
+				 * two packets have been received after
+				 * this missing TSN.
+				 */
+				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
+				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+						sctp_log_fr(48 + num_dests_sacked,
+							    tp1->rec.data.tsn,
+							    tp1->sent,
+							    SCTP_FR_LOG_STRIKE_CHUNK);
+					}
+					tp1->sent++;
+				}
+			}
+		}
+		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+			struct sctp_nets *alt;
+
+			/* fix counts and things */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
+					       (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
+					       tp1->book_size,
+					       (uint32_t)(uintptr_t)tp1->whoTo,
+					       tp1->rec.data.tsn);
+			}
+			if (tp1->whoTo) {
+				tp1->whoTo->net_ack++;
+				sctp_flight_size_decrease(tp1);
+				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+												     tp1);
+				}
+			}
+
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
+					      asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+			}
+			/* add back to the rwnd */
+			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+
+			/* remove from the total flight */
+			sctp_total_flight_decrease(stcb, tp1);
+
+			if ((stcb->asoc.prsctp_supported) &&
+			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
+				/* Has it been retransmitted tv_sec times? - we store the retran count there. */
+				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
+					/* Yes, so drop it */
+					if (tp1->data != NULL) {
+						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
+										 SCTP_SO_NOT_LOCKED);
+					}
+					/* Make sure to flag we had a FR */
+					tp1->whoTo->net_ack++;
+					continue;
+				}
+			}
+			/* SCTP_PRINTF("OK, we are now ready to FR this guy\n"); */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+				sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
+					    0, SCTP_FR_MARKED);
+			}
+			if (strike_flag) {
+				/* This is a subsequent FR */
+				SCTP_STAT_INCR(sctps_sendmultfastretrans);
+			}
+			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+			if (asoc->sctp_cmt_on_off > 0) {
+				/*
+				 * CMT: Using RTX_SSTHRESH policy for CMT.
+				 * If CMT is being used, then pick dest with
+				 * largest ssthresh for any retransmission.
+				 */
+				tp1->no_fr_allowed = 1;
+				alt = tp1->whoTo;
+				/*sa_ignore NO_NULL_CHK*/
+				if (asoc->sctp_cmt_pf > 0) {
+					/* JRS 5/18/07 - If CMT PF is on, use the PF version of find_alt_net() */
+					alt = sctp_find_alternate_net(stcb, alt, 2);
+				} else {
+					/* JRS 5/18/07 - If only CMT is on, use the CMT version of find_alt_net() */
+                                        /*sa_ignore NO_NULL_CHK*/
+					alt = sctp_find_alternate_net(stcb, alt, 1);
+				}
+				if (alt == NULL) {
+					alt = tp1->whoTo;
+				}
+				/*
+				 * CUCv2: If a different dest is picked for
+				 * the retransmission, then new
+				 * (rtx-)pseudo_cumack needs to be tracked
+				 * for orig dest. Let CUCv2 track new (rtx-)
+				 * pseudo-cumack always.
+				 */
+				if (tp1->whoTo) {
+					tp1->whoTo->find_pseudo_cumack = 1;
+					tp1->whoTo->find_rtx_pseudo_cumack = 1;
+				}
+
+			} else {/* CMT is OFF */
+
+#ifdef SCTP_FR_TO_ALTERNATE
+				/* Can we find an alternate? */
+				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
+#else
+				/*
+				 * default behavior is to NOT retransmit
+				 * FR's to an alternate. Armando Caro's
+				 * paper details why.
+				 */
+				alt = tp1->whoTo;
+#endif
+			}
+
+			tp1->rec.data.doing_fast_retransmit = 1;
+			tot_retrans++;
+			/* mark the sending seq for possible subsequent FR's */
+			/*
+			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
+			 * (uint32_t)tpi->rec.data.tsn);
+			 */
+			if (TAILQ_EMPTY(&asoc->send_queue)) {
+				/*
+				 * If the send queue is empty then it's
+				 * the next sequence number that will be
+				 * assigned so we subtract one from this to
+				 * get the one we last sent.
+				 */
+				tp1->rec.data.fast_retran_tsn = sending_seq;
+			} else {
+				/*
+				 * If there are chunks on the send queue
+				 * (unsent data that has made it from the
+				 * stream queues but not out the door), we
+				 * take the first one (which will have the
+				 * lowest TSN) and subtract one to get the
+				 * one we last sent.
+				 */
+				struct sctp_tmit_chunk *ttt;
+
+				ttt = TAILQ_FIRST(&asoc->send_queue);
+				tp1->rec.data.fast_retran_tsn =
+					ttt->rec.data.tsn;
+			}
+
+			if (tp1->do_rtt) {
+				/*
+				 * this guy had a RTO calculation pending on
+				 * it, cancel it
+				 */
+				if ((tp1->whoTo != NULL) &&
+				    (tp1->whoTo->rto_needed == 0)) {
+					tp1->whoTo->rto_needed = 1;
+				}
+				tp1->do_rtt = 0;
+			}
+			if (alt != tp1->whoTo) {
+				/* yes, there is an alternate. */
+				sctp_free_remote_addr(tp1->whoTo);
+				/*sa_ignore FREED_MEMORY*/
+				tp1->whoTo = alt;
+				atomic_add_int(&alt->ref_count, 1);
+			}
+		}
+	}
+}
+
+struct sctp_tmit_chunk *
+sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
+    struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
+	struct timeval now;
+	int now_filled = 0;
+
+	if (asoc->prsctp_supported == 0) {
+		return (NULL);
+	}
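+	/*
+	 * Walk the sent queue from the front; the Advanced.Peer.Ack.Point
+	 * (RFC 3758) may be moved past chunks that have been abandoned
+	 * (FORWARD_TSN_SKIP or NR_ACKED), but must stop at the first chunk
+	 * that is still reliable or still awaiting a normal ack.
+	 */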
+	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
+		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
+		    tp1->sent != SCTP_DATAGRAM_RESEND &&
+		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+			/* no chance to advance, out of here */
+			break;
+		}
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
+			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
+				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+					       asoc->advanced_peer_ack_point,
+					       tp1->rec.data.tsn, 0, 0);
+			}
+		}
+		if (!PR_SCTP_ENABLED(tp1->flags)) {
+			/*
+			 * We can't fwd-tsn past any that are reliable, i.e.
+			 * will be retransmitted until the asoc fails.
+			 */
+			break;
+		}
+		if (!now_filled) {
+			(void)SCTP_GETTIME_TIMEVAL(&now);
+			now_filled = 1;
+		}
+		/*
+		 * Now we have a chunk which is marked for another
+		 * retransmission to a PR-stream but may have run out of its
+		 * chances already OR has been marked to skip now. Can we
+		 * skip it if it's a resend?
+		 */
+		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
+		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
+			/*
+			 * Now is this one marked for resend and its time is
+			 * now up?
+			 */
+#ifndef __FreeBSD__
+			if (timercmp(&now, &tp1->rec.data.timetodrop, >)) {
+#else
+			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
+#endif
+				/* Yes so drop it */
+				if (tp1->data) {
+					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
+					    1, SCTP_SO_NOT_LOCKED);
+				}
+			} else {
+				/*
+				 * No, we are done when we hit one for resend
+				 * whose time has not expired.
+				 */
+				break;
+			}
+		}
+		/*
+		 * Ok now if this chunk is marked to drop it we can clean up
+		 * the chunk, advance our peer ack point and we can check
+		 * the next chunk.
+		 */
+		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
+		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
+			/* advance PeerAckPoint goes forward */
+			if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
+				asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
+				a_adv = tp1;
+			} else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
+				/* No update but we do save the chk */
+				a_adv = tp1;
+			}
+		} else {
+			/*
+			 * If it is still in RESEND we can advance no
+			 * further
+			 */
+			break;
+		}
+	}
+	return (a_adv);
+}
+
+static int
+sctp_fs_audit(struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk;
+	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
+	int ret;
+#ifndef INVARIANTS
+	int entry_flight, entry_cnt;
+#endif
+
+	ret = 0;
+#ifndef INVARIANTS
+	entry_flight = asoc->total_flight;
+	entry_cnt = asoc->total_flight_count;
+#endif
+	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
+		return (0);
+
+	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+		if (chk->sent < SCTP_DATAGRAM_RESEND) {
+			SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
+			            chk->rec.data.tsn,
+			            chk->send_size,
+			            chk->snd_count);
+			inflight++;
+		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
+			resend++;
+		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
+			inbetween++;
+		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
+			above++;
+		} else {
+			acked++;
+		}
+	}
+
+	if ((inflight > 0) || (inbetween > 0)) {
+#ifdef INVARIANTS
+		panic("Flight size-express incorrect? \n");
+#else
+		SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
+		            entry_flight, entry_cnt);
+
+		SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
+			    inflight, inbetween, resend, above, acked);
+		ret = 1;
+#endif
+	}
+	return (ret);
+}
+
+
+static void
+sctp_window_probe_recovery(struct sctp_tcb *stcb,
+	                   struct sctp_association *asoc,
+			   struct sctp_tmit_chunk *tp1)
+{
+	tp1->window_probe = 0;
+	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
+		/* TSN's skipped we do NOT move back. */
+		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
+			       tp1->whoTo ? tp1->whoTo->flight_size : 0,
+			       tp1->book_size,
+			       (uint32_t)(uintptr_t)tp1->whoTo,
+			       tp1->rec.data.tsn);
+		return;
+	}
+	/* First setup this by shrinking flight */
+	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+									     tp1);
+	}
+	sctp_flight_size_decrease(tp1);
+	sctp_total_flight_decrease(stcb, tp1);
+	/* Now mark for resend */
+	tp1->sent = SCTP_DATAGRAM_RESEND;
+	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
+			       tp1->whoTo->flight_size,
+			       tp1->book_size,
+			       (uint32_t)(uintptr_t)tp1->whoTo,
+			       tp1->rec.data.tsn);
+	}
+}
+
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+                         uint32_t rwnd, int *abort_now, int ecne_seen)
+{
+	struct sctp_nets *net;
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *tp1, *tp2;
+	uint32_t old_rwnd;
+	int win_probe_recovery = 0;
+	int win_probe_recovered = 0;
+	int j, done_once = 0;
+	int rto_ok = 1;
+	uint32_t send_s;
+
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
+		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+	}
+	SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
+	stcb->asoc.cumack_log_at++;
+	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+		stcb->asoc.cumack_log_at = 0;
+	}
+#endif
+	asoc = &stcb->asoc;
+	old_rwnd = asoc->peers_rwnd;
+	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
+		/* old ack */
+		return;
+	} else if (asoc->last_acked_seq == cumack) {
+		/* Window update sack */
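+		/*
+		 * Same cum-ack as before, so only the advertised window may
+		 * have moved.  The usable peer rwnd is the advertised rwnd
+		 * minus the data still in flight and a per-chunk overhead
+		 * allowance (sctp_peer_chunk_oh), never going below zero.
+		 */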
+		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+						    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+			/* SWS sender side engages */
+			asoc->peers_rwnd = 0;
+		}
+		if (asoc->peers_rwnd > old_rwnd) {
+			goto again;
+		}
+		return;
+	}
+
+	/* First setup for CC stuff */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
+			/* Drag along the window_tsn for cwr's */
+			net->cwr_window_tsn = cumack;
+		}
+		net->prev_cwnd = net->cwnd;
+		net->net_ack = 0;
+		net->net_ack2 = 0;
+
+		/*
+		 * CMT: Reset CUC and Fast recovery algo variables before
+		 * SACK processing
+		 */
+		net->new_pseudo_cumack = 0;
+		net->will_exit_fast_recovery = 0;
+		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
+			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
+		}
+	}
+	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+		tp1 = TAILQ_LAST(&asoc->sent_queue,
+				 sctpchunk_listhead);
+		send_s = tp1->rec.data.tsn + 1;
+	} else {
+		send_s = asoc->sending_seq;
+	}
+	if (SCTP_TSN_GE(cumack, send_s)) {
+		struct mbuf *op_err;
+		char msg[SCTP_DIAG_INFO_LEN];
+
+		*abort_now = 1;
+		/* XXX */
+		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
+			 cumack, send_s);
+		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		return;
+	}
+	asoc->this_sack_highest_gap = cumack;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+			       stcb->asoc.overall_error_count,
+			       0,
+			       SCTP_FROM_SCTP_INDATA,
+			       __LINE__);
+	}
+	stcb->asoc.overall_error_count = 0;
+	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
+		/* process the new consecutive TSN first */
+		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
+			if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
+				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
+					SCTP_PRINTF("Warning, an unsent is now acked?\n");
+				}
+				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+					/*
+					 * If it is less than ACKED, it is
+					 * now no-longer in flight. Higher
+					 * values may occur during marking
+					 */
+					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+								       tp1->whoTo->flight_size,
+								       tp1->book_size,
+								       (uint32_t)(uintptr_t)tp1->whoTo,
+								       tp1->rec.data.tsn);
+						}
+						sctp_flight_size_decrease(tp1);
+						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+														     tp1);
+						}
+						/* sa_ignore NO_NULL_CHK */
+						sctp_total_flight_decrease(stcb, tp1);
+					}
+					tp1->whoTo->net_ack += tp1->send_size;
+					if (tp1->snd_count < 2) {
+						/*
+						 * True non-retransmitted
+						 * chunk
+						 */
+						tp1->whoTo->net_ack2 +=
+							tp1->send_size;
+
+						/* update RTO too? */
+						if (tp1->do_rtt) {
+							if (rto_ok) {
+								tp1->whoTo->RTO =
+									/*
+									 * sa_ignore
+									 * NO_NULL_CHK
+									 */
+									sctp_calculate_rto(stcb,
+											   asoc, tp1->whoTo,
+											   &tp1->sent_rcv_time,
+											   sctp_align_safe_nocopy,
+											   SCTP_RTT_FROM_DATA);
+								rto_ok = 0;
+							}
+							if (tp1->whoTo->rto_needed == 0) {
+								tp1->whoTo->rto_needed = 1;
+							}
+							tp1->do_rtt = 0;
+						}
+					}
+					/*
+					 * CMT: CUCv2 algorithm. From the
+					 * cumack'd TSNs, for each TSN being
+					 * acked for the first time, set the
+					 * following variables for the
+					 * corresp destination.
+					 * new_pseudo_cumack will trigger a
+					 * cwnd update.
+					 * find_(rtx_)pseudo_cumack will
+					 * trigger search for the next
+					 * expected (rtx-)pseudo-cumack.
+					 */
+					tp1->whoTo->new_pseudo_cumack = 1;
+					tp1->whoTo->find_pseudo_cumack = 1;
+					tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						/* sa_ignore NO_NULL_CHK */
+						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
+					}
+				}
+				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+				}
+				if (tp1->rec.data.chunk_was_revoked) {
+					/* deflate the cwnd */
+					tp1->whoTo->cwnd -= tp1->book_size;
+					tp1->rec.data.chunk_was_revoked = 0;
+				}
+				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+					if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+						asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+					} else {
+						panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
+#endif
+					}
+				}
+				if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+				    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+				    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
+					asoc->trigger_reset = 1;
+				}
+				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+				if (tp1->data) {
+					/* sa_ignore NO_NULL_CHK */
+					sctp_free_bufspace(stcb, asoc, tp1, 1);
+					sctp_m_freem(tp1->data);
+					tp1->data = NULL;
+				}
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+					sctp_log_sack(asoc->last_acked_seq,
+						      cumack,
+						      tp1->rec.data.tsn,
+						      0,
+						      0,
+						      SCTP_LOG_FREE_SENT);
+				}
+				asoc->sent_queue_cnt--;
+				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
+			} else {
+				break;
+			}
+		}
+
+	}
+#if defined(__Userspace__)
+	if (stcb->sctp_ep->recv_callback) {
+		if (stcb->sctp_socket) {
+			uint32_t inqueue_bytes, sb_free_now;
+			struct sctp_inpcb *inp;
+
+			inp = stcb->sctp_ep;
+			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
+
+			/* check if the amount free in the send socket buffer crossed the threshold */
+			if (inp->send_callback &&
+			    (((inp->send_sb_threshold > 0) &&
+			      (sb_free_now >= inp->send_sb_threshold) &&
+			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
+			     (inp->send_sb_threshold == 0))) {
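+				/* Hold a reference and drop the TCB lock across the user callback. */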
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				SCTP_TCB_UNLOCK(stcb);
+				inp->send_callback(stcb->sctp_socket, sb_free_now);
+				SCTP_TCB_LOCK(stcb);
+				atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			}
+		}
+	} else if (stcb->sctp_socket) {
+#else
+	/* sa_ignore NO_NULL_CHK */
+	if (stcb->sctp_socket) {
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+
+#endif
+		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+			/* sa_ignore NO_NULL_CHK */
+			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
+		}
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			/* assoc was freed while we were unlocked */
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+#endif
+		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	} else {
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
+		}
+	}
+
+	/* JRS - Use the congestion control given in the CC module */
+	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			if (net->net_ack2 > 0) {
+				/*
+				 * Karn's rule applies to clearing error count, this
+				 * is optional.
+				 */
+				net->error_count = 0;
+				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+					/* addr came good */
+					net->dest_state |= SCTP_ADDR_REACHABLE;
+					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+					                0, (void *)net, SCTP_SO_NOT_LOCKED);
+				}
+				if (net == stcb->asoc.primary_destination) {
+					if (stcb->asoc.alternate) {
+						/* release the alternate, primary is good */
+						sctp_free_remote_addr(stcb->asoc.alternate);
+						stcb->asoc.alternate = NULL;
+					}
+				}
+				if (net->dest_state & SCTP_ADDR_PF) {
+					net->dest_state &= ~SCTP_ADDR_PF;
+					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+					                stcb->sctp_ep, stcb, net,
+					                SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
+					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
+					/* Done with this net */
+					net->net_ack = 0;
+				}
+				/* restore any doubled timers */
+				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+				if (net->RTO < stcb->asoc.minrto) {
+					net->RTO = stcb->asoc.minrto;
+				}
+				if (net->RTO > stcb->asoc.maxrto) {
+					net->RTO = stcb->asoc.maxrto;
+				}
+			}
+		}
+		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
+	}
+	asoc->last_acked_seq = cumack;
+
+	if (TAILQ_EMPTY(&asoc->sent_queue)) {
+		/* nothing left in-flight */
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			net->flight_size = 0;
+			net->partial_bytes_acked = 0;
+		}
+		asoc->total_flight = 0;
+		asoc->total_flight_count = 0;
+	}
+
+	/* RWND update */
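+	/*
+	 * The usable peer rwnd is the advertised rwnd minus what is still in
+	 * flight, counting a per-chunk overhead (sctp_peer_chunk_oh) for each
+	 * outstanding chunk.
+	 */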
+	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
+					    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+		/* SWS sender side engages */
+		asoc->peers_rwnd = 0;
+	}
+	if (asoc->peers_rwnd > old_rwnd) {
+		win_probe_recovery = 1;
+	}
+	/* Now assure a timer is running wherever data is queued */
+again:
+	j = 0;
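+	/* j counts the destinations that still have data in flight */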
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		int to_ticks;
+		if (win_probe_recovery && (net->window_probe)) {
+			win_probe_recovered = 1;
+			/*
+			 * Find the first chunk that was used with the window
+			 * probe and clear its sent state; it moves back to the
+			 * data send queue.
+			 */
+			/* sa_ignore FREED_MEMORY */
+			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+				if (tp1->window_probe) {
+					/* move back to data send queue */
+					sctp_window_probe_recovery(stcb, asoc, tp1);
+					break;
+				}
+			}
+		}
+		if (net->RTO == 0) {
+			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		} else {
+			to_ticks = MSEC_TO_TICKS(net->RTO);
+		}
+		if (net->flight_size) {
+			j++;
+			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
+						  sctp_timeout_handler, &net->rxt_timer);
+			if (net->window_probe) {
+				net->window_probe = 0;
+			}
+		} else {
+			if (net->window_probe) {
+				/* In window probes we must assure a timer is still running there */
+				net->window_probe = 0;
+				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
+					                    sctp_timeout_handler, &net->rxt_timer);
+				}
+			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+				                stcb, net,
+				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_23);
+			}
+		}
+	}
+	if ((j == 0) &&
+	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+	    (asoc->sent_queue_retran_cnt == 0) &&
+	    (win_probe_recovered == 0) &&
+	    (done_once == 0)) {
+		/* Huh, this should not happen unless all packets
+		 * are PR-SCTP and marked to skip, of course.
+		 */
+		if (sctp_fs_audit(asoc)) {
+			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+				net->flight_size = 0;
+			}
+			asoc->total_flight = 0;
+			asoc->total_flight_count = 0;
+			asoc->sent_queue_retran_cnt = 0;
+			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+					sctp_flight_size_increase(tp1);
+					sctp_total_flight_increase(stcb, tp1);
+				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+				}
+			}
+		}
+		done_once = 1;
+		goto again;
+	}
+	/**********************************/
+	/* Now what about shutdown issues */
+	/**********************************/
+	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+		/* nothing left on sendqueue.. consider done */
+		/* clean up */
+		if ((asoc->stream_queue_cnt == 1) &&
+		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+		     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
+			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+		}
+		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+		    (asoc->stream_queue_cnt == 0)) {
+			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+				/* Need to abort here */
+				struct mbuf *op_err;
+
+			abort_out_now:
+				*abort_now = 1;
+				/* XXX */
+				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
+				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+				return;
+			} else {
+				struct sctp_nets *netp;
+
+				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+				}
+				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+				sctp_stop_timers_for_shutdown(stcb);
+				if (asoc->alternate) {
+					netp = asoc->alternate;
+				} else {
+					netp = asoc->primary_destination;
+				}
+				sctp_send_shutdown(stcb, netp);
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+						 stcb->sctp_ep, stcb, netp);
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+						 stcb->sctp_ep, stcb, netp);
+			}
+		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+			   (asoc->stream_queue_cnt == 0)) {
+			struct sctp_nets *netp;
+
+			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+				goto abort_out_now;
+			}
+			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+			sctp_stop_timers_for_shutdown(stcb);
+			if (asoc->alternate) {
+				netp = asoc->alternate;
+			} else {
+				netp = asoc->primary_destination;
+			}
+			sctp_send_shutdown_ack(stcb, netp);
+			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+					 stcb->sctp_ep, stcb, netp);
+		}
+	}
+	/*********************************************/
+	/* Here we perform PR-SCTP procedures        */
+	/* (section 4.2)                             */
+	/*********************************************/
+	/* C1. update advancedPeerAckPoint */
+	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
+		asoc->advanced_peer_ack_point = cumack;
+	}
+	/* PR-Sctp issues need to be addressed too */
+	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
+		struct sctp_tmit_chunk *lchk;
+		uint32_t old_adv_peer_ack_point;
+
+		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+		/* C3. See if we need to send a Fwd-TSN */
+		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
+			/*
+			 * ISSUE with ECN, see FWD-TSN processing.
+			 */
+			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
+				send_forward_tsn(stcb, asoc);
+			} else if (lchk) {
+				/* try to FR fwd-tsn's that get lost too */
+				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
+					send_forward_tsn(stcb, asoc);
+				}
+			}
+		}
+		if (lchk) {
+			/* Assure a timer is up */
+			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+					 stcb->sctp_ep, stcb, lchk->whoTo);
+		}
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+			       rwnd,
+			       stcb->asoc.peers_rwnd,
+			       stcb->asoc.total_flight,
+			       stcb->asoc.total_output_queue_size);
+	}
+}
+
+void
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+                 struct sctp_tcb *stcb,
+                 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+                 int *abort_now, uint8_t flags,
+                 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *tp1, *tp2;
+	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
+	uint16_t wake_him = 0;
+	uint32_t send_s = 0;
+	long j;
+	int accum_moved = 0;
+	int will_exit_fast_recovery = 0;
+	uint32_t a_rwnd, old_rwnd;
+	int win_probe_recovery = 0;
+	int win_probe_recovered = 0;
+	struct sctp_nets *net = NULL;
+	int done_once;
+	int rto_ok = 1;
+	uint8_t reneged_all = 0;
+	uint8_t cmt_dac_flag;
+	/*
+	 * we take any chance we can to service our queues since we cannot
+	 * get awoken when the socket is read from :<
+	 */
+	/*
+	 * Now perform the actual SACK handling:
+	 * 1)  Verify that it is not an old sack; if so, discard.
+	 * 2)  If there is nothing left in the send queue (cum-ack is equal
+	 *     to last acked) then you have a duplicate too; update any rwnd
+	 *     change and verify no timers are running, then return.
+	 * 3)  Process any new consecutive data, i.e. cum-ack moved; process
+	 *     these first and note that it moved.
+	 * 4)  Process any sack blocks.
+	 * 5)  Drop any acked chunks from the queue.
+	 * 6)  Check for any revoked blocks and mark them.
+	 * 7)  Update the cwnd.
+	 * 8)  Nothing left: sync up flight sizes and things, stop all timers
+	 *     and also check for the shutdown_pending state. If so, then go
+	 *     ahead and send off the shutdown. If in shutdown recv, send off
+	 *     the shutdown-ack and start that timer, then return.
+	 * 9)  Strike any non-acked things and do the FR procedure if needed,
+	 *     being sure to set the FR flag.
+	 * 10) Do pr-sctp procedures.
+	 * 11) Apply any FR penalties.
+	 * 12) Assure we will SACK if in the shutdown_recv state.
+	 */
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	/* CMT DAC algo */
+	this_sack_lowest_newack = 0;
+	SCTP_STAT_INCR(sctps_slowpath_sack);
+	last_tsn = cum_ack;
+	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
+	stcb->asoc.cumack_log_at++;
+	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
+		stcb->asoc.cumack_log_at = 0;
+	}
+#endif
+	a_rwnd = rwnd;
+
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
+		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
+		               rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
+	}
+
+	old_rwnd = stcb->asoc.peers_rwnd;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+		               stcb->asoc.overall_error_count,
+		               0,
+		               SCTP_FROM_SCTP_INDATA,
+		               __LINE__);
+	}
+	stcb->asoc.overall_error_count = 0;
+	asoc = &stcb->asoc;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+		sctp_log_sack(asoc->last_acked_seq,
+		              cum_ack,
+		              0,
+		              num_seg,
+		              num_dup,
+		              SCTP_LOG_NEW_SACK);
+	}
+	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
+		uint16_t i;
+		uint32_t *dupdata, dblock;
+
+		for (i = 0; i < num_dup; i++) {
+			dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
+			                                    sizeof(uint32_t), (uint8_t *)&dblock);
+			if (dupdata == NULL) {
+				break;
+			}
+			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
+		}
+	}
+	/* reality check */
+	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+		tp1 = TAILQ_LAST(&asoc->sent_queue,
+				 sctpchunk_listhead);
+		send_s = tp1->rec.data.tsn + 1;
+	} else {
+		tp1 = NULL;
+		send_s = asoc->sending_seq;
+	}
+	if (SCTP_TSN_GE(cum_ack, send_s)) {
+		struct mbuf *op_err;
+		char msg[SCTP_DIAG_INFO_LEN];
+
+		/*
+		 * no way, we have not even sent this TSN out yet.
+		 * Peer is hopelessly messed up with us.
+		 */
+		SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
+			    cum_ack, send_s);
+		if (tp1) {
+			SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
+				    tp1->rec.data.tsn, (void *)tp1);
+		}
+	hopeless_peer:
+		*abort_now = 1;
+		/* XXX */
+		snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
+			 cum_ack, send_s);
+		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		return;
+	}
+	/**********************/
+	/* 1) check the range */
+	/**********************/
+	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
+		/* acking something behind */
+		return;
+	}
+
+	/* update the Rwnd of the peer */
+	if (TAILQ_EMPTY(&asoc->sent_queue) &&
+	    TAILQ_EMPTY(&asoc->send_queue) &&
+	    (asoc->stream_queue_cnt == 0)) {
+		/* nothing left on send/sent and strmq */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+			                  asoc->peers_rwnd, 0, 0, a_rwnd);
+		}
+		asoc->peers_rwnd = a_rwnd;
+		if (asoc->sent_queue_retran_cnt) {
+			asoc->sent_queue_retran_cnt = 0;
+		}
+		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+			/* SWS sender side engages */
+			asoc->peers_rwnd = 0;
+		}
+		/* stop any timers */
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+			                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
+			net->partial_bytes_acked = 0;
+			net->flight_size = 0;
+		}
+		asoc->total_flight = 0;
+		asoc->total_flight_count = 0;
+		return;
+	}
+	/*
+	 * We init net_ack and net_ack2 to 0. These are used to track two
+	 * things: the total byte count acked is tracked in net_ack, AND
+	 * net_ack2 is used to track the total bytes acked that are
+	 * unambiguous and were never retransmitted. We track these on a per
+	 * destination address basis.
+	 */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
+			/* Drag along the window_tsn for cwr's */
+			net->cwr_window_tsn = cum_ack;
+		}
+		net->prev_cwnd = net->cwnd;
+		net->net_ack = 0;
+		net->net_ack2 = 0;
+
+		/*
+		 * CMT: Reset CUC and Fast recovery algo variables before
+		 * SACK processing
+		 */
+		net->new_pseudo_cumack = 0;
+		net->will_exit_fast_recovery = 0;
+		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
+			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack)(stcb, net);
+		}
+	}
+	/* process the new consecutive TSN first */
+	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
+			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
+				accum_moved = 1;
+				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
+					/*
+					 * If it is less than ACKED, it is
+					 * now no-longer in flight. Higher
+					 * values may occur during marking
+					 */
+					if ((tp1->whoTo->dest_state &
+					     SCTP_ADDR_UNCONFIRMED) &&
+					    (tp1->snd_count < 2)) {
+						/*
+						 * If there was no retransmit
+						 * and the address is
+						 * unconfirmed and we sent
+						 * there and are now sacked,
+						 * it is confirmed; mark it
+						 * so.
+						 */
+						tp1->whoTo->dest_state &=
+							~SCTP_ADDR_UNCONFIRMED;
+					}
+					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
+							               tp1->whoTo->flight_size,
+							               tp1->book_size,
+							               (uint32_t)(uintptr_t)tp1->whoTo,
+							               tp1->rec.data.tsn);
+						}
+						sctp_flight_size_decrease(tp1);
+						sctp_total_flight_decrease(stcb, tp1);
+						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
+							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged)(tp1->whoTo,
+														     tp1);
+						}
+					}
+					tp1->whoTo->net_ack += tp1->send_size;
+
+					/* CMT SFR and DAC algos */
+					this_sack_lowest_newack = tp1->rec.data.tsn;
+					tp1->whoTo->saw_newack = 1;
+
+					if (tp1->snd_count < 2) {
+						/*
+						 * True non-retransmitted
+						 * chunk
+						 */
+						tp1->whoTo->net_ack2 +=
+							tp1->send_size;
+
+						/* update RTO too? */
+						if (tp1->do_rtt) {
+							if (rto_ok) {
+								tp1->whoTo->RTO =
+									sctp_calculate_rto(stcb,
+											   asoc, tp1->whoTo,
+											   &tp1->sent_rcv_time,
+											   sctp_align_safe_nocopy,
+											   SCTP_RTT_FROM_DATA);
+								rto_ok = 0;
+							}
+							if (tp1->whoTo->rto_needed == 0) {
+								tp1->whoTo->rto_needed = 1;
+							}
+							tp1->do_rtt = 0;
+						}
+					}
+					/*
+					 * CMT: CUCv2 algorithm. From the
+					 * cumack'd TSNs, for each TSN being
+					 * acked for the first time, set the
+					 * following variables for the
+					 * corresp destination.
+					 * new_pseudo_cumack will trigger a
+					 * cwnd update.
+					 * find_(rtx_)pseudo_cumack will
+					 * trigger search for the next
+					 * expected (rtx-)pseudo-cumack.
+					 */
+					tp1->whoTo->new_pseudo_cumack = 1;
+					tp1->whoTo->find_pseudo_cumack = 1;
+					tp1->whoTo->find_rtx_pseudo_cumack = 1;
+
+
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+						sctp_log_sack(asoc->last_acked_seq,
+						              cum_ack,
+						              tp1->rec.data.tsn,
+						              0,
+						              0,
+						              SCTP_LOG_TSN_ACKED);
+					}
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
+					}
+				}
+				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+#ifdef SCTP_AUDITING_ENABLED
+					sctp_audit_log(0xB3,
+					               (asoc->sent_queue_retran_cnt & 0x000000ff));
+#endif
+				}
+				if (tp1->rec.data.chunk_was_revoked) {
+					/* deflate the cwnd */
+					tp1->whoTo->cwnd -= tp1->book_size;
+					tp1->rec.data.chunk_was_revoked = 0;
+				}
+				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+					tp1->sent = SCTP_DATAGRAM_ACKED;
+				}
+			}
+		} else {
+			break;
+		}
+	}
+	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
+	/* always set this up to cum-ack */
+	asoc->this_sack_highest_gap = last_tsn;
+
+	if ((num_seg > 0) || (num_nr_seg > 0)) {
+
+		/*
+		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
+		 * to be greater than the cumack. Also reset saw_newack to 0
+		 * for all dests.
+		 */
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			net->saw_newack = 0;
+			net->this_sack_highest_newack = last_tsn;
+		}
+
+		/*
+		 * this_sack_highest_gap will increase while handling NEW
+		 * segments; this_sack_highest_newack will increase while
+		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
+		 * used for the CMT DAC algo. saw_newack will also change.
+		 */
+		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
+			&biggest_tsn_newly_acked, &this_sack_lowest_newack,
+			num_seg, num_nr_seg, &rto_ok)) {
+			wake_him++;
+		}
+		/*
+		 * validate the biggest_tsn_acked in the gap acks if
+		 * strict adherence is wanted.
+		 */
+		if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
+			/*
+			 * peer is either confused or we are under
+			 * attack. We must abort.
+			 */
+			SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
+				    biggest_tsn_acked, send_s);
+			goto hopeless_peer;
+		}
+	}
+	/*******************************************/
+	/* cancel ALL T3-send timer if accum moved */
+	/*******************************************/
+	if (asoc->sctp_cmt_on_off > 0) {
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			if (net->new_pseudo_cumack)
+				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+				                stcb, net,
+				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
+
+		}
+	} else {
+		if (accum_moved) {
+			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+				                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
+			}
+		}
+	}
+	/********************************************/
+	/* drop the acked chunks from the sentqueue */
+	/********************************************/
+	asoc->last_acked_seq = cum_ack;
+
+	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
+		if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
+			break;
+		}
+		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
+			if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
+				asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+			} else {
+				panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
+#endif
+			}
+		}
+		if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
+		    (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+		    TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
+			asoc->trigger_reset = 1;
+		}
+		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
+		if (PR_SCTP_ENABLED(tp1->flags)) {
+			if (asoc->pr_sctp_cnt != 0)
+				asoc->pr_sctp_cnt--;
+		}
+		asoc->sent_queue_cnt--;
+		if (tp1->data) {
+			/* sa_ignore NO_NULL_CHK */
+			sctp_free_bufspace(stcb, asoc, tp1, 1);
+			sctp_m_freem(tp1->data);
+			tp1->data = NULL;
+			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
+				asoc->sent_queue_cnt_removeable--;
+			}
+		}
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
+			sctp_log_sack(asoc->last_acked_seq,
+			              cum_ack,
+			              tp1->rec.data.tsn,
+			              0,
+			              0,
+			              SCTP_LOG_FREE_SENT);
+		}
+		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
+		wake_him++;
+	}
+	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
+#ifdef INVARIANTS
+		panic("Warning flight size is positive and should be 0");
+#else
+		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
+		            asoc->total_flight);
+#endif
+		asoc->total_flight = 0;
+	}
+
+#if defined(__Userspace__)
+	if (stcb->sctp_ep->recv_callback) {
+		if (stcb->sctp_socket) {
+			uint32_t inqueue_bytes, sb_free_now;
+			struct sctp_inpcb *inp;
+
+			inp = stcb->sctp_ep;
+			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
+
+			/* check if the amount free in the send socket buffer crossed the threshold */
+			if (inp->send_callback &&
+			   (((inp->send_sb_threshold > 0) && (sb_free_now >= inp->send_sb_threshold)) ||
+			    (inp->send_sb_threshold == 0))) {
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				SCTP_TCB_UNLOCK(stcb);
+				inp->send_callback(stcb->sctp_socket, sb_free_now);
+				SCTP_TCB_LOCK(stcb);
+				atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			}
+		}
+	} else if ((wake_him) && (stcb->sctp_socket)) {
+#else
+	/* sa_ignore NO_NULL_CHK */
+	if ((wake_him) && (stcb->sctp_socket)) {
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+
+#endif
+		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
+		}
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			/* assoc was freed while we were unlocked */
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+#endif
+		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	} else {
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
+			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
+		}
+	}
+
+	if (asoc->fast_retran_loss_recovery && accum_moved) {
+		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
+			/* Setup so we will exit RFC2582 fast recovery */
+			will_exit_fast_recovery = 1;
+		}
+	}
+	/*
+	 * Check for revoked fragments:
+	 *
+	 * If the previous sack had no frags, then we cannot have any revoked.
+	 * If the previous sack had frags, then:
+	 *   - if we now have frags (num_seg > 0), call sctp_check_for_revoked()
+	 *     to tell if the peer revoked some of them;
+	 *   - else, the peer revoked all ACKED fragments, since we had some
+	 *     before and now we have NONE.
+	 */
+
+	if (num_seg) {
+		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
+		asoc->saw_sack_with_frags = 1;
+	} else if (asoc->saw_sack_with_frags) {
+		int cnt_revoked = 0;
+
+		/* Peer revoked all dg's marked or acked */
+		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
+				tp1->sent = SCTP_DATAGRAM_SENT;
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
+					               tp1->whoTo->flight_size,
+					               tp1->book_size,
+					               (uint32_t)(uintptr_t)tp1->whoTo,
+					               tp1->rec.data.tsn);
+				}
+				sctp_flight_size_increase(tp1);
+				sctp_total_flight_increase(stcb, tp1);
+				tp1->rec.data.chunk_was_revoked = 1;
+				/*
+				 * To ensure that this increase in
+				 * flightsize, which is artificial,
+				 * does not throttle the sender, we
+				 * also increase the cwnd
+				 * artificially.
+				 */
+				tp1->whoTo->cwnd += tp1->book_size;
+				cnt_revoked++;
+			}
+		}
+		if (cnt_revoked) {
+			reneged_all = 1;
+		}
+		asoc->saw_sack_with_frags = 0;
+	}
+	if (num_nr_seg > 0)
+		asoc->saw_sack_with_nr_frags = 1;
+	else
+		asoc->saw_sack_with_nr_frags = 0;
+
+	/* JRS - Use the congestion control given in the CC module */
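+	/* If an ECN-Echo was seen, skip the error-count clearing and cwnd update below. */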
+	if (ecne_seen == 0) {
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			if (net->net_ack2 > 0) {
+				/*
+				 * Karn's rule applies to clearing error count, this
+				 * is optional.
+				 */
+				net->error_count = 0;
+				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+					/* addr came good */
+					net->dest_state |= SCTP_ADDR_REACHABLE;
+					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+					                0, (void *)net, SCTP_SO_NOT_LOCKED);
+				}
+
+				if (net == stcb->asoc.primary_destination) {
+					if (stcb->asoc.alternate) {
+						/* release the alternate, primary is good */
+						sctp_free_remote_addr(stcb->asoc.alternate);
+						stcb->asoc.alternate = NULL;
+					}
+				}
+
+				if (net->dest_state & SCTP_ADDR_PF) {
+					net->dest_state &= ~SCTP_ADDR_PF;
+					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+					                stcb->sctp_ep, stcb, net,
+					                SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
+					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
+					/* Done with this net */
+					net->net_ack = 0;
+				}
+				/* restore any doubled timers */
+				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+				if (net->RTO < stcb->asoc.minrto) {
+					net->RTO = stcb->asoc.minrto;
+				}
+				if (net->RTO > stcb->asoc.maxrto) {
+					net->RTO = stcb->asoc.maxrto;
+				}
+			}
+		}
+		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
+	}
+
+	if (TAILQ_EMPTY(&asoc->sent_queue)) {
+		/* nothing left in-flight */
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			/* stop all timers */
+			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+			                stcb, net,
+			                SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
+			net->flight_size = 0;
+			net->partial_bytes_acked = 0;
+		}
+		asoc->total_flight = 0;
+		asoc->total_flight_count = 0;
+	}
+
+	/**********************************/
+	/* Now what about shutdown issues */
+	/**********************************/
+	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
+		/* nothing left on sendqueue.. consider done */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+			                  asoc->peers_rwnd, 0, 0, a_rwnd);
+		}
+		asoc->peers_rwnd = a_rwnd;
+		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+			/* SWS sender side engages */
+			asoc->peers_rwnd = 0;
+		}
+		/* clean up */
+		if ((asoc->stream_queue_cnt == 1) &&
+		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+		     (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+		    ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc))) {
+			asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+		}
+		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
+		    (asoc->stream_queue_cnt == 0)) {
+			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+				/* Need to abort here */
+				struct mbuf *op_err;
+
+			abort_out_now:
+				*abort_now = 1;
+				/* XXX */
+				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
+				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+				return;
+			} else {
+				struct sctp_nets *netp;
+
+				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+				}
+				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+				sctp_stop_timers_for_shutdown(stcb);
+				if (asoc->alternate) {
+					netp = asoc->alternate;
+				} else {
+					netp = asoc->primary_destination;
+				}
+				sctp_send_shutdown(stcb, netp);
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+				                 stcb->sctp_ep, stcb, netp);
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+				                 stcb->sctp_ep, stcb, netp);
+			}
+			return;
+		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
+			   (asoc->stream_queue_cnt == 0)) {
+			struct sctp_nets *netp;
+
+			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
+				goto abort_out_now;
+			}
+			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+			sctp_stop_timers_for_shutdown(stcb);
+			if (asoc->alternate) {
+				netp = asoc->alternate;
+			} else {
+				netp = asoc->primary_destination;
+			}
+			sctp_send_shutdown_ack(stcb, netp);
+			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+			                 stcb->sctp_ep, stcb, netp);
+			return;
+		}
+	}
+	/*
+	 * Now here we are going to recycle net_ack for a different use...
+	 * HEADS UP.
+	 */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		net->net_ack = 0;
+	}
+
+	/*
+	 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
+	 * to be done. Setting this_sack_lowest_newack to the cum_ack will
+	 * automatically ensure that.
+	 */
+	if ((asoc->sctp_cmt_on_off > 0) &&
+	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
+	    (cmt_dac_flag == 0)) {
+		this_sack_lowest_newack = cum_ack;
+	}
+	if ((num_seg > 0) || (num_nr_seg > 0)) {
+		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
+		                           biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
+	}
+	/* JRS - Use the congestion control given in the CC module */
+	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
+
+	/* Now are we exiting loss recovery ? */
+	if (will_exit_fast_recovery) {
+		/* Ok, we must exit fast recovery */
+		asoc->fast_retran_loss_recovery = 0;
+	}
+	if ((asoc->sat_t3_loss_recovery) &&
+	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
+		/* end satellite t3 loss recovery */
+		asoc->sat_t3_loss_recovery = 0;
+	}
+	/*
+	 * CMT Fast recovery
+	 */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if (net->will_exit_fast_recovery) {
+			/* Ok, we must exit fast recovery */
+			net->fast_retran_loss_recovery = 0;
+		}
+	}
+
+	/* Adjust and set the new rwnd value */
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
+		                  asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
+	}
+	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
+	                                    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
+	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+		/* SWS sender side engages */
+		asoc->peers_rwnd = 0;
+	}
+	if (asoc->peers_rwnd > old_rwnd) {
+		win_probe_recovery = 1;
+	}
+
+	/*
+	 * Now we must set things up so a timer is running for anyone with
+	 * outstanding data.
+	 */
+	done_once = 0;
+again:
+	j = 0;
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if (win_probe_recovery && (net->window_probe)) {
+			win_probe_recovered = 1;
+			/*-
+			 * Find first chunk that was used with
+			 * window probe and clear the event. Put
+			 * it back into the send queue as if it had
+			 * not been sent.
+			 */
+			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+				if (tp1->window_probe) {
+					sctp_window_probe_recovery(stcb, asoc, tp1);
+					break;
+				}
+			}
+		}
+		if (net->flight_size) {
+			j++;
+			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+				                 stcb->sctp_ep, stcb, net);
+			}
+			if (net->window_probe) {
+				net->window_probe = 0;
+			}
+		} else {
+			if (net->window_probe) {
+				/* In window probes we must assure a timer is still running there */
+				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+					                 stcb->sctp_ep, stcb, net);
+
+				}
+			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+				                stcb, net,
+				                SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
+			}
+		}
+	}
+	if ((j == 0) &&
+	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
+	    (asoc->sent_queue_retran_cnt == 0) &&
+	    (win_probe_recovered == 0) &&
+	    (done_once == 0)) {
+		/* Huh, this should not happen unless all packets
+		 * are PR-SCTP and marked to skip, of course.
+		 */
+		if (sctp_fs_audit(asoc)) {
+			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+				net->flight_size = 0;
+			}
+			asoc->total_flight = 0;
+			asoc->total_flight_count = 0;
+			asoc->sent_queue_retran_cnt = 0;
+			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
+				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+					sctp_flight_size_increase(tp1);
+					sctp_total_flight_increase(stcb, tp1);
+				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
+					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+				}
+			}
+		}
+		done_once = 1;
+		goto again;
+	}
+	/*********************************************/
+	/* Here we perform PR-SCTP procedures        */
+	/* (section 4.2)                             */
+	/*********************************************/
+	/* C1. update advancedPeerAckPoint */
+	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
+		asoc->advanced_peer_ack_point = cum_ack;
+	}
+	/* C2. try to further move advancedPeerAckPoint ahead */
+	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
+		struct sctp_tmit_chunk *lchk;
+		uint32_t old_adv_peer_ack_point;
+
+		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
+		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
+		/* C3. See if we need to send a Fwd-TSN */
+		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
+			/*
+			 * ISSUE with ECN, see FWD-TSN processing.
+			 */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+				               0xee, cum_ack, asoc->advanced_peer_ack_point,
+				               old_adv_peer_ack_point);
+			}
+			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
+				send_forward_tsn(stcb, asoc);
+			} else if (lchk) {
+				/* try to FR fwd-tsn's that get lost too */
+				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
+					send_forward_tsn(stcb, asoc);
+				}
+			}
+		}
+		if (lchk) {
+			/* Assure a timer is up */
+			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
+			                 stcb->sctp_ep, stcb, lchk->whoTo);
+		}
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
+		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
+		               a_rwnd,
+		               stcb->asoc.peers_rwnd,
+		               stcb->asoc.total_flight,
+		               stcb->asoc.total_output_queue_size);
+	}
+}
+
+void
+sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
+{
+	/* Copy cum-ack */
+	uint32_t cum_ack, a_rwnd;
+
+	cum_ack = ntohl(cp->cumulative_tsn_ack);
+	/* Arrange so a_rwnd does NOT change */
+	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
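+	/*
+	 * The express handler subtracts total_flight when recomputing
+	 * peers_rwnd, so adding it here leaves the peer rwnd essentially
+	 * unchanged by this cum-ack processing.
+	 */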
+
+	/* Now call the express sack handling */
+	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
+}
+
+static void
+sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
+			       struct sctp_stream_in *strmin)
+{
+	struct sctp_queued_to_read *ctl, *nctl;
+	struct sctp_association *asoc;
+	uint32_t mid;
+	int need_reasm_check = 0;
+
+	asoc = &stcb->asoc;
+	mid = strmin->last_mid_delivered;
+	/*
+	 * First deliver anything prior to and including the stream sequence
+	 * number that came in.
+	 */
+	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
+		if (SCTP_MID_GE(asoc->idata_supported, mid, ctl->mid)) {
+			/* this is deliverable now */
+			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG)  == SCTP_DATA_NOT_FRAG) {
+				if (ctl->on_strm_q) {
+					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
+						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
+					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
+						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
+#ifdef INVARIANTS
+					} else {
+						panic("strmin: %p ctl: %p unknown %d",
+						      strmin, ctl, ctl->on_strm_q);
+#endif
+					}
+					ctl->on_strm_q = 0;
+				}
+				/* subtract pending on streams */
+				asoc->size_on_all_streams -= ctl->length;
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				/* deliver it to at least the delivery-q */
+				if (stcb->sctp_socket) {
+					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
+					sctp_add_to_readq(stcb->sctp_ep, stcb,
+							  ctl,
+							  &stcb->sctp_socket->so_rcv,
+							  1, SCTP_READ_LOCK_HELD,
+							  SCTP_SO_NOT_LOCKED);
+				}
+			} else {
+				/* It's a fragmented message */
+				if (ctl->first_frag_seen) {
+					/* Make it so this is next to deliver, we restore later */
+					strmin->last_mid_delivered = ctl->mid - 1;
+					need_reasm_check = 1;
+					break;
+				}
+			}
+		} else {
+			/* no more delivery now. */
+			break;
+		}
+	}
+	if (need_reasm_check) {
+		int ret;
+		ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
+		if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
+			/* Restore the next to deliver unless we are ahead */
+			strmin->last_mid_delivered = mid;
+		}
+		if (ret == 0) {
+			/* Left the front Partial one on */
+			return;
+		}
+		need_reasm_check = 0;
+	}
+	/*
+	 * Now we must deliver things in the queue the normal way, if any
+	 * are now ready.
+	 */
+	mid = strmin->last_mid_delivered + 1;
+	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next_instrm, nctl) {
+		if (SCTP_MID_EQ(asoc->idata_supported, mid, ctl->mid)) {
+			if (((ctl->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
+				/* this is deliverable now */
+				if (ctl->on_strm_q) {
+					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
+						TAILQ_REMOVE(&strmin->inqueue, ctl, next_instrm);
+					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
+						TAILQ_REMOVE(&strmin->uno_inqueue, ctl, next_instrm);
+#ifdef INVARIANTS
+					} else {
+						panic("strmin: %p ctl: %p unknown %d",
+						      strmin, ctl, ctl->on_strm_q);
+#endif
+					}
+					ctl->on_strm_q = 0;
+				}
+				/* subtract pending on streams */
+				asoc->size_on_all_streams -= ctl->length;
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				/* deliver it to at least the delivery-q */
+				strmin->last_mid_delivered = ctl->mid;
+				if (stcb->sctp_socket) {
+					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
+					sctp_add_to_readq(stcb->sctp_ep, stcb,
+							  ctl,
+							  &stcb->sctp_socket->so_rcv, 1,
+							  SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
+
+				}
+				mid = strmin->last_mid_delivered + 1;
+			} else {
+				/* It's a fragmented message */
+				if (ctl->first_frag_seen) {
+					/* Make it so this is next to deliver */
+					strmin->last_mid_delivered = ctl->mid - 1;
+					need_reasm_check = 1;
+					break;
+				}
+			}
+		} else {
+			break;
+		}
+	}
+	if (need_reasm_check) {
+		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
+	}
+}
+
+
+
+static void
+sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
+	struct sctp_association *asoc,
+	uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
+{
+	struct sctp_queued_to_read *control;
+	struct sctp_stream_in *strm;
+	struct sctp_tmit_chunk *chk, *nchk;
+	int cnt_removed = 0;
+
+	/*
+	 * For now, large messages held on the stream reassembly queue that
+	 * are complete will be tossed too. We could in theory do more work
+	 * to spin through and stop after dumping one message, i.e. after
+	 * seeing the start of a new message at the head, and call the
+	 * delivery function to see if it can be delivered. But for now we
+	 * just dump everything on the queue.
+	 */
+	strm = &asoc->strmin[stream];
+	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
+	if (control == NULL) {
+		/* Not found */
+		return;
+	}
+	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
+		return;
+	}
+	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
+		/* Purge hanging chunks */
+		if (!asoc->idata_supported && (ordered == 0)) {
+			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
+				break;
+			}
+		}
+		cnt_removed++;
+		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+		asoc->size_on_reasm_queue -= chk->send_size;
+		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+		if (chk->data) {
+			sctp_m_freem(chk->data);
+			chk->data = NULL;
+		}
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+	}
+	if (!TAILQ_EMPTY(&control->reasm)) {
+		/* This has to be old data, unordered */
+		if (control->data) {
+			sctp_m_freem(control->data);
+			control->data = NULL;
+		}
+		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
+		chk = TAILQ_FIRST(&control->reasm);
+		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
+			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
+			sctp_add_chk_to_control(control, strm, stcb, asoc,
+						chk, SCTP_READ_LOCK_HELD);
+		}
+		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
+		return;
+	}
+	if (control->on_strm_q == SCTP_ON_ORDERED) {
+		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
+		control->on_strm_q = 0;
+	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
+		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
+		control->on_strm_q = 0;
+#ifdef INVARIANTS
+	} else if (control->on_strm_q) {
+		panic("strm: %p ctl: %p unknown %d",
+		    strm, control, control->on_strm_q);
+#endif
+	}
+	control->on_strm_q = 0;
+	if (control->on_read_q == 0) {
+		sctp_free_remote_addr(control->whoFrom);
+		if (control->data) {
+			sctp_m_freem(control->data);
+			control->data = NULL;
+		}
+		sctp_free_a_readq(stcb, control);
+	}
+}
+
+void
+sctp_handle_forward_tsn(struct sctp_tcb *stcb,
+                        struct sctp_forward_tsn_chunk *fwd,
+                        int *abort_flag, struct mbuf *m, int offset)
+{
+	/* The pr-sctp fwd tsn */
+	/*
+	 * Here we will perform all the data receiver side steps for
+	 * processing FwdTSN, as required by the pr-sctp draft:
+	 *
+	 * Assume we get FwdTSN(x):
+	 *
+	 * 1) update local cumTSN to x
+	 * 2) try to further advance cumTSN to x + others we have
+	 * 3) examine and update the re-ordering queue on pr-in-streams
+	 * 4) clean up the re-assembly queue
+	 * 5) Send a sack to report where we are.
+	 */
+	struct sctp_association *asoc;
+	uint32_t new_cum_tsn, gap;
+	unsigned int i, fwd_sz, m_size;
+	uint32_t str_seq;
+	struct sctp_stream_in *strm;
+	struct sctp_queued_to_read *ctl, *sv;
+
+	asoc = &stcb->asoc;
+	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
+		SCTPDBG(SCTP_DEBUG_INDATA1,
+			"Bad size too small/big fwd-tsn\n");
+		return;
+	}
+	m_size = (stcb->asoc.mapping_array_size << 3);
+	/*************************************************************/
+	/* 1. Here we update local cumTSN and shift the bitmap array */
+	/*************************************************************/
+	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
+
+	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
+		/* Already got there ... */
+		return;
+	}
+	/*
+	 * now we know the new TSN is more advanced, let's find the actual
+	 * gap
+	 */
+	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
+	asoc->cumulative_tsn = new_cum_tsn;
+	if (gap >= m_size) {
+		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
+			struct mbuf *op_err;
+			char msg[SCTP_DIAG_INFO_LEN];
+
+			/*
+			 * out of range (of single byte chunks in the rwnd I
+			 * give out). This must be an attacker.
+			 */
+			*abort_flag = 1;
+			snprintf(msg, sizeof(msg),
+			         "New cum ack %8.8x too high, highest TSN %8.8x",
+			         new_cum_tsn, asoc->highest_tsn_inside_map);
+			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
+			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
+			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+			return;
+		}
+		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
+
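+		/*
+		 * The new cumulative TSN is beyond the current mapping array,
+		 * so restart both maps just past new_cum_tsn.
+		 */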
+		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
+		asoc->highest_tsn_inside_map = new_cum_tsn;
+
+		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
+		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
+
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+		}
+	} else {
+		SCTP_TCB_LOCK_ASSERT(stcb);
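+		/* Mark every TSN up to and including the new cum TSN as received (non-renegable). */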
+		for (i = 0; i <= gap; i++) {
+			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
+			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
+				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
+				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
+					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
+				}
+			}
+		}
+	}
+	/*************************************************************/
+	/* 2. Clear up re-assembly queue                             */
+	/*************************************************************/
+
+	/* This is now done as part of clearing up the stream/seq */
+	if (asoc->idata_supported == 0) {
+		uint16_t sid;
+		/* Flush all the un-ordered data based on cum-tsn */
+		SCTP_INP_READ_LOCK(stcb->sctp_ep);
+		for (sid = 0 ; sid < asoc->streamincnt; sid++) {
+			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
+		}
+		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+	}
+	/*******************************************************/
+	/* 3. Update the PR-stream re-ordering queues and fix  */
+	/*    delivery issues as needed.                       */
+	/*******************************************************/
+	fwd_sz -= sizeof(*fwd);
+	if (m && fwd_sz) {
+		/* New method. */
+		unsigned int num_str;
+		uint32_t mid, cur_mid;
+		uint16_t sid;
+		uint16_t ordered, flags;
+		struct sctp_strseq *stseq, strseqbuf;
+		struct sctp_strseq_mid *stseq_m, strseqbuf_m;
+		offset += sizeof(*fwd);
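+		/*
+		 * Each entry following the FWD-TSN header names a stream and the
+		 * highest message being skipped in it: a sctp_strseq (sid/ssn)
+		 * without I-DATA support, or a sctp_strseq_mid (sid/flags/mid)
+		 * with it.
+		 */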
+
+		SCTP_INP_READ_LOCK(stcb->sctp_ep);
+		if (asoc->idata_supported) {
+			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
+		} else {
+			num_str = fwd_sz / sizeof(struct sctp_strseq);
+		}
+		for (i = 0; i < num_str; i++) {
+			if (asoc->idata_supported) {
+				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
+									    sizeof(struct sctp_strseq_mid),
+									    (uint8_t *)&strseqbuf_m);
+				offset += sizeof(struct sctp_strseq_mid);
+				if (stseq_m == NULL) {
+					break;
+				}
+				sid = ntohs(stseq_m->sid);
+				mid = ntohl(stseq_m->mid);
+				flags = ntohs(stseq_m->flags);
+				if (flags & PR_SCTP_UNORDERED_FLAG) {
+					ordered = 0;
+				} else {
+					ordered = 1;
+				}
+			} else {
+				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
+									    sizeof(struct sctp_strseq),
+									    (uint8_t *)&strseqbuf);
+				offset += sizeof(struct sctp_strseq);
+				if (stseq == NULL) {
+					break;
+				}
+				sid = ntohs(stseq->sid);
+				mid = (uint32_t)ntohs(stseq->ssn);
+				ordered = 1;
+			}
+			/* Convert */
+
+			/* now process */
+
+			/*
+			 * Ok we now look for the stream/seq on the read queue
+			 * where it's not all delivered. If we find it, we transmute the
+			 * read entry into a PDI_ABORTED.
+			 */
+			if (sid >= asoc->streamincnt) {
+				/* screwed up streams, stop!  */
+				break;
+			}
+			if ((asoc->str_of_pdapi == sid) &&
+			    (asoc->ssn_of_pdapi == mid)) {
+				/* If this is the one we were partially delivering
+				 * now then we no longer are. Note this will change
+				 * with the reassembly re-write.
+				 */
+				asoc->fragmented_delivery_inprogress = 0;
+			}
+			strm = &asoc->strmin[sid];
+			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
+				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
+			}
+			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
+				if ((ctl->sinfo_stream == sid) &&
+				    (SCTP_MID_EQ(asoc->idata_supported, ctl->mid, mid))) {
+					str_seq = (sid << 16) | (0x0000ffff & mid);
+					ctl->pdapi_aborted = 1;
+					sv = stcb->asoc.control_pdapi;
+					ctl->end_added = 1;
+					if (ctl->on_strm_q == SCTP_ON_ORDERED) {
+						TAILQ_REMOVE(&strm->inqueue, ctl, next_instrm);
+					} else if (ctl->on_strm_q == SCTP_ON_UNORDERED) {
+						TAILQ_REMOVE(&strm->uno_inqueue, ctl, next_instrm);
+#ifdef INVARIANTS
+					} else if (ctl->on_strm_q) {
+						panic("strm: %p ctl: %p unknown %d",
+						      strm, ctl, ctl->on_strm_q);
+#endif
+					}
+					ctl->on_strm_q = 0;
+					stcb->asoc.control_pdapi = ctl;
+					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+					                stcb,
+					                SCTP_PARTIAL_DELIVERY_ABORTED,
+					                (void *)&str_seq,
+							SCTP_SO_NOT_LOCKED);
+					stcb->asoc.control_pdapi = sv;
+					break;
+				} else if ((ctl->sinfo_stream == sid) &&
+					   SCTP_MID_GT(asoc->idata_supported, ctl->mid, mid)) {
+					/* We are past our victim SSN */
+					break;
+				}
+			}
+			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
+				/* Update the sequence number */
+				strm->last_mid_delivered = mid;
+			}
+			/* now kick the stream the new way */
+			/*sa_ignore NO_NULL_CHK*/
+			sctp_kick_prsctp_reorder_queue(stcb, strm);
+		}
+		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+	}
+	/*
+	 * Now slide things forward.
+	 */
+	sctp_slide_mapping_arrays(stcb);
+}
diff --git a/usrsctplib/netinet/sctp_indata.h b/usrsctplib/netinet/sctp_indata.h
new file mode 100755
index 0000000..625dd62
--- /dev/null
+++ b/usrsctplib/netinet/sctp_indata.h
@@ -0,0 +1,120 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_indata.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_INDATA_H_
+#define _NETINET_SCTP_INDATA_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+struct sctp_queued_to_read *
+sctp_build_readq_entry(struct sctp_tcb *stcb,
+    struct sctp_nets *net,
+    uint32_t tsn, uint32_t ppid,
+    uint32_t context, uint16_t sid,
+    uint32_t mid, uint8_t flags,
+    struct mbuf *dm);
+
+
+#define sctp_build_readq_entry_mac(_ctl, in_it, context, net, tsn, ppid, sid, flags, dm, tfsn, mid) do { \
+	if (_ctl) { \
+		atomic_add_int(&((net)->ref_count), 1); \
+		memset(_ctl, 0, sizeof(struct sctp_queued_to_read)); \
+		(_ctl)->sinfo_stream = sid; \
+		TAILQ_INIT(&_ctl->reasm); \
+		(_ctl)->top_fsn = tfsn; \
+		(_ctl)->mid = mid; \
+		(_ctl)->sinfo_flags = (flags << 8); \
+		(_ctl)->sinfo_ppid = ppid; \
+		(_ctl)->sinfo_context = context; \
+		(_ctl)->fsn_included = 0xffffffff; \
+		(_ctl)->top_fsn = 0xffffffff; \
+		(_ctl)->sinfo_tsn = tsn; \
+		(_ctl)->sinfo_cumtsn = tsn; \
+		(_ctl)->sinfo_assoc_id = sctp_get_associd((in_it)); \
+		(_ctl)->whoFrom = net; \
+		(_ctl)->data = dm; \
+		(_ctl)->stcb = (in_it); \
+		(_ctl)->port_from = (in_it)->rport; \
+	} \
+} while (0)
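+
+/*
+ * Illustrative usage sketch only (not part of this header's API surface):
+ * callers typically allocate a struct sctp_queued_to_read (assuming the
+ * sctp_alloc_a_readq() allocator macro defined elsewhere in this library)
+ * and then let the macro fill it in, roughly:
+ *
+ *   struct sctp_queued_to_read *control;
+ *
+ *   sctp_alloc_a_readq(stcb, control);
+ *   sctp_build_readq_entry_mac(control, stcb, context, net, tsn, ppid,
+ *                              sid, flags, dm, tfsn, mid);
+ *
+ * Note that the macro does nothing when the allocation failed (_ctl == NULL).
+ */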
+
+
+
+struct mbuf *
+sctp_build_ctl_nchunk(struct sctp_inpcb *inp,
+		      struct sctp_sndrcvinfo *sinfo);
+
+void sctp_set_rwnd(struct sctp_tcb *, struct sctp_association *);
+
+uint32_t
+sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc);
+
+void
+sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
+			 uint32_t rwnd, int *abort_now, int ecne_seen);
+
+void
+sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
+                 struct sctp_tcb *stcb,
+                 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
+                 int *abort_now, uint8_t flags,
+                 uint32_t cum_ack, uint32_t rwnd, int ecne_seen);
+
+/* draft-ietf-tsvwg-usctp */
+void
+sctp_handle_forward_tsn(struct sctp_tcb *,
+			struct sctp_forward_tsn_chunk *, int *, struct mbuf *, int);
+
+struct sctp_tmit_chunk *
+sctp_try_advance_peer_ack_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_service_queues(struct sctp_tcb *, struct sctp_association *);
+
+void
+sctp_update_acked(struct sctp_tcb *, struct sctp_shutdown_chunk *, int *);
+
+int
+sctp_process_data(struct mbuf **, int, int *, int,
+		  struct sctp_inpcb *, struct sctp_tcb *,
+		  struct sctp_nets *, uint32_t *);
+
+void sctp_slide_mapping_arrays(struct sctp_tcb *stcb);
+
+void sctp_sack_check(struct sctp_tcb *, int);
+
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_input.c b/usrsctplib/netinet/sctp_input.c
new file mode 100755
index 0000000..4e8da4e
--- /dev/null
+++ b/usrsctplib/netinet/sctp_input.c
@@ -0,0 +1,6609 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_crc32.h>
+#if defined(INET) || defined(INET6)
+#if !defined(__Userspace_os_Windows)
+#include <netinet/udp.h>
+#endif
+#endif
+#if defined(__FreeBSD__)
+#include <sys/smp.h>
+#endif
+#if defined(__Userspace__)
+#include <user_socketvar.h>
+#endif
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 2
+#endif
+
+
+static void
+sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
+{
+	struct sctp_nets *net;
+
+	/* This now stops not only all cookie timers
+	 * but also any INIT timers. This will make
+	 * sure that the timers are stopped in all
+	 * collision cases.
+	 */
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
+			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
+					stcb->sctp_ep,
+					stcb,
+					net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
+		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
+			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
+					stcb->sctp_ep,
+					stcb,
+					net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
+		}
+	}
+}
+
+/* INIT handler */
+static void
+sctp_handle_init(struct mbuf *m, int iphlen, int offset,
+                 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
+                 struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
+                 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock,
+#if defined(__FreeBSD__)
+                 uint8_t mflowtype, uint32_t mflowid,
+#endif
+                 uint32_t vrf_id, uint16_t port)
+{
+	struct sctp_init *init;
+	struct mbuf *op_err;
+
+	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
+		(void *)stcb);
+	if (stcb == NULL) {
+		SCTP_INP_RLOCK(inp);
+	}
+	/* validate length */
+	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+				       vrf_id, port);
+		if (stcb)
+			*abort_no_unlock = 1;
+		goto outnow;
+	}
+	/* validate parameters */
+	init = &cp->init;
+	if (init->initiate_tag == 0) {
+		/* protocol error... send abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+				       vrf_id, port);
+		if (stcb)
+			*abort_no_unlock = 1;
+		goto outnow;
+	}
+	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
+		/* invalid parameter... send abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+				       vrf_id, port);
+		if (stcb)
+			*abort_no_unlock = 1;
+		goto outnow;
+	}
+	if (init->num_inbound_streams == 0) {
+		/* protocol error... send abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+				       vrf_id, port);
+		if (stcb)
+			*abort_no_unlock = 1;
+		goto outnow;
+	}
+	if (init->num_outbound_streams == 0) {
+		/* protocol error... send abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+				       vrf_id, port);
+		if (stcb)
+			*abort_no_unlock = 1;
+		goto outnow;
+	}
+	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
+					   offset + ntohs(cp->ch.chunk_length))) {
+		/* auth parameter(s) error... send abort */
+		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+		                             "Problem with AUTH parameters");
+		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, port);
+		if (stcb)
+			*abort_no_unlock = 1;
+		goto outnow;
+	}
+	/* We are only accepting if we have a socket with positive so_qlimit.*/
+	if ((stcb == NULL) &&
+	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	     (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	     (inp->sctp_socket == NULL) ||
+	     (inp->sctp_socket->so_qlimit == 0))) {
+		/*
+		 * FIX ME ?? What about the TCP model when we have a
+		 * match/restart case? Actually no fix is needed:
+		 * the lookup will always find the existing assoc, so stcb
+		 * would not be NULL. It may be questionable to do this
+		 * since we COULD just send back the INIT-ACK and hope that
+		 * the app did accept()'s by the time the COOKIE was sent. But
+		 * there is a price to pay for COOKIE generation and I don't
+		 * want to pay it on the chance that the app will actually do
+		 * some accept()'s. The app just loses and should NOT be in
+		 * this state :-)
+		 */
+		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             "No listener");
+			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__)
+			                mflowtype, mflowid, inp->fibnum,
+#endif
+			                vrf_id, port);
+		}
+		goto outnow;
+	}
+	if ((stcb != NULL) &&
+	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
+		sctp_send_shutdown_ack(stcb, NULL);
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+	} else {
+		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
+		sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset,
+		                       src, dst, sh, cp,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, port,
+		                       ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
+	}
+ outnow:
+	if (stcb == NULL) {
+		SCTP_INP_RUNLOCK(inp);
+	}
+}
+
+/*
+ * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
+ */
+
+int
+sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+	SCTP_UNUSED
+#endif
+)
+{
+	int unsent_data;
+	unsigned int i;
+	struct sctp_stream_queue_pending *sp;
+	struct sctp_association *asoc;
+
+	/* This function returns whether any stream has true unsent data on it.
+	 * Note that as it looks through the streams it will clean up any places
+	 * that have old data that has been sent but left at the top of the
+	 * stream queue.
+	 */
+	asoc = &stcb->asoc;
+	unsent_data = 0;
+	SCTP_TCB_SEND_LOCK(stcb);
+	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
+		/* Check to see if some data queued */
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			/*sa_ignore FREED_MEMORY*/
+			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
+			if (sp == NULL) {
+				continue;
+			}
+			if ((sp->msg_is_complete) &&
+			    (sp->length == 0)  &&
+			    (sp->sender_all_done)) {
+				/* We are doing deferred cleanup. Last
+				 * time through when we took all the data
+				 * the sender_all_done was not set.
+				 */
+				if (sp->put_last_out == 0) {
+					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
+					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
+					            sp->sender_all_done,
+					            sp->length,
+					            sp->msg_is_complete,
+					            sp->put_last_out);
+				}
+				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
+				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
+				stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, &asoc->strmout[i], sp, 1);
+				if (sp->net) {
+					sctp_free_remote_addr(sp->net);
+					sp->net = NULL;
+				}
+				if (sp->data) {
+					sctp_m_freem(sp->data);
+					sp->data = NULL;
+				}
+				sctp_free_a_strmoq(stcb, sp, so_locked);
+				if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+					unsent_data++;
+				}
+			} else {
+				unsent_data++;
+			}
+			if (unsent_data > 0) {
+				break;
+			}
+		}
+	}
+	SCTP_TCB_SEND_UNLOCK(stcb);
+	return (unsent_data);
+}
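+
+/*
+ * Descriptive note: SHUTDOWN processing below uses this to decide whether a
+ * SHUTDOWN-ACK can be sent right away or whether queued data still has to be
+ * pushed out first.
+ */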
+
+static int
+sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
+{
+	struct sctp_init *init;
+	struct sctp_association *asoc;
+	struct sctp_nets *lnet;
+	unsigned int i;
+
+	init = &cp->init;
+	asoc = &stcb->asoc;
+	/* save off parameters */
+	asoc->peer_vtag = ntohl(init->initiate_tag);
+	asoc->peers_rwnd = ntohl(init->a_rwnd);
+	/* init tsn's */
+	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
+
+	if (!TAILQ_EMPTY(&asoc->nets)) {
+		/* update any ssthresh's that may have a default */
+		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+			lnet->ssthresh = asoc->peers_rwnd;
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE|SCTP_CWND_LOGGING_ENABLE)) {
+				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
+			}
+
+		}
+	}
+	SCTP_TCB_SEND_LOCK(stcb);
+	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
+		unsigned int newcnt;
+		struct sctp_stream_out *outs;
+		struct sctp_stream_queue_pending *sp, *nsp;
+		struct sctp_tmit_chunk *chk, *nchk;
+
+		/* abandon the upper streams */
+		newcnt = ntohs(init->num_inbound_streams);
+		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+			if (chk->rec.data.sid >= newcnt) {
+				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+				asoc->send_queue_cnt--;
+				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+				} else {
+					panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+				}
+				if (chk->data != NULL) {
+					sctp_free_bufspace(stcb, asoc, chk, 1);
+					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
+					                0, chk, SCTP_SO_NOT_LOCKED);
+					if (chk->data) {
+						sctp_m_freem(chk->data);
+						chk->data = NULL;
+					}
+				}
+				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+				/*sa_ignore FREED_MEMORY*/
+			}
+		}
+		if (asoc->strmout) {
+			for (i = newcnt; i < asoc->pre_open_streams; i++) {
+				outs = &asoc->strmout[i];
+				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
+					atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
+					TAILQ_REMOVE(&outs->outqueue, sp, next);
+					stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 1);
+					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
+					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
+					if (sp->data) {
+						sctp_m_freem(sp->data);
+						sp->data = NULL;
+					}
+					if (sp->net) {
+						sctp_free_remote_addr(sp->net);
+						sp->net = NULL;
+					}
+					/* Free the chunk */
+					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
+					/*sa_ignore FREED_MEMORY*/
+				}
+				outs->state = SCTP_STREAM_CLOSED;
+			}
+		}
+		/* cut back the count */
+		asoc->pre_open_streams = newcnt;
+	}
+	SCTP_TCB_SEND_UNLOCK(stcb);
+	asoc->streamoutcnt = asoc->pre_open_streams;
+	if (asoc->strmout) {
+		for (i = 0; i < asoc->streamoutcnt; i++) {
+			asoc->strmout[i].state = SCTP_STREAM_OPEN;
+		}
+	}
+	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
+	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+	}
+	/* This is the next one we expect */
+	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
+
+	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
+	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
+
+	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+	/* open the requested streams */
+
+	if (asoc->strmin != NULL) {
+		/* Free the old ones */
+		for (i = 0; i < asoc->streamincnt; i++) {
+			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
+			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
+		}
+		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
+	}
+	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
+		asoc->streamincnt = ntohs(init->num_outbound_streams);
+	} else {
+		asoc->streamincnt = asoc->max_inbound_streams;
+	}
+	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
+		    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
+	if (asoc->strmin == NULL) {
+		/* we didn't get memory for the streams! */
+		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
+		return (-1);
+	}
+	for (i = 0; i < asoc->streamincnt; i++) {
+		asoc->strmin[i].sid = i;
+		asoc->strmin[i].last_mid_delivered = 0xffffffff;
+		TAILQ_INIT(&asoc->strmin[i].inqueue);
+		TAILQ_INIT(&asoc->strmin[i].uno_inqueue);
+		asoc->strmin[i].pd_api_started = 0;
+		asoc->strmin[i].delivery_started = 0;
+	}
+	/*
+	 * load_address_from_init will put the addresses into the
+	 * association when the COOKIE is processed or the INIT-ACK is
+	 * processed. Both types of COOKIEs, existing and new, call this
+	 * routine. It will remove addresses that are no longer in the
+	 * association (for the restarting case where addresses are
+	 * removed). Up front when the INIT arrives we will discard it if it
+	 * is a restart and new addresses have been added.
+	 */
+	/* sa_ignore MEMLEAK */
+	return (0);
+}
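+
+/*
+ * Descriptive note on the stream handling above: outbound streams are capped
+ * at the peer's num_inbound_streams (excess streams are abandoned and their
+ * queued data is failed back to the ULP), while the inbound stream count
+ * becomes the minimum of the peer's num_outbound_streams and our
+ * max_inbound_streams.
+ */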
+
+/*
+ * INIT-ACK message processing/consumption returns value < 0 on error
+ */
+static int
+sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
+                      struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
+                      struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+                      struct sctp_nets *net, int *abort_no_unlock,
+#if defined(__FreeBSD__)
+		      uint8_t mflowtype, uint32_t mflowid,
+#endif
+                      uint32_t vrf_id)
+{
+	struct sctp_association *asoc;
+	struct mbuf *op_err;
+	int retval, abort_flag;
+	uint32_t initack_limit;
+	int nat_friendly = 0;
+
+	/* First verify that we have no illegal param's */
+	abort_flag = 0;
+
+	op_err = sctp_arethere_unrecognized_parameters(m,
+						       (offset + sizeof(struct sctp_init_chunk)),
+						       &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
+	if (abort_flag) {
+		/* Send an abort and notify peer */
+		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		*abort_no_unlock = 1;
+		return (-1);
+	}
+	asoc = &stcb->asoc;
+	asoc->peer_supports_nat = (uint8_t)nat_friendly;
+	/* process the peer's parameters in the INIT-ACK */
+	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
+	if (retval < 0) {
+		return (retval);
+	}
+	initack_limit = offset + ntohs(cp->ch.chunk_length);
+	/* load all addresses */
+	if ((retval = sctp_load_addresses_from_init(stcb, m,
+	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
+	    src, dst, NULL, stcb->asoc.port))) {
+		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+		                             "Problem with address parameters");
+		SCTPDBG(SCTP_DEBUG_INPUT1,
+			"Load addresses from INIT causes an abort %d\n",
+			retval);
+		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+		                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, net->port);
+		*abort_no_unlock = 1;
+		return (-1);
+	}
+	/* if the peer doesn't support asconf, flush the asconf queue */
+	if (asoc->asconf_supported == 0) {
+		struct sctp_asconf_addr *param, *nparam;
+
+		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
+			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
+			SCTP_FREE(param, SCTP_M_ASC_ADDR);
+		}
+	}
+
+	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
+	    stcb->asoc.local_hmacs);
+	if (op_err) {
+		sctp_queue_op_err(stcb, op_err);
+		/* queuing will steal away the mbuf chain to the out queue */
+		op_err = NULL;
+	}
+	/* extract the cookie and queue it to "echo" it back... */
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+			       stcb->asoc.overall_error_count,
+			       0,
+			       SCTP_FROM_SCTP_INPUT,
+			       __LINE__);
+	}
+	stcb->asoc.overall_error_count = 0;
+	net->error_count = 0;
+
+	/*
+	 * Cancel the INIT timer. We do this first before queueing the
+	 * cookie. We always cancel at the primary to assure that we are
+	 * canceling the timer started by the INIT which always goes to the
+	 * primary.
+	 */
+	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
+	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
+
+	/* calculate the RTO */
+	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
+				      SCTP_RTT_FROM_NON_DATA);
+#if defined(__Userspace__)
+	if (stcb->sctp_ep->recv_callback) {
+		if (stcb->sctp_socket) {
+			uint32_t inqueue_bytes, sb_free_now;
+			struct sctp_inpcb *inp;
+
+			inp = stcb->sctp_ep;
+			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+			sb_free_now = SCTP_SB_LIMIT_SND(stcb->sctp_socket) - (inqueue_bytes + stcb->asoc.sb_send_resv);
+
+			/* check if the amount free in the send socket buffer crossed the threshold */
+			if (inp->send_callback &&
+			    (((inp->send_sb_threshold > 0) &&
+			      (sb_free_now >= inp->send_sb_threshold) &&
+			      (stcb->asoc.chunks_on_out_queue <= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) ||
+			     (inp->send_sb_threshold == 0))) {
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				SCTP_TCB_UNLOCK(stcb);
+				inp->send_callback(stcb->sctp_socket, sb_free_now);
+				SCTP_TCB_LOCK(stcb);
+				atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			}
+		}
+	}
+#endif
+	retval = sctp_send_cookie_echo(m, offset, stcb, net);
+	if (retval < 0) {
+		/*
+		 * No cookie, we probably should send an op error. But in any
+		 * case if there is no cookie in the INIT-ACK, we can
+		 * abandon the peer, it's broken.
+		 */
+		if (retval == -3) {
+			uint16_t len;
+
+			len = (uint16_t)(sizeof(struct sctp_error_missing_param) + sizeof(uint16_t));
+			/* We abort with an error of missing mandatory param */
+			op_err = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+			if (op_err != NULL) {
+				struct sctp_error_missing_param *cause;
+
+				SCTP_BUF_LEN(op_err) = len;
+				cause = mtod(op_err, struct sctp_error_missing_param *);
+				/* Subtract the reserved param */
+				cause->cause.code = htons(SCTP_CAUSE_MISSING_PARAM);
+				cause->cause.length = htons(len);
+				cause->num_missing_params = htonl(1);
+				cause->type[0] = htons(SCTP_STATE_COOKIE);
+			}
+			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+			                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+			                       mflowtype, mflowid,
+#endif
+			                       vrf_id, net->port);
+			*abort_no_unlock = 1;
+		}
+		return (retval);
+	}
+
+	return (0);
+}
+
+static void
+sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
+    struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	union sctp_sockstore store;
+	struct sctp_nets *r_net, *f_net;
+	struct timeval tv;
+	int req_prim = 0;
+	uint16_t old_error_counter;
+
+	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
+		/* Invalid length */
+		return;
+	}
+
+	memset(&store, 0, sizeof(store));
+	switch (cp->heartbeat.hb_info.addr_family) {
+#ifdef INET
+	case AF_INET:
+		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
+			store.sin.sin_family = cp->heartbeat.hb_info.addr_family;
+#ifdef HAVE_SIN_LEN
+			store.sin.sin_len = cp->heartbeat.hb_info.addr_len;
+#endif
+			store.sin.sin_port = stcb->rport;
+			memcpy(&store.sin.sin_addr, cp->heartbeat.hb_info.address,
+			       sizeof(store.sin.sin_addr));
+		} else {
+			return;
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
+			store.sin6.sin6_family = cp->heartbeat.hb_info.addr_family;
+#ifdef HAVE_SIN6_LEN
+			store.sin6.sin6_len = cp->heartbeat.hb_info.addr_len;
+#endif
+			store.sin6.sin6_port = stcb->rport;
+			memcpy(&store.sin6.sin6_addr, cp->heartbeat.hb_info.address, sizeof(struct in6_addr));
+		} else {
+			return;
+		}
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_conn)) {
+			store.sconn.sconn_family = cp->heartbeat.hb_info.addr_family;
+#ifdef HAVE_SCONN_LEN
+			store.sconn.sconn_len = cp->heartbeat.hb_info.addr_len;
+#endif
+			store.sconn.sconn_port = stcb->rport;
+			memcpy(&store.sconn.sconn_addr, cp->heartbeat.hb_info.address, sizeof(void *));
+		} else {
+			return;
+		}
+		break;
+#endif
+	default:
+		return;
+	}
+	r_net = sctp_findnet(stcb, &store.sa);
+	if (r_net == NULL) {
+		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
+		return;
+	}
+	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
+	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
+	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
+		/*
+		 * If it is an HB and its random value is correct, we can
+		 * confirm the destination.
+		 */
+		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
+			stcb->asoc.primary_destination = r_net;
+			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
+			f_net = TAILQ_FIRST(&stcb->asoc.nets);
+			if (f_net != r_net) {
+				/* The first one on the list is NOT the primary.
+				 * sctp_cmpaddr() is much more efficient if
+				 * the primary is the first on the list, so make
+				 * it so.
+				 */
+				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
+				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
+			}
+			req_prim = 1;
+		}
+		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
+		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb,
+		                r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
+		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
+	}
+	old_error_counter = r_net->error_count;
+	r_net->error_count = 0;
+	r_net->hb_responded = 1;
+	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
+	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
+	/* Now let's do an RTO with this */
+	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
+					SCTP_RTT_FROM_NON_DATA);
+	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
+		r_net->dest_state |= SCTP_ADDR_REACHABLE;
+		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
+				0, (void *)r_net, SCTP_SO_NOT_LOCKED);
+	}
+	if (r_net->dest_state & SCTP_ADDR_PF) {
+		r_net->dest_state &= ~SCTP_ADDR_PF;
+		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
+	}
+	if (old_error_counter > 0) {
+		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+		                stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
+		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
+	}
+	if (r_net == stcb->asoc.primary_destination) {
+		if (stcb->asoc.alternate) {
+			/* release the alternate, primary is good */
+			sctp_free_remote_addr(stcb->asoc.alternate);
+			stcb->asoc.alternate = NULL;
+		}
+	}
+	/* Mobility adaptation */
+	if (req_prim) {
+		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                 SCTP_MOBILITY_BASE) ||
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_FASTHANDOFF)) &&
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_PRIM_DELETED)) {
+
+			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED,
+			                stcb->sctp_ep, stcb, NULL,
+			                SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+					SCTP_MOBILITY_FASTHANDOFF)) {
+				sctp_assoc_immediate_retrans(stcb,
+					stcb->asoc.primary_destination);
+			}
+			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+					SCTP_MOBILITY_BASE)) {
+				sctp_move_chunks_from_net(stcb,
+					stcb->asoc.deleted_primary);
+			}
+			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
+					stcb->asoc.deleted_primary);
+		}
+	}
+}
+
+static int
+sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
+{
+	/* Return 0 means we want you to proceed with the abort;
+	 * non-zero means no abort processing.
+	 */
+	struct sctpasochead *head;
+
+	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
+		/* generate a new vtag and send init */
+		LIST_REMOVE(stcb, sctp_asocs);
+		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
+		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+		/* put it in the bucket in the vtag hash of assoc's for the system */
+		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+		return (1);
+	}
+	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+		/* treat like a case where the cookie expired, i.e.:
+		 * - dump current cookie.
+		 * - generate a new vtag.
+		 * - resend init.
+		 */
+		/* generate a new vtag and send init */
+		LIST_REMOVE(stcb, sctp_asocs);
+		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
+		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
+		sctp_stop_all_cookie_timers(stcb);
+		sctp_toss_old_cookies(stcb, &stcb->asoc);
+		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport,  1);
+		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+		/* put it in the bucket in the vtag hash of assoc's for the system */
+		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+		return (1);
+	}
+	return (0);
+}
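+
+/*
+ * Descriptive note: callers treat a non-zero return as "collision handled,
+ * do not abort", e.g. in sctp_handle_abort() below:
+ *
+ *   if (sctp_handle_nat_colliding_state(stcb)) {
+ *           return;
+ *   }
+ */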
+
+static int
+sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
+			      struct sctp_nets *net)
+{
+	/* Return 0 means we want you to proceed with the abort;
+	 * non-zero means no abort processing.
+	 */
+	if (stcb->asoc.auth_supported == 0) {
+		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
+		return (0);
+	}
+	sctp_asconf_send_nat_state_update(stcb, net);
+	return (1);
+}
+
+
+static void
+sctp_handle_abort(struct sctp_abort_chunk *abort,
+    struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+	uint16_t len;
+	uint16_t error;
+
+	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
+	if (stcb == NULL)
+		return;
+
+	len = ntohs(abort->ch.chunk_length);
+	if (len > sizeof (struct sctp_chunkhdr)) {
+		/* Need to check the cause codes for our
+		 * two magic NAT aborts which don't necessarily
+		 * kill the assoc.
+		 */
+		struct sctp_gen_error_cause *cause;
+
+		cause = (struct sctp_gen_error_cause *)(abort + 1);
+		error = ntohs(cause->code);
+		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
+			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
+			                           abort->ch.chunk_flags);
+			if (sctp_handle_nat_colliding_state(stcb)) {
+				return;
+			}
+		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
+			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
+			                           abort->ch.chunk_flags);
+			if (sctp_handle_nat_missing_state(stcb, net)) {
+				return;
+			}
+		}
+	} else {
+		error = 0;
+	}
+	/* stop any receive timers */
+	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
+	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
+	/* notify user of the abort and clean up... */
+	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
+	/* free the tcb */
+	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+	}
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	sctp_print_out_track_log(stcb);
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	so = SCTP_INP_SO(stcb->sctp_ep);
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	SCTP_TCB_UNLOCK(stcb);
+	SCTP_SOCKET_LOCK(so, 1);
+	SCTP_TCB_LOCK(stcb);
+	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
+	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+			      SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
+}
+
+static void
+sctp_start_net_timers(struct sctp_tcb *stcb)
+{
+	uint32_t cnt_hb_sent;
+	struct sctp_nets *net;
+
+	cnt_hb_sent = 0;
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		/* For each network start:
+		 * 1) A pmtu timer.
+		 * 2) A HB timer.
+		 * 3) If the dest is unconfirmed, send
+		 *    a HB as well if fewer than max_hb_burst
+		 *    have been sent.
+		 */
+		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
+		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
+			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+			cnt_hb_sent++;
+		}
+	}
+	if (cnt_hb_sent) {
+		sctp_chunk_output(stcb->sctp_ep, stcb,
+				  SCTP_OUTPUT_FROM_COOKIE_ACK,
+				  SCTP_SO_NOT_LOCKED);
+	}
+}
+
+
+static void
+sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
+    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
+{
+	struct sctp_association *asoc;
+	int some_on_streamwheel;
+	int old_state;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+
+	SCTPDBG(SCTP_DEBUG_INPUT2,
+		"sctp_handle_shutdown: handling SHUTDOWN\n");
+	if (stcb == NULL)
+		return;
+	asoc = &stcb->asoc;
+	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+		return;
+	}
+	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
+		/* Shutdown NOT the expected size */
+		return;
+	}
+	old_state = SCTP_GET_STATE(asoc);
+	sctp_update_acked(stcb, cp, abort_flag);
+	if (*abort_flag) {
+		return;
+	}
+	if (asoc->control_pdapi) {
+		/* With a normal shutdown
+		 * we assume the end of the last record.
+		 */
+		SCTP_INP_READ_LOCK(stcb->sctp_ep);
+		if (asoc->control_pdapi->on_strm_q) {
+			struct sctp_stream_in *strm;
+
+			strm = &asoc->strmin[asoc->control_pdapi->sinfo_stream];
+			if (asoc->control_pdapi->on_strm_q == SCTP_ON_UNORDERED) {
+				/* Unordered */
+				TAILQ_REMOVE(&strm->uno_inqueue, asoc->control_pdapi, next_instrm);
+				asoc->control_pdapi->on_strm_q = 0;
+			} else if (asoc->control_pdapi->on_strm_q == SCTP_ON_ORDERED) {
+				/* Ordered */
+				TAILQ_REMOVE(&strm->inqueue, asoc->control_pdapi, next_instrm);
+				asoc->control_pdapi->on_strm_q = 0;
+#ifdef INVARIANTS
+			} else {
+				panic("Unknown state on ctrl:%p on_strm_q:%d",
+				      asoc->control_pdapi,
+				      asoc->control_pdapi->on_strm_q);
+#endif
+			}
+		}
+		asoc->control_pdapi->end_added = 1;
+		asoc->control_pdapi->pdapi_aborted = 1;
+		asoc->control_pdapi = NULL;
+		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			/* assoc was freed while we were unlocked */
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+#endif
+		if (stcb->sctp_socket) {
+			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+		}
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	}
+	/* goto SHUTDOWN_RECEIVED state to block new requests */
+	if (stcb->sctp_socket) {
+		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
+		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
+			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
+			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+			/* notify upper layer that peer has initiated a shutdown */
+			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+
+			/* reset time */
+			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+		}
+	}
+	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
+		/*
+		 * stop the shutdown timer, since we WILL move to
+		 * SHUTDOWN-ACK-SENT.
+		 */
+		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+		                net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
+	}
+	/* Now is there unsent data on a stream somewhere? */
+	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);
+
+	if (!TAILQ_EMPTY(&asoc->send_queue) ||
+	    !TAILQ_EMPTY(&asoc->sent_queue) ||
+	    some_on_streamwheel) {
+		/* By returning we will push more data out */
+		return;
+	} else {
+		/* no outstanding data to send, so move on... */
+		/* send SHUTDOWN-ACK */
+		/* move to SHUTDOWN-ACK-SENT state */
+		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+		}
+		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+		if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
+			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
+			sctp_stop_timers_for_shutdown(stcb);
+			sctp_send_shutdown_ack(stcb, net);
+			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
+			                 stcb->sctp_ep, stcb, net);
+		} else if (old_state == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+			sctp_send_shutdown_ack(stcb, net);
+		}
+	}
+}
+
+static void
+sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
+                         struct sctp_tcb *stcb,
+                         struct sctp_nets *net)
+{
+	struct sctp_association *asoc;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+
+	so = SCTP_INP_SO(stcb->sctp_ep);
+#endif
+	SCTPDBG(SCTP_DEBUG_INPUT2,
+		"sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
+	if (stcb == NULL)
+		return;
+
+	asoc = &stcb->asoc;
+	/* process according to association state */
+	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
+		sctp_send_shutdown_complete(stcb, net, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		return;
+	}
+	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+		/* unexpected SHUTDOWN-ACK... so ignore... */
+		SCTP_TCB_UNLOCK(stcb);
+		return;
+	}
+	if (asoc->control_pdapi) {
+		/* With a normal shutdown
+		 * we assume the end of the last record.
+		 */
+		SCTP_INP_READ_LOCK(stcb->sctp_ep);
+		asoc->control_pdapi->end_added = 1;
+		asoc->control_pdapi->pdapi_aborted = 1;
+		asoc->control_pdapi = NULL;
+		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			/* assoc was freed while we were unlocked */
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+#endif
+		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	}
+#ifdef INVARIANTS
+	if (!TAILQ_EMPTY(&asoc->send_queue) ||
+	    !TAILQ_EMPTY(&asoc->sent_queue) ||
+	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
+		panic("Queues are not empty when handling SHUTDOWN-ACK");
+	}
+#endif
+	/* stop the timer */
+	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net,
+	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
+	/* send SHUTDOWN-COMPLETE */
+	sctp_send_shutdown_complete(stcb, net, 0);
+	/* notify upper layer protocol */
+	if (stcb->sctp_socket) {
+		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+			stcb->sctp_socket->so_snd.sb_cc = 0;
+		}
+		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+	}
+	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+	/* free the TCB but first save off the ep */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	SCTP_TCB_UNLOCK(stcb);
+	SCTP_SOCKET_LOCK(so, 1);
+	SCTP_TCB_LOCK(stcb);
+	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+			      SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+}
+
+/*
+ * Skip past the param header and then we will find the chunk that caused the
+ * problem. There are two possibilities, ASCONF or FWD-TSN; anything other
+ * than that means our peer must be broken.
+ */
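+/*
+ * Illustrative layout of the error cause handled below: the offending chunk
+ * header sits immediately after the cause's own parameter header, i.e.
+ *
+ *   [ param_type | param_length ][ chunk_type | chunk_flags | chunk_length ]
+ *     ^ phdr                       ^ (struct sctp_chunkhdr *)(phdr + 1)
+ */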
+static void
+sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
+    struct sctp_nets *net)
+{
+	struct sctp_chunkhdr *chk;
+
+	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
+	switch (chk->chunk_type) {
+	case SCTP_ASCONF_ACK:
+	case SCTP_ASCONF:
+		sctp_asconf_cleanup(stcb, net);
+		break;
+	case SCTP_IFORWARD_CUM_TSN:
+	case SCTP_FORWARD_CUM_TSN:
+		stcb->asoc.prsctp_supported = 0;
+		break;
+	default:
+		SCTPDBG(SCTP_DEBUG_INPUT2,
+			"Peer does not support chunk type %d(%x)??\n",
+			chk->chunk_type, (uint32_t) chk->chunk_type);
+		break;
+	}
+}
+
+/*
+ * Skip past the param header and then we will find the param that caused the
+ * problem. There are a number of params in an ASCONF or the PR-SCTP param;
+ * these will turn off specific features.
+ * XXX: Is this the right thing to do?
+ */
+static void
+sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
+{
+	struct sctp_paramhdr *pbad;
+
+	pbad = phdr + 1;
+	switch (ntohs(pbad->param_type)) {
+		/* pr-sctp draft */
+	case SCTP_PRSCTP_SUPPORTED:
+		stcb->asoc.prsctp_supported = 0;
+		break;
+	case SCTP_SUPPORTED_CHUNK_EXT:
+		break;
+		/* draft-ietf-tsvwg-addip-sctp */
+	case SCTP_HAS_NAT_SUPPORT:
+	        stcb->asoc.peer_supports_nat = 0;
+	        break;
+	case SCTP_ADD_IP_ADDRESS:
+	case SCTP_DEL_IP_ADDRESS:
+	case SCTP_SET_PRIM_ADDR:
+		stcb->asoc.asconf_supported = 0;
+		break;
+	case SCTP_SUCCESS_REPORT:
+	case SCTP_ERROR_CAUSE_IND:
+		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
+		SCTPDBG(SCTP_DEBUG_INPUT2,
+			"Turning off ASCONF to this strange peer\n");
+		stcb->asoc.asconf_supported = 0;
+		break;
+	default:
+		SCTPDBG(SCTP_DEBUG_INPUT2,
+			"Peer does not support param type %d(%x)??\n",
+			pbad->param_type, (uint32_t) pbad->param_type);
+		break;
+	}
+}
+
+static int
+sctp_handle_error(struct sctp_chunkhdr *ch,
+    struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	int chklen;
+	struct sctp_paramhdr *phdr;
+	uint16_t error, error_type;
+	uint16_t error_len;
+	struct sctp_association *asoc;
+	int adjust;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+
+	/* parse through all of the errors and process */
+	asoc = &stcb->asoc;
+	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
+	    sizeof(struct sctp_chunkhdr));
+	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
+	error = 0;
+	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
+		/* Process an Error Cause */
+		error_type = ntohs(phdr->param_type);
+		error_len = ntohs(phdr->param_length);
+		if ((error_len > chklen) || (error_len == 0)) {
+			/* invalid param length for this param */
+			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
+				chklen, error_len);
+			return (0);
+		}
+		if (error == 0) {
+			/* report the first error cause */
+			error = error_type;
+		}
+		switch (error_type) {
+		case SCTP_CAUSE_INVALID_STREAM:
+		case SCTP_CAUSE_MISSING_PARAM:
+		case SCTP_CAUSE_INVALID_PARAM:
+		case SCTP_CAUSE_NO_USER_DATA:
+			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
+				error_type);
+			break;
+		case SCTP_CAUSE_NAT_COLLIDING_STATE:
+		        SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
+				ch->chunk_flags);
+			if (sctp_handle_nat_colliding_state(stcb)) {
+			  return (0);
+			}
+			break;
+		case SCTP_CAUSE_NAT_MISSING_STATE:
+			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
+			                           ch->chunk_flags);
+			if (sctp_handle_nat_missing_state(stcb, net)) {
+			  return (0);
+			}
+			break;
+		case SCTP_CAUSE_STALE_COOKIE:
+			/*
+			 * We only act if we have echoed a cookie and are
+			 * waiting.
+			 */
+			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
+				int *p;
+
+				p = (int *)((caddr_t)phdr + sizeof(*phdr));
+				/* Save the time doubled */
+				asoc->cookie_preserve_req = ntohl(*p) << 1;
+				asoc->stale_cookie_count++;
+				if (asoc->stale_cookie_count >
+				    asoc->max_init_times) {
+					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
+					/* now free the asoc */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					so = SCTP_INP_SO(stcb->sctp_ep);
+					atomic_add_int(&stcb->asoc.refcnt, 1);
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_SOCKET_LOCK(so, 1);
+					SCTP_TCB_LOCK(stcb);
+					atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+							      SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+					return (-1);
+				}
+				/* blast back to INIT state */
+				sctp_toss_old_cookies(stcb, &stcb->asoc);
+				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
+				asoc->state |= SCTP_STATE_COOKIE_WAIT;
+				sctp_stop_all_cookie_timers(stcb);
+				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+			}
+			break;
+		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
+			/*
+			 * Nothing we can do here, we don't do hostname
+			 * addresses so if the peer does not like my IPv6
+			 * (or IPv4 for that matter) it does not matter. If
+			 * they don't support that type of address, they can
+			 * NOT possibly get that packet type... i.e. with no
+			 * IPv6 you can't receive an IPv6 packet, so we can
+			 * safely ignore this one. If we ever added support
+			 * for HOSTNAME Addresses, then we would need to do
+			 * something here.
+			 */
+			break;
+		case SCTP_CAUSE_UNRECOG_CHUNK:
+			sctp_process_unrecog_chunk(stcb, phdr, net);
+			break;
+		case SCTP_CAUSE_UNRECOG_PARAM:
+			sctp_process_unrecog_param(stcb, phdr);
+			break;
+		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
+			/*
+			 * We ignore this since the timer will drive out a
+			 * new cookie anyway and their timer will drive us
+			 * to send a SHUTDOWN_COMPLETE. We can't send one
+			 * here since we don't have their tag.
+			 */
+			break;
+		case SCTP_CAUSE_DELETING_LAST_ADDR:
+		case SCTP_CAUSE_RESOURCE_SHORTAGE:
+		case SCTP_CAUSE_DELETING_SRC_ADDR:
+			/*
+			 * We should NOT get these here, but in an
+			 * ASCONF-ACK.
+			 */
+			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
+				error_type);
+			break;
+		case SCTP_CAUSE_OUT_OF_RESC:
+			/*
+			 * And what, pray tell, do we do with the fact that
+			 * the peer is out of resources? Not really sure we
+			 * could do anything but abort. I suspect this
+			 * should have come WITH an abort instead of in an
+			 * OP-ERROR.
+			 */
+			break;
+		default:
+			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
+				error_type);
+			break;
+		}
+		adjust = SCTP_SIZE32(error_len);
+		chklen -= adjust;
+		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
+	}
+	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED);
+	return (0);
+}
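+
+/*
+ * Descriptive note on the walk above: each error cause is a TLV whose length
+ * excludes padding, so the loop advances by SCTP_SIZE32(error_len) (the
+ * length rounded up to a 4-byte boundary); e.g. an error_len of 6 advances
+ * the pointer by 8 bytes.
+ */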
+
+static int
+sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
+                     struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
+                     struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
+                     struct sctp_nets *net, int *abort_no_unlock,
+#if defined(__FreeBSD__)
+                     uint8_t mflowtype, uint32_t mflowid,
+#endif
+                     uint32_t vrf_id)
+{
+	struct sctp_init_ack *init_ack;
+	struct mbuf *op_err;
+
+	SCTPDBG(SCTP_DEBUG_INPUT2,
+		"sctp_handle_init_ack: handling INIT-ACK\n");
+
+	if (stcb == NULL) {
+		SCTPDBG(SCTP_DEBUG_INPUT2,
+			"sctp_handle_init_ack: TCB is null\n");
+		return (-1);
+	}
+	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
+		/* Invalid length */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+		                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, net->port);
+		*abort_no_unlock = 1;
+		return (-1);
+	}
+	init_ack = &cp->init;
+	/* validate parameters */
+	if (init_ack->initiate_tag == 0) {
+		/* protocol error... send an abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+		                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, net->port);
+		*abort_no_unlock = 1;
+		return (-1);
+	}
+	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
+		/* protocol error... send an abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+		                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, net->port);
+		*abort_no_unlock = 1;
+		return (-1);
+	}
+	if (init_ack->num_inbound_streams == 0) {
+		/* protocol error... send an abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+		                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, net->port);
+		*abort_no_unlock = 1;
+		return (-1);
+	}
+	if (init_ack->num_outbound_streams == 0) {
+		/* protocol error... send an abort */
+		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
+		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
+		                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, net->port);
+		*abort_no_unlock = 1;
+		return (-1);
+	}
+	/* process according to association state... */
+	switch (stcb->asoc.state & SCTP_STATE_MASK) {
+	case SCTP_STATE_COOKIE_WAIT:
+		/* this is the expected state for this chunk */
+		/* process the INIT-ACK parameters */
+		if (stcb->asoc.primary_destination->dest_state &
+		    SCTP_ADDR_UNCONFIRMED) {
+			/*
+			 * The primary is where we sent the INIT, we can
+			 * always consider it confirmed when the INIT-ACK is
+			 * returned. Do this before we load addresses
+			 * though.
+			 */
+			stcb->asoc.primary_destination->dest_state &=
+			    ~SCTP_ADDR_UNCONFIRMED;
+			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
+		}
+		if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb,
+		                          net, abort_no_unlock,
+#if defined(__FreeBSD__)
+		                          mflowtype, mflowid,
+#endif
+		                          vrf_id) < 0) {
+			/* error in parsing parameters */
+			return (-1);
+		}
+		/* update our state */
+		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
+		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);
+
+		/* reset the RTO calc */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+				       stcb->asoc.overall_error_count,
+				       0,
+				       SCTP_FROM_SCTP_INPUT,
+				       __LINE__);
+		}
+		stcb->asoc.overall_error_count = 0;
+		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+		/*
+		 * collapse the init timer back in case of an exponential
+		 * backoff
+		 */
+		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
+		    stcb, net);
+		/*
+		 * the send at the end of the inbound data processing will
+		 * cause the cookie to be sent
+		 */
+		break;
+	case SCTP_STATE_SHUTDOWN_SENT:
+		/* incorrect state... discard */
+		break;
+	case SCTP_STATE_COOKIE_ECHOED:
+		/* incorrect state... discard */
+		break;
+	case SCTP_STATE_OPEN:
+		/* incorrect state... discard */
+		break;
+	case SCTP_STATE_EMPTY:
+	case SCTP_STATE_INUSE:
+	default:
+		/* incorrect state... discard */
+		return (-1);
+		break;
+	}
+	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
+	return (0);
+}
+
+static struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+    struct sockaddr *src, struct sockaddr *dst,
+    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+    struct sctp_inpcb *inp, struct sctp_nets **netp,
+    struct sockaddr *init_src, int *notification,
+    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+#if defined(__FreeBSD__)
+    uint8_t mflowtype, uint32_t mflowid,
+#endif
+    uint32_t vrf_id, uint16_t port);
+
+
+/*
+ * Handle a state cookie for an existing association.
+ * m: input packet mbuf chain -- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk
+ *    (note: this is a "split" mbuf and the cookie signature does not exist)
+ * offset: offset into mbuf to the cookie-echo chunk
+ */
+static struct sctp_tcb *
+sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
+    struct sockaddr *src, struct sockaddr *dst,
+    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
+    struct sockaddr *init_src, int *notification,
+    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+#if defined(__FreeBSD__)
+    uint8_t mflowtype, uint32_t mflowid,
+#endif
+    uint32_t vrf_id, uint16_t port)
+{
+	struct sctp_association *asoc;
+	struct sctp_init_chunk *init_cp, init_buf;
+	struct sctp_init_ack_chunk *initack_cp, initack_buf;
+	struct sctp_nets *net;
+	struct mbuf *op_err;
+	int init_offset, initack_offset, i;
+	int retval;
+	int spec_flag = 0;
+	uint32_t how_indx;
+#if defined(SCTP_DETAILED_STR_STATS)
+	int j;
+#endif
+
+	net = *netp;
+	/* I know that the TCB is non-NULL from the caller */
+	asoc = &stcb->asoc;
+	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
+		if (asoc->cookie_how[how_indx] == 0)
+			break;
+	}
+	if (how_indx < sizeof(asoc->cookie_how)) {
+		asoc->cookie_how[how_indx] = 1;
+	}
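+	/*
+	 * asoc->cookie_how[] records which of the collision-resolution
+	 * branches below was taken (codes 1..17), apparently as a
+	 * diagnostic aid when debugging cookie processing.
+	 */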
+	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+		/* SHUTDOWN came in after sending INIT-ACK */
+		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
+		op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, "");
+		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
+#if defined(__FreeBSD__)
+		                   mflowtype, mflowid, inp->fibnum,
+#endif
+		                   vrf_id, net->port);
+		if (how_indx < sizeof(asoc->cookie_how))
+			asoc->cookie_how[how_indx] = 2;
+		return (NULL);
+	}
+	/*
+	 * find and validate the INIT chunk in the cookie (peer's info) the
+	 * INIT should start after the cookie-echo header struct (chunk
+	 * header, state cookie header struct)
+	 */
+	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);
+
+	init_cp = (struct sctp_init_chunk *)
+		sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+			      (uint8_t *) & init_buf);
+	if (init_cp == NULL) {
+		/* could not pull an INIT chunk from the cookie */
+		return (NULL);
+	}
+	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+		return (NULL);
+	}
+	/*
+	 * find and validate the INIT-ACK chunk in the cookie (my info) the
+	 * INIT-ACK follows the INIT chunk
+	 */
+	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
+	initack_cp = (struct sctp_init_ack_chunk *)
+		sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+			      (uint8_t *) & initack_buf);
+	if (initack_cp == NULL) {
+		/* could not pull INIT-ACK chunk in cookie */
+		return (NULL);
+	}
+	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+		return (NULL);
+	}
+	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
+		/*
+		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
+		 * to get into the OPEN state
+		 */
+		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
+			/*-
+			 * Oops, this means that we somehow generated two vtags
+			 * that are the same. I.e. we did:
+			 *  Us               Peer
+			 *   <---INIT(tag=a)------
+			 *   ----INIT-ACK(tag=t)-->
+			 *   ----INIT(tag=t)------> *1
+			 *   <---INIT-ACK(tag=a)---
+			 *   <----CE(tag=t)------------- *2
+			 *
+			 * At point *1 we should be generating a different
+			 * tag t', which means we would throw away the CE and send
+			 * ours instead. Basically this is case C (throw away side).
+			 */
+			if (how_indx < sizeof(asoc->cookie_how))
+				asoc->cookie_how[how_indx] = 17;
+			return (NULL);
+
+		}
+		switch (SCTP_GET_STATE(asoc)) {
+			case SCTP_STATE_COOKIE_WAIT:
+			case SCTP_STATE_COOKIE_ECHOED:
+				/*
+				 * INIT was sent but got a COOKIE_ECHO with the
+				 * correct tags... just accept it...but we must
+				 * process the init so that we can make sure we
+				 * have the right seq no's.
+				 */
+				/* First we must process the INIT !! */
+				retval = sctp_process_init(init_cp, stcb);
+				if (retval < 0) {
+					if (how_indx < sizeof(asoc->cookie_how))
+						asoc->cookie_how[how_indx] = 3;
+					return (NULL);
+				}
+				/* we have already processed the INIT so no problem */
+				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp,
+				                stcb, net,
+				                SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
+				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp,
+				                stcb, net,
+				                SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
+				/* update current state */
+				if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
+					SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+				else
+					SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+
+				SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+				if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+							 stcb->sctp_ep, stcb, asoc->primary_destination);
+				}
+				SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+				sctp_stop_all_cookie_timers(stcb);
+				if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+				     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+				    (inp->sctp_socket->so_qlimit == 0)
+					) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					struct socket *so;
+#endif
+					/*
+					 * Here is where the collision case goes if we
+					 * did a connect() and instead got an
+					 * INIT/INIT-ACK/COOKIE exchange completed before
+					 * the INIT-ACK came back.
+					 */
+					stcb->sctp_ep->sctp_flags |=
+						SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					so = SCTP_INP_SO(stcb->sctp_ep);
+					atomic_add_int(&stcb->asoc.refcnt, 1);
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_SOCKET_LOCK(so, 1);
+					SCTP_TCB_LOCK(stcb);
+					atomic_add_int(&stcb->asoc.refcnt, -1);
+					if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+						SCTP_SOCKET_UNLOCK(so, 1);
+						return (NULL);
+					}
+#endif
+					soisconnected(stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+				}
+				/* notify upper layer */
+				*notification = SCTP_NOTIFY_ASSOC_UP;
+				/*
+				 * since we did not send a HB make sure we
+				 * don't double things
+				 */
+				net->hb_responded = 1;
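+				/*
+				 * cookie->time_entered is the time the cookie (built for
+				 * our INIT-ACK) was created, so this RTT sample covers
+				 * the INIT-ACK -> COOKIE-ECHO round trip.
+				 */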
+				net->RTO = sctp_calculate_rto(stcb, asoc, net,
+							      &cookie->time_entered,
+							      sctp_align_unsafe_makecopy,
+							      SCTP_RTT_FROM_NON_DATA);
+
+				if (stcb->asoc.sctp_autoclose_ticks &&
+				    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
+					sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+							 inp, stcb, NULL);
+				}
+				break;
+			default:
+				/*
+				 * we're in the OPEN state (or beyond), so
+				 * peer must have simply lost the COOKIE-ACK
+				 */
+				break;
+		}	/* end switch */
+		sctp_stop_all_cookie_timers(stcb);
+		/*
+		 * We ignore the return code here; not sure if we should
+		 * somehow abort, but we do have an existing asoc. This
+		 * really should not fail.
+		 */
+		if (sctp_load_addresses_from_init(stcb, m,
+						  init_offset + sizeof(struct sctp_init_chunk),
+						  initack_offset, src, dst, init_src, stcb->asoc.port)) {
+			if (how_indx < sizeof(asoc->cookie_how))
+				asoc->cookie_how[how_indx] = 4;
+			return (NULL);
+		}
+		/* respond with a COOKIE-ACK */
+		sctp_toss_old_cookies(stcb, asoc);
+		sctp_send_cookie_ack(stcb);
+		if (how_indx < sizeof(asoc->cookie_how))
+			asoc->cookie_how[how_indx] = 5;
+		return (stcb);
+	}
+
+	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
+	    cookie->tie_tag_my_vtag == 0 &&
+	    cookie->tie_tag_peer_vtag == 0) {
+		/*
+		 * case C in Section 5.2.4 Table 2: XMOO silently discard
+		 */
+		if (how_indx < sizeof(asoc->cookie_how))
+			asoc->cookie_how[how_indx] = 6;
+		return (NULL);
+	}
+	/* If the peer supports NAT and the association is already
+	 * established, send back an ABORT (colliding state).
+	 */
+	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN)  &&
+	    (asoc->peer_supports_nat) &&
+	    ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+	     (asoc->peer_vtag == 0)))) {
+		/* Special case - the peers support NAT. We may have
+		 * handed out the same tag on two INITs since one of them
+		 * was not established: i.e. we get an INIT from host-1
+		 * behind the NAT and we respond with tag-a, we get an INIT
+		 * from host-2 behind the NAT and we hand out tag-a again.
+		 * Then we bring up host-1's (or 2's) assoc, and then the
+		 * cookie from host-2 (or 1) arrives. Now we have colliding
+		 * state. We must send an abort here with the colliding
+		 * state indication.
+		 */
+		op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, "");
+		sctp_send_abort(m, iphlen,  src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__)
+		                mflowtype, mflowid, inp->fibnum,
+#endif
+		                vrf_id, port);
+		return (NULL);
+	}
+	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
+	    ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) ||
+	     (asoc->peer_vtag == 0))) {
+		/*
+		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
+		 * should be ok, re-accept peer info
+		 */
+		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
+			/* Extension of case C.
+			 * If we hit this, then the random number
+			 * generator returned the same vtag when we
+			 * first sent our INIT-ACK and when we later sent
+			 * our INIT. The side with the seq numbers that are
+			 * different will be the one that normally would
+			 * have hit case C. This in effect "extends" our vtags
+			 * in this collision case to be 64 bits. The same collision
+			 * could still occur, i.e. both vtag and seq number the
+			 * same twice in a row, but it is much less likely. If it
+			 * did happen then we would proceed through and bring
+			 * up the assoc; we may end up with the wrong stream
+			 * setup however, which would be bad, but there is
+			 * no way to tell until we send on a stream that does
+			 * not exist :-)
+			 */
+			if (how_indx < sizeof(asoc->cookie_how))
+				asoc->cookie_how[how_indx] = 7;
+
+			return (NULL);
+		}
+		if (how_indx < sizeof(asoc->cookie_how))
+			asoc->cookie_how[how_indx] = 8;
+		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+		                SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
+		sctp_stop_all_cookie_timers(stcb);
+		/*
+		 * since we did not send a HB make sure we don't double
+		 * things
+		 */
+		net->hb_responded = 1;
+		if (stcb->asoc.sctp_autoclose_ticks &&
+		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
+					 NULL);
+		}
+		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
+
+		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
+			/* Ok, the peer probably discarded our
+			 * data (if we echoed a cookie+data). So anything
+			 * on the sent_queue should be marked for
+			 * retransmit. We may not get something to
+			 * kick us, so it COULD still take a timeout
+			 * to move these, but it can't hurt to mark them.
+			 */
+			struct sctp_tmit_chunk *chk;
+			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+				if (chk->sent < SCTP_DATAGRAM_RESEND) {
+					chk->sent = SCTP_DATAGRAM_RESEND;
+					sctp_flight_size_decrease(chk);
+					sctp_total_flight_decrease(stcb, chk);
+					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+					spec_flag++;
+				}
+			}
+
+		}
+		/* process the INIT info (peer's info) */
+		retval = sctp_process_init(init_cp, stcb);
+		if (retval < 0) {
+			if (how_indx < sizeof(asoc->cookie_how))
+				asoc->cookie_how[how_indx] = 9;
+			return (NULL);
+		}
+		if (sctp_load_addresses_from_init(stcb, m,
+						  init_offset + sizeof(struct sctp_init_chunk),
+						  initack_offset, src, dst, init_src, stcb->asoc.port)) {
+			if (how_indx < sizeof(asoc->cookie_how))
+				asoc->cookie_how[how_indx] = 10;
+			return (NULL);
+		}
+		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
+		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
+			*notification = SCTP_NOTIFY_ASSOC_UP;
+
+			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+			    (inp->sctp_socket->so_qlimit == 0)) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+				struct socket *so;
+#endif
+				stcb->sctp_ep->sctp_flags |=
+					SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+				so = SCTP_INP_SO(stcb->sctp_ep);
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_SOCKET_LOCK(so, 1);
+				SCTP_TCB_LOCK(stcb);
+				atomic_add_int(&stcb->asoc.refcnt, -1);
+				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+					SCTP_SOCKET_UNLOCK(so, 1);
+					return (NULL);
+				}
+#endif
+				soisconnected(stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+				SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+			}
+			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
+				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+			else
+				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
+		} else {
+			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
+		}
+		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+					 stcb->sctp_ep, stcb, asoc->primary_destination);
+		}
+		sctp_stop_all_cookie_timers(stcb);
+		sctp_toss_old_cookies(stcb, asoc);
+		sctp_send_cookie_ack(stcb);
+		if (spec_flag) {
+			/* Only if we have retransmissions queued do we do this.
+			 * This call gets only the COOKIE-ACK out, and then when
+			 * we return, the normal call to sctp_chunk_output will
+			 * get the retransmissions out behind it.
+			 */
+			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
+		}
+		if (how_indx < sizeof(asoc->cookie_how))
+			asoc->cookie_how[how_indx] = 11;
+
+		return (stcb);
+	}
+	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
+	     ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
+	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
+	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
+	    cookie->tie_tag_peer_vtag != 0) {
+		struct sctpasochead *head;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+#endif
+
+		if (asoc->peer_supports_nat) {
+			/* This is a gross gross hack.
+			 * Just call the cookie_new code since we
+			 * are allowing a duplicate association.
+			 * I hope this works...
+			 */
+			return (sctp_process_cookie_new(m, iphlen, offset, src, dst,
+			                                sh, cookie, cookie_len,
+			                                inp, netp, init_src,notification,
+			                                auth_skipped, auth_offset, auth_len,
+#if defined(__FreeBSD__)
+			                                mflowtype, mflowid,
+#endif
+			                                vrf_id, port));
+		}
+		/*
+		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
+		 */
+		/* temp code */
+		if (how_indx < sizeof(asoc->cookie_how))
+			asoc->cookie_how[how_indx] = 12;
+		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net,
+		                SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
+		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+		                SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
+
+		/* notify upper layer */
+		*notification = SCTP_NOTIFY_ASSOC_RESTART;
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
+		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
+			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+		}
+		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
+		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
+			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
+		}
+		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+					 stcb->sctp_ep, stcb, asoc->primary_destination);
+
+		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
+			/* move to OPEN state, if not in SHUTDOWN_SENT */
+			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+		}
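+		/*
+		 * The peer has restarted: reinitialize our sequence-number
+		 * state from the INIT-ACK stored in the cookie and clear
+		 * the mapping arrays.
+		 */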
+		asoc->pre_open_streams =
+			ntohs(initack_cp->init.num_outbound_streams);
+		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+		asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+
+		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+
+		asoc->str_reset_seq_in = asoc->init_seq_number;
+
+		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+		if (asoc->mapping_array) {
+			memset(asoc->mapping_array, 0,
+			       asoc->mapping_array_size);
+		}
+		if (asoc->nr_mapping_array) {
+			memset(asoc->nr_mapping_array, 0,
+			    asoc->mapping_array_size);
+		}
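+		/*
+		 * Drop the TCB lock and re-take the socket, INP-INFO, INP
+		 * and TCB locks (presumably to respect the lock ordering)
+		 * before the association is moved to its new vtag hash
+		 * bucket below.
+		 */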
+		SCTP_TCB_UNLOCK(stcb);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		SCTP_SOCKET_LOCK(so, 1);
+#endif
+		SCTP_INP_INFO_WLOCK();
+		SCTP_INP_WLOCK(stcb->sctp_ep);
+		SCTP_TCB_LOCK(stcb);
+		atomic_add_int(&stcb->asoc.refcnt, -1);
+		/* send up all the data */
+		SCTP_TCB_SEND_LOCK(stcb);
+
+		sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED);
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			stcb->asoc.strmout[i].chunks_on_queues = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+				asoc->strmout[i].abandoned_sent[j] = 0;
+				asoc->strmout[i].abandoned_unsent[j] = 0;
+			}
+#else
+			asoc->strmout[i].abandoned_sent[0] = 0;
+			asoc->strmout[i].abandoned_unsent[0] = 0;
+#endif
+			stcb->asoc.strmout[i].sid = i;
+			stcb->asoc.strmout[i].next_mid_ordered = 0;
+			stcb->asoc.strmout[i].next_mid_unordered = 0;
+			stcb->asoc.strmout[i].last_msg_incomplete = 0;
+		}
+		/* process the INIT-ACK info (my info) */
+		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+
+		/* pull from vtag hash */
+		LIST_REMOVE(stcb, sctp_asocs);
+		/* re-insert to new vtag position */
+		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
+								    SCTP_BASE_INFO(hashasocmark))];
+		/*
+		 * put it in the bucket in the vtag hash of assoc's for the
+		 * system
+		 */
+		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+
+		SCTP_TCB_SEND_UNLOCK(stcb);
+		SCTP_INP_WUNLOCK(stcb->sctp_ep);
+		SCTP_INP_INFO_WUNLOCK();
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		asoc->total_flight = 0;
+		asoc->total_flight_count = 0;
+		/* process the INIT info (peer's info) */
+		retval = sctp_process_init(init_cp, stcb);
+		if (retval < 0) {
+			if (how_indx < sizeof(asoc->cookie_how))
+				asoc->cookie_how[how_indx] = 13;
+
+			return (NULL);
+		}
+		/*
+		 * since we did not send a HB make sure we don't double
+		 * things
+		 */
+		net->hb_responded = 1;
+
+		if (sctp_load_addresses_from_init(stcb, m,
+						  init_offset + sizeof(struct sctp_init_chunk),
+						  initack_offset, src, dst, init_src, stcb->asoc.port)) {
+			if (how_indx < sizeof(asoc->cookie_how))
+				asoc->cookie_how[how_indx] = 14;
+
+			return (NULL);
+		}
+		/* respond with a COOKIE-ACK */
+		sctp_stop_all_cookie_timers(stcb);
+		sctp_toss_old_cookies(stcb, asoc);
+		sctp_send_cookie_ack(stcb);
+		if (how_indx < sizeof(asoc->cookie_how))
+			asoc->cookie_how[how_indx] = 15;
+
+		return (stcb);
+	}
+	if (how_indx < sizeof(asoc->cookie_how))
+		asoc->cookie_how[how_indx] = 16;
+	/* all other cases... */
+	return (NULL);
+}
+
+
+/*
+ * handle a state cookie for a new association
+ * m: input packet mbuf chain -- assumes a pullup on the IP/SCTP/COOKIE-ECHO
+ *    chunk; note: this is a "split" mbuf and the cookie signature does not
+ *    exist
+ * offset: offset into mbuf to the cookie-echo chunk
+ * length: length of the cookie chunk
+ * to: where the init was from
+ * returns a new TCB
+ */
+static struct sctp_tcb *
+sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
+    struct sockaddr *src, struct sockaddr *dst,
+    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
+    struct sctp_inpcb *inp, struct sctp_nets **netp,
+    struct sockaddr *init_src, int *notification,
+    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+#if defined(__FreeBSD__)
+    uint8_t mflowtype, uint32_t mflowid,
+#endif
+    uint32_t vrf_id, uint16_t port)
+{
+	struct sctp_tcb *stcb;
+	struct sctp_init_chunk *init_cp, init_buf;
+	struct sctp_init_ack_chunk *initack_cp, initack_buf;
+	union sctp_sockstore store;
+	struct sctp_association *asoc;
+	int init_offset, initack_offset, initack_limit;
+	int retval;
+	int error = 0;
+	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+
+	so = SCTP_INP_SO(inp);
+#endif
+
+	/*
+	 * find and validate the INIT chunk in the cookie (peer's info) the
+	 * INIT should start after the cookie-echo header struct (chunk
+	 * header, state cookie header struct)
+	 */
+	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
+	init_cp = (struct sctp_init_chunk *)
+	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
+	    (uint8_t *) & init_buf);
+	if (init_cp == NULL) {
+		/* could not pull an INIT chunk from the cookie */
+		SCTPDBG(SCTP_DEBUG_INPUT1,
+			"process_cookie_new: could not pull INIT chunk hdr\n");
+		return (NULL);
+	}
+	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
+		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
+		return (NULL);
+	}
+	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
+	/*
+	 * find and validate the INIT-ACK chunk in the cookie (my info) the
+	 * INIT-ACK follows the INIT chunk
+	 */
+	initack_cp = (struct sctp_init_ack_chunk *)
+	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
+	    (uint8_t *) & initack_buf);
+	if (initack_cp == NULL) {
+		/* could not pull INIT-ACK chunk in cookie */
+		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
+		return (NULL);
+	}
+	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
+		return (NULL);
+	}
+	/*
+	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
+	 * "initack_limit" value.  This is because the chk_length field
+	 * includes the length of the cookie, but the cookie is omitted when
+	 * the INIT and INIT_ACK are tacked onto the cookie...
+	 */
+	initack_limit = offset + cookie_len;
+
+	/*
+	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
+	 * and populate it
+	 */
+
+	/*
+	 * Here we do a trick: we pass NULL for the proc/thread argument. We
+	 * do this since in effect we only use the p argument when
+	 * the socket is unbound and we must do an implicit bind.
+	 * Since we are getting a cookie, we cannot be unbound.
+	 */
+	stcb = sctp_aloc_assoc(inp, init_src, &error,
+	                       ntohl(initack_cp->init.initiate_tag), vrf_id,
+	                       ntohs(initack_cp->init.num_outbound_streams),
+	                       port,
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+	                       (struct thread *)NULL
+#elif defined(__Windows__)
+	                       (PKTHREAD)NULL
+#else
+	                       (struct proc *)NULL
+#endif
+	                       );
+	if (stcb == NULL) {
+		struct mbuf *op_err;
+
+		/* memory problem? */
+		SCTPDBG(SCTP_DEBUG_INPUT1,
+			"process_cookie_new: no room for another TCB!\n");
+		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+		                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, port);
+		return (NULL);
+	}
+	/* get the correct sctp_nets */
+	if (netp)
+		*netp = sctp_findnet(stcb, init_src);
+
+	asoc = &stcb->asoc;
+	/* get scope variables out of cookie */
+	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
+	asoc->scope.site_scope = cookie->site_scope;
+	asoc->scope.local_scope = cookie->local_scope;
+	asoc->scope.loopback_scope = cookie->loopback_scope;
+
+#if defined(__Userspace__)
+	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
+	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal) ||
+	    (asoc->scope.conn_addr_legal != cookie->conn_addr_legal)) {
+#else
+	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
+	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
+#endif
+		struct mbuf *op_err;
+
+		/*
+		 * Houston we have a problem. The EP changed while the
+		 * cookie was in flight. Only recourse is to abort the
+		 * association.
+		 */
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
+				       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+		                       mflowtype, mflowid,
+#endif
+		                       vrf_id, port);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+#endif
+		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+				      SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		return (NULL);
+	}
+	/* process the INIT-ACK info (my info) */
+	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
+	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
+	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
+	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
+	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
+	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
+	asoc->str_reset_seq_in = asoc->init_seq_number;
+
+	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+
+	/* process the INIT info (peer's info) */
+	if (netp)
+		retval = sctp_process_init(init_cp, stcb);
+	else
+		retval = 0;
+	if (retval < 0) {
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+#endif
+		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		return (NULL);
+	}
+	/* load all addresses */
+	if (sctp_load_addresses_from_init(stcb, m,
+	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
+	    src, dst, init_src, port)) {
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+#endif
+		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_20);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		return (NULL);
+	}
+	/*
+	 * verify any preceding AUTH chunk that was skipped
+	 */
+	/* pull the local authentication parameters from the cookie/init-ack */
+	sctp_auth_get_cookie_params(stcb, m,
+	    initack_offset + sizeof(struct sctp_init_ack_chunk),
+	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
+	if (auth_skipped) {
+		struct sctp_auth_chunk *auth;
+
+		auth = (struct sctp_auth_chunk *)
+		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
+		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
+			/* auth HMAC failed, dump the assoc and packet */
+			SCTPDBG(SCTP_DEBUG_AUTH1,
+				"COOKIE-ECHO: AUTH failed\n");
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 1);
+			SCTP_TCB_LOCK(stcb);
+#endif
+			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+			                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_21);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			return (NULL);
+		} else {
+			/* remaining chunks checked... good to go */
+			stcb->asoc.authenticated = 1;
+		}
+	}
+	/* update current state */
+	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
+	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+				 stcb->sctp_ep, stcb, asoc->primary_destination);
+	}
+	sctp_stop_all_cookie_timers(stcb);
+	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
+	SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+
+	/*
+	 * if we're doing ASCONFs, check to see if we have any new local
+	 * addresses that need to get added to the peer (e.g. addresses
+	 * changed while the cookie echo was in flight).  This needs to be
+	 * done after we go to the OPEN state to do the correct asconf
+	 * processing.  Otherwise, make sure we have the correct addresses
+	 * in our lists.
+	 */
+
+	/* warning, we re-use sin, sin6, sa_store here! */
+	/* pull in local_address (our "from" address) */
+	switch (cookie->laddr_type) {
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		/* source addr is IPv4 */
+		memset(&store.sin, 0, sizeof(struct sockaddr_in));
+		store.sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		store.sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+		store.sin.sin_addr.s_addr = cookie->laddress[0];
+		break;
+#endif
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		/* source addr is IPv6 */
+		memset(&store.sin6, 0, sizeof(struct sockaddr_in6));
+		store.sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		store.sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		store.sin6.sin6_scope_id = cookie->scope_id;
+		memcpy(&store.sin6.sin6_addr, cookie->laddress, sizeof(struct in6_addr));
+		break;
+#endif
+#if defined(__Userspace__)
+	case SCTP_CONN_ADDRESS:
+		/* source addr is conn */
+		memset(&store.sconn, 0, sizeof(struct sockaddr_conn));
+		store.sconn.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+		store.sconn.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+		memcpy(&store.sconn.sconn_addr, cookie->laddress, sizeof(void *));
+		break;
+#endif
+	default:
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+#endif
+		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+		                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		return (NULL);
+	}
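+	/*
+	 * store now holds the local ("from") address the cookie was built
+	 * for; it is handed to sctp_check_address_list() below.
+	 */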
+
+	/* set up to notify upper layer */
+	*notification = SCTP_NOTIFY_ASSOC_UP;
+	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+	    (inp->sctp_socket->so_qlimit == 0)) {
+		/*
+		 * This is an endpoint that called connect(); how it got a
+		 * cookie that is NEW is a bit of a mystery. It must be that
+		 * the INIT was sent, but before it got there a complete
+		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
+		 * should have gone to the other code, not here. Oh well,
+		 * a bit of protection is worth having.
+		 */
+		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return (NULL);
+		}
+#endif
+		soisconnected(stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+	    (inp->sctp_socket->so_qlimit)) {
+		/*
+		 * We don't want to do anything with this one, since it is
+		 * the listening socket. The timer will get started for
+		 * accepted connections in the caller.
+		 */
+		;
+	}
+	/* since we did not send a HB make sure we don't double things */
+	if ((netp) && (*netp))
+		(*netp)->hb_responded = 1;
+
+	if (stcb->asoc.sctp_autoclose_ticks &&
+	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
+	}
+	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+	if ((netp != NULL) && (*netp != NULL)) {
+		/* calculate the RTT and set the encaps port */
+		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
+						  &cookie->time_entered, sctp_align_unsafe_makecopy,
+						  SCTP_RTT_FROM_NON_DATA);
+	}
+	/* respond with a COOKIE-ACK */
+	sctp_send_cookie_ack(stcb);
+
+	/*
+	 * check the address lists for any ASCONFs that need to be sent
+	 * AFTER the cookie-ack is sent
+	 */
+	sctp_check_address_list(stcb, m,
+	    initack_offset + sizeof(struct sctp_init_ack_chunk),
+	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
+	    &store.sa, cookie->local_scope, cookie->site_scope,
+	    cookie->ipv4_scope, cookie->loopback_scope);
+
+
+	return (stcb);
+}
+
+/*
+ * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
+ * we NEED to make sure we are not already using the vtag. If so we
+ * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG, no middle box bit!
+	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+							    SCTP_BASE_INFO(hashasocmark))];
+	LIST_FOREACH(stcb, head, sctp_asocs) {
+	        if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep))  {
+		       -- SEND ABORT - TRY AGAIN --
+		}
+	}
+*/
+
+/*
+ * handles a COOKIE-ECHO message
+ * stcb: modified to point at a new TCB, or left as the existing (non-NULL) TCB
+ */
+static struct mbuf *
+sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset,
+    struct sockaddr *src, struct sockaddr *dst,
+    struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp,
+    struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp,
+    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
+    struct sctp_tcb **locked_tcb,
+#if defined(__FreeBSD__)
+    uint8_t mflowtype, uint32_t mflowid,
+#endif
+    uint32_t vrf_id, uint16_t port)
+{
+	struct sctp_state_cookie *cookie;
+	struct sctp_tcb *l_stcb = *stcb;
+	struct sctp_inpcb *l_inp;
+	struct sockaddr *to;
+	struct sctp_pcb *ep;
+	struct mbuf *m_sig;
+	uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE];
+	uint8_t *sig;
+	uint8_t cookie_ok = 0;
+	unsigned int sig_offset, cookie_offset;
+	unsigned int cookie_len;
+	struct timeval now;
+	struct timeval time_expires;
+	int notification = 0;
+	struct sctp_nets *netl;
+	int had_a_existing_tcb = 0;
+	int send_int_conf = 0;
+#ifdef INET
+	struct sockaddr_in sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 sin6;
+#endif
+#if defined(__Userspace__)
+	struct sockaddr_conn sconn;
+#endif
+
+	SCTPDBG(SCTP_DEBUG_INPUT2,
+		"sctp_handle_cookie: handling COOKIE-ECHO\n");
+
+	if (inp_p == NULL) {
+		return (NULL);
+	}
+	cookie = &cp->cookie;
+	cookie_offset = offset + sizeof(struct sctp_chunkhdr);
+	cookie_len = ntohs(cp->ch.chunk_length);
+
+	if ((cookie->peerport != sh->src_port) ||
+	    (cookie->myport != sh->dest_port) ||
+	    (cookie->my_vtag != sh->v_tag)) {
+		/*
+		 * invalid ports or bad tag.  Note that we always leave the
+		 * v_tag in the header in network order and when we stored
+		 * it in the my_vtag slot we also left it in network order.
+		 * This maintains the match even though it may be in the
+		 * opposite byte order of the machine :->
+		 */
+		return (NULL);
+	}
+	if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) +
+	    sizeof(struct sctp_init_chunk) +
+	    sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) {
+		/* cookie too small */
+		return (NULL);
+	}
+	/*
+	 * split off the signature into its own mbuf (since it should not be
+	 * calculated in the sctp_hmac_m() call).
+	 */
+	sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE;
+	m_sig = m_split(m, sig_offset, M_NOWAIT);
+	if (m_sig == NULL) {
+		/* out of memory or ?? */
+		return (NULL);
+	}
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mbc(m_sig, SCTP_MBUF_SPLIT);
+	}
+#endif
+
+	/*
+	 * compute the signature/digest for the cookie
+	 */
+	ep = &(*inp_p)->sctp_ep;
+	l_inp = *inp_p;
+	if (l_stcb) {
+		SCTP_TCB_UNLOCK(l_stcb);
+	}
+	SCTP_INP_RLOCK(l_inp);
+	if (l_stcb) {
+		SCTP_TCB_LOCK(l_stcb);
+	}
+	/* which cookie is it? */
+	if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) &&
+	    (ep->current_secret_number != ep->last_secret_number)) {
+		/* it's the old cookie */
+		(void)sctp_hmac_m(SCTP_HMAC,
+		    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
+		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+	} else {
+		/* it's the current cookie */
+		(void)sctp_hmac_m(SCTP_HMAC,
+		    (uint8_t *)ep->secret_key[(int)ep->current_secret_number],
+		    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+	}
+	/* get the signature */
+	SCTP_INP_RUNLOCK(l_inp);
+	sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig);
+	if (sig == NULL) {
+		/* couldn't find signature */
+		sctp_m_freem(m_sig);
+		return (NULL);
+	}
+	/* compare the received digest with the computed digest */
+	if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) {
+		/* try the old cookie? */
+		if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) &&
+		    (ep->current_secret_number != ep->last_secret_number)) {
+			/* compute digest with old */
+			(void)sctp_hmac_m(SCTP_HMAC,
+			    (uint8_t *)ep->secret_key[(int)ep->last_secret_number],
+			    SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0);
+			/* compare */
+			if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0)
+				cookie_ok = 1;
+		}
+	} else {
+		cookie_ok = 1;
+	}
+
+	/*
+	 * Now before we continue we must reconstruct our mbuf so that
+	 * normal processing of any other chunks will work.
+	 */
+	{
+		struct mbuf *m_at;
+
+		m_at = m;
+		while (SCTP_BUF_NEXT(m_at) != NULL) {
+			m_at = SCTP_BUF_NEXT(m_at);
+		}
+		SCTP_BUF_NEXT(m_at) = m_sig;
+	}
+
+	if (cookie_ok == 0) {
+		SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n");
+		SCTPDBG(SCTP_DEBUG_INPUT2,
+			"offset = %u, cookie_offset = %u, sig_offset = %u\n",
+			(uint32_t) offset, cookie_offset, sig_offset);
+		return (NULL);
+	}
+
+	/*
+	 * check the cookie timestamps to be sure it's not stale
+	 */
+	(void)SCTP_GETTIME_TIMEVAL(&now);
+	/* Expire time is in Ticks, so we convert to seconds */
+	time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life);
+	time_expires.tv_usec = cookie->time_entered.tv_usec;
+	/* TODO sctp_constants.h needs alternative time macros when
+	 * _KERNEL is undefined.
+	 */
+#ifndef __FreeBSD__
+	if (timercmp(&now, &time_expires, >))
+#else
+	if (timevalcmp(&now, &time_expires, >))
+#endif
+	{
+		/* cookie is stale! */
+		struct mbuf *op_err;
+		struct sctp_error_stale_cookie *cause;
+		uint32_t tim;
+		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_stale_cookie),
+					       0, M_NOWAIT, 1, MT_DATA);
+		if (op_err == NULL) {
+			/* FOOBAR */
+			return (NULL);
+		}
+		/* Set the len */
+		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_stale_cookie);
+		cause = mtod(op_err, struct sctp_error_stale_cookie *);
+		cause->cause.code = htons(SCTP_CAUSE_STALE_COOKIE);
+		cause->cause.length = htons((sizeof(struct sctp_paramhdr) +
+		    (sizeof(uint32_t))));
+		/* seconds to usec */
+		tim = (now.tv_sec - time_expires.tv_sec) * 1000000;
+		/* add in usec */
+		if (tim == 0)
+			tim = now.tv_usec - cookie->time_entered.tv_usec;
+		cause->stale_time = htonl(tim);
+		sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err,
+#if defined(__FreeBSD__)
+		                   mflowtype, mflowid, l_inp->fibnum,
+#endif
+		                   vrf_id, port);
+		return (NULL);
+	}
+	/*
+	 * Now we must check, using the lookup address, whether we have an
+	 * existing asoc. This will only happen if we were in the COOKIE-WAIT
+	 * state and an INIT collided with us and somewhere the peer sent the
+	 * cookie on another address besides the single address our assoc
+	 * had for him. In this case we will have at least one of the tie-tags
+	 * set AND the address field in the cookie can be used to look it
+	 * up.
+	 */
+	to = NULL;
+	switch (cookie->addr_type) {
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+		memset(&sin6, 0, sizeof(sin6));
+		sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6.sin6_len = sizeof(sin6);
+#endif
+		sin6.sin6_port = sh->src_port;
+		sin6.sin6_scope_id = cookie->scope_id;
+		memcpy(&sin6.sin6_addr.s6_addr, cookie->address,
+		    sizeof(sin6.sin6_addr.s6_addr));
+		to = (struct sockaddr *)&sin6;
+		break;
+#endif
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+		memset(&sin, 0, sizeof(sin));
+		sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin.sin_len = sizeof(sin);
+#endif
+		sin.sin_port = sh->src_port;
+		sin.sin_addr.s_addr = cookie->address[0];
+		to = (struct sockaddr *)&sin;
+		break;
+#endif
+#if defined(__Userspace__)
+	case SCTP_CONN_ADDRESS:
+		memset(&sconn, 0, sizeof(struct sockaddr_conn));
+		sconn.sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+		sconn.sconn_len = sizeof(struct sockaddr_conn);
+#endif
+		sconn.sconn_port = sh->src_port;
+		memcpy(&sconn.sconn_addr, cookie->address, sizeof(void *));
+		to = (struct sockaddr *)&sconn;
+		break;
+#endif
+	default:
+		/* This should not happen */
+		return (NULL);
+	}
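+	/*
+	 * 'to' now points at the peer address recorded in the cookie; it is
+	 * used below to look up a possibly colliding association.
+	 */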
+	if (*stcb == NULL) {
+		/* Yep, lets check */
+		*stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL);
+		if (*stcb == NULL) {
+			/*
+			 * We should only have got back the same inp. If we
+			 * got back a different ep we have a problem: the
+			 * original findep got back l_inp and now we have
+			 * something different.
+			 */
+			if (l_inp != *inp_p) {
+				SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n");
+			}
+		} else {
+			if (*locked_tcb == NULL) {
+				/* In this case we found the assoc only
+				 * after we locked the create lock. This means
+				 * we are in a colliding case and we must make
+				 * sure that we unlock the tcb if it's one of the
+				 * cases where we throw away the incoming packets.
+				 */
+				*locked_tcb = *stcb;
+
+				/* We must also increment the inp ref count
+				 * since the ref_count flag was set when we
+				 * did not find the TCB; now we found it, which
+				 * reduces the refcount, so we must raise it back
+				 * up to balance it all :-)
+				 */
+				SCTP_INP_INCR_REF((*stcb)->sctp_ep);
+				if ((*stcb)->sctp_ep != l_inp) {
+					SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n",
+						    (void *)(*stcb)->sctp_ep, (void *)l_inp);
+				}
+			}
+		}
+	}
+
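+	/*
+	 * Exclude the signature (verified above) from the cookie length
+	 * handed to the processing routines.
+	 */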
+	cookie_len -= SCTP_SIGNATURE_SIZE;
+	if (*stcb == NULL) {
+		/* this is the "normal" case... get a new TCB */
+		*stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh,
+		                                cookie, cookie_len, *inp_p,
+		                                netp, to, &notification,
+		                                auth_skipped, auth_offset, auth_len,
+#if defined(__FreeBSD__)
+		                                mflowtype, mflowid,
+#endif
+		                                vrf_id, port);
+	} else {
+		/* this is abnormal... cookie-echo on existing TCB */
+		had_a_existing_tcb = 1;
+		*stcb = sctp_process_cookie_existing(m, iphlen, offset,
+		                                     src, dst, sh,
+						     cookie, cookie_len, *inp_p, *stcb, netp, to,
+						     &notification, auth_skipped, auth_offset, auth_len,
+#if defined(__FreeBSD__)
+		                                     mflowtype, mflowid,
+#endif
+		                                     vrf_id, port);
+	}
+
+	if (*stcb == NULL) {
+		/* still no TCB... must be bad cookie-echo */
+		return (NULL);
+	}
+#if defined(__FreeBSD__)
+	if (*netp != NULL) {
+		(*netp)->flowtype = mflowtype;
+		(*netp)->flowid = mflowid;
+	}
+#endif
+	/*
+	 * Ok, we built an association so confirm the address we sent the
+	 * INIT-ACK to.
+	 */
+	netl = sctp_findnet(*stcb, to);
+	/*
+	 * This code should in theory NOT run, but handle it just in case.
+	 */
+	if (netl == NULL) {
+		/* TSNH! Huh, why do I need to add this address here? */
+		if (sctp_add_remote_addr(*stcb, to, NULL, port,
+		                         SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) {
+			return (NULL);
+		}
+		netl = sctp_findnet(*stcb, to);
+	}
+	if (netl) {
+		if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) {
+			netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+			(void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL,
+			    netl);
+			send_int_conf = 1;
+		}
+	}
+	sctp_start_net_timers(*stcb);
+	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+		if (!had_a_existing_tcb ||
+		    (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+			/*
+			 * If we have a NEW cookie or the connect never
+			 * reached the connected state during collision we
+			 * must do the TCP accept thing.
+			 */
+			struct socket *so, *oso;
+			struct sctp_inpcb *inp;
+
+			if (notification == SCTP_NOTIFY_ASSOC_RESTART) {
+				/*
+				 * For a restart we will keep the same
+				 * socket, no need to do anything. I THINK!!
+				 */
+				sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+				if (send_int_conf) {
+					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+					                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
+				}
+				return (m);
+			}
+			oso = (*inp_p)->sctp_socket;
+#if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
+			/*
+			 * We do this to keep the sockets side happy during
+			 * the sonewconn() call ONLY.
+			 */
+			NET_LOCK_GIANT();
+#endif
+			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK((*stcb));
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+			CURVNET_SET(oso->so_vnet);
+#endif
+#if defined(__APPLE__)
+			SCTP_SOCKET_LOCK(oso, 1);
+#endif
+			so = sonewconn(oso, 0
+#if defined(__APPLE__)
+			    ,NULL
+#endif
+#ifdef __Panda__
+			     ,NULL , (*inp_p)->def_vrf_id
+#endif
+			    );
+#if (defined(__FreeBSD__) && __FreeBSD_version < 700000)
+			NET_UNLOCK_GIANT();
+#endif
+#if defined(__APPLE__)
+			SCTP_SOCKET_UNLOCK(oso, 1);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+			CURVNET_RESTORE();
+#endif
+			SCTP_TCB_LOCK((*stcb));
+			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+
+			if (so == NULL) {
+				struct mbuf *op_err;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+				struct socket *pcb_so;
+#endif
+				/* Too many sockets */
+				SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n");
+				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+				sctp_abort_association(*inp_p, NULL, m, iphlen,
+						       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+				                       mflowtype, mflowid,
+#endif
+				                       vrf_id, port);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+				pcb_so = SCTP_INP_SO(*inp_p);
+				atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+				SCTP_TCB_UNLOCK((*stcb));
+				SCTP_SOCKET_LOCK(pcb_so, 1);
+				SCTP_TCB_LOCK((*stcb));
+				atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+#endif
+				(void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC,
+				                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+				SCTP_SOCKET_UNLOCK(pcb_so, 1);
+#endif
+				return (NULL);
+			}
+			inp = (struct sctp_inpcb *)so->so_pcb;
+			SCTP_INP_INCR_REF(inp);
+			/*
+			 * We add the unbound flag here so that
+			 * if we get an soabort() before we get the
+			 * move_pcb done, we will properly clean up.
+			 */
+			inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+			    SCTP_PCB_FLAGS_CONNECTED |
+			    SCTP_PCB_FLAGS_IN_TCPPOOL |
+			    SCTP_PCB_FLAGS_UNBOUND |
+			    (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) |
+			    SCTP_PCB_FLAGS_DONT_WAKE);
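+			/*
+			 * Inherit the remaining feature flags and per-endpoint
+			 * settings from the listening endpoint.
+			 */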
+			inp->sctp_features = (*inp_p)->sctp_features;
+			inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features;
+			inp->sctp_socket = so;
+			inp->sctp_frag_point = (*inp_p)->sctp_frag_point;
+			inp->max_cwnd = (*inp_p)->max_cwnd;
+			inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off;
+			inp->ecn_supported = (*inp_p)->ecn_supported;
+			inp->prsctp_supported = (*inp_p)->prsctp_supported;
+			inp->auth_supported = (*inp_p)->auth_supported;
+			inp->asconf_supported = (*inp_p)->asconf_supported;
+			inp->reconfig_supported = (*inp_p)->reconfig_supported;
+			inp->nrsack_supported = (*inp_p)->nrsack_supported;
+			inp->pktdrop_supported = (*inp_p)->pktdrop_supported;
+			inp->partial_delivery_point = (*inp_p)->partial_delivery_point;
+			inp->sctp_context = (*inp_p)->sctp_context;
+			inp->local_strreset_support = (*inp_p)->local_strreset_support;
+			inp->fibnum = (*inp_p)->fibnum;
+			inp->inp_starting_point_for_iterator = NULL;
+#if defined(__Userspace__)
+			inp->ulp_info = (*inp_p)->ulp_info;
+			inp->recv_callback = (*inp_p)->recv_callback;
+			inp->send_callback = (*inp_p)->send_callback;
+			inp->send_sb_threshold = (*inp_p)->send_sb_threshold;
+#endif
+			/*
+			 * copy in the authentication parameters from the
+			 * original endpoint
+			 */
+			if (inp->sctp_ep.local_hmacs)
+				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+			inp->sctp_ep.local_hmacs =
+			    sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs);
+			if (inp->sctp_ep.local_auth_chunks)
+				sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+			inp->sctp_ep.local_auth_chunks =
+			    sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks);
+
+			/*
+			 * Now we must move it from one hash table to
+			 * another and get the tcb in the right place.
+			 */
+
+			/* This is where the one-2-one socket is put into
+			 * the accept state waiting for the accept!
+			 */
+			if (*stcb) {
+				(*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE;
+			}
+			sctp_move_pcb_and_assoc(*inp_p, inp, *stcb);
+
+			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK((*stcb));
+
+#if defined(__FreeBSD__)
+			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb,
+			    0);
+#else
+			sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT);
+#endif
+			SCTP_TCB_LOCK((*stcb));
+			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+
+
+			/* now we must check to see if we were aborted while
+			 * the move was going on and the lock/unlock happened.
+			 */
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+				/* yep it was, we leave the
+				 * assoc attached to the socket since
+				 * the sctp_inpcb_free() call will send
+				 * an abort for us.
+				 */
+				SCTP_INP_DECR_REF(inp);
+				return (NULL);
+			}
+			SCTP_INP_DECR_REF(inp);
+			/* Switch over to the new guy */
+			*inp_p = inp;
+			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+			if (send_int_conf) {
+				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+				                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
+			}
+
+			/* Pull it from the incomplete queue and wake the guy */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK((*stcb));
+			SCTP_SOCKET_LOCK(so, 1);
+#endif
+			soisconnected(so);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			SCTP_TCB_LOCK((*stcb));
+			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
+			SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+			return (m);
+		}
+	}
+	if (notification) {
+		sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+	}
+	if (send_int_conf) {
+		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
+		                (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
+	}
+	return (m);
+}
+
+static void
+sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
+    struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/* cp must not be used, others call this without a c-ack :-) */
+	struct sctp_association *asoc;
+
+	SCTPDBG(SCTP_DEBUG_INPUT2,
+		"sctp_handle_cookie_ack: handling COOKIE-ACK\n");
+	if ((stcb == NULL) || (net == NULL)) {
+		return;
+	}
+
+	asoc = &stcb->asoc;
+
+	sctp_stop_all_cookie_timers(stcb);
+	/* process according to association state */
+	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
+		/* state change only needed when I am in right state */
+		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
+		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
+		sctp_start_net_timers(stcb);
+		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
+			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+					 stcb->sctp_ep, stcb, asoc->primary_destination);
+
+		}
+		/* update RTO */
+		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
+		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
+		if (asoc->overall_error_count == 0) {
+			net->RTO = sctp_calculate_rto(stcb, asoc, net,
+					             &asoc->time_entered, sctp_align_safe_nocopy,
+						      SCTP_RTT_FROM_NON_DATA);
+		}
+		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			struct socket *so;
+
+#endif
+			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			so = SCTP_INP_SO(stcb->sctp_ep);
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 1);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
+				soisconnected(stcb->sctp_socket);
+			}
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		}
+		/*
+		 * since we did not send a HB make sure we don't double
+		 * things
+		 */
+		net->hb_responded = 1;
+
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			/* We don't need to do the asconf thing,
+			 * nor hb or autoclose if the socket is closed.
+			 */
+			goto closed_socket;
+		}
+
+		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
+		    stcb, net);
+
+
+		if (stcb->asoc.sctp_autoclose_ticks &&
+		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
+			    stcb->sctp_ep, stcb, NULL);
+		}
+		/*
+		 * send ASCONF if parameters are pending and ASCONFs are
+		 * allowed (eg. addresses changed when init/cookie echo were
+		 * in flight)
+		 */
+		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
+		    (stcb->asoc.asconf_supported == 1) &&
+		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
+#ifdef SCTP_TIMER_BASED_ASCONF
+			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
+					 stcb->sctp_ep, stcb,
+					 stcb->asoc.primary_destination);
+#else
+			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
+					 SCTP_ADDR_NOT_LOCKED);
+#endif
+		}
+	}
+closed_socket:
+	/* Toss the cookie if I can */
+	sctp_toss_old_cookies(stcb, asoc);
+	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
+		/* Restart the timer if we have pending data */
+		struct sctp_tmit_chunk *chk;
+
+		chk = TAILQ_FIRST(&asoc->sent_queue);
+		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
+	}
+}
+
+static void
+sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
+		     struct sctp_tcb *stcb)
+{
+	struct sctp_nets *net;
+	struct sctp_tmit_chunk *lchk;
+	struct sctp_ecne_chunk bkup;
+	uint8_t override_bit;
+	uint32_t tsn, window_data_tsn;
+	int len;
+	unsigned int pkt_cnt;
+
+	len = ntohs(cp->ch.chunk_length);
+	if ((len != sizeof(struct sctp_ecne_chunk)) &&
+	    (len != sizeof(struct old_sctp_ecne_chunk))) {
+		return;
+	}
+	if (len == sizeof(struct old_sctp_ecne_chunk)) {
+		/* It's the old format */
+		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
+		bkup.num_pkts_since_cwr = htonl(1);
+		cp = &bkup;
+	}
+	SCTP_STAT_INCR(sctps_recvecne);
+	tsn = ntohl(cp->tsn);
+	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
+	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
+	if (lchk == NULL) {
+		window_data_tsn = stcb->asoc.sending_seq - 1;
+	} else {
+		window_data_tsn = lchk->rec.data.tsn;
+	}
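+	/*
+	 * window_data_tsn is the highest TSN we have queued or assigned so
+	 * far; it is used below to limit cwnd reductions to once per
+	 * window of data.
+	 */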
+
+	/* Find where it was sent to if possible. */
+	net = NULL;
+	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
+		if (lchk->rec.data.tsn == tsn) {
+			net = lchk->whoTo;
+			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
+			break;
+		}
+		if (SCTP_TSN_GT(lchk->rec.data.tsn, tsn)) {
+			break;
+		}
+	}
+	if (net == NULL) {
+		/*
+		 * The TSN was not found on the sent queue. A previous send
+		 * of a CWR was possibly lost; check whether we already have
+		 * this TSN marked as the last CWR on one of the nets.
+		 */
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+			if (tsn == net->last_cwr_tsn) {
+				/* Found him, send it off */
+				break;
+			}
+		}
+		if (net == NULL) {
+			/*
+			 * If we reach here, we need to send a special
+			 * CWR that says hey, we did this a long time
+			 * ago and you lost the response.
+			 */
+			net = TAILQ_FIRST(&stcb->asoc.nets);
+			if (net == NULL) {
+				/* TSNH */
+				return;
+			}
+			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
+		} else {
+			override_bit = 0;
+		}
+	} else {
+		override_bit = 0;
+	}
+	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
+	    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
+		/* JRS - Use the congestion control given in the pluggable CC module */
+		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
+		/*
+		 * We reduce once every RTT. So we will only lower cwnd at
+		 * the next sending seq i.e. the window_data_tsn
+		 */
+		net->cwr_window_tsn = window_data_tsn;
+		net->ecn_ce_pkt_cnt += pkt_cnt;
+		net->lost_cnt = pkt_cnt;
+		net->last_cwr_tsn = tsn;
+	} else {
+		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
+		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
+		    ((override_bit&SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
+			/*
+			 * Another loss in the same window; update how
+			 * many marks/packets we have lost.
+			 */
+			int cnt = 1;
+			if (pkt_cnt > net->lost_cnt) {
+				/* Should be the case */
+				cnt = (pkt_cnt - net->lost_cnt);
+				net->ecn_ce_pkt_cnt += cnt;
+			}
+			net->lost_cnt = pkt_cnt;
+			net->last_cwr_tsn = tsn;
+			/*
+			 * Most CC functions will ignore this call, since we are still
+			 * within the window of the initial CE the peer saw.
+			 */
+			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
+		}
+	}
+	/*
+	 * We always send a CWR this way: if our previous one was lost, the
+	 * peer will get an update, and if it is not yet time to reduce
+	 * again, the peer still gets the CWR. Note that we set the override
+	 * when we could not find the TSN on a chunk or the destination
+	 * network.
+	 */
+	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
+}
+
+static void
+sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/*
+	 * Here we get a CWR from the peer. We must look in the outqueue and
+	 * make sure that we have a covered ECNE in the control chunk part.
+	 * If so remove it.
+	 */
+	struct sctp_tmit_chunk *chk;
+	struct sctp_ecne_chunk *ecne;
+	int override;
+	uint32_t cwr_tsn;
+
+	cwr_tsn = ntohl(cp->tsn);
+	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
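+	/*
+	 * With the REDUCE_OVERRIDE flag set, this CWR covers every queued
+	 * ECNE regardless of destination, so we keep scanning instead of
+	 * stopping after the first match.
+	 */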
+	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
+			continue;
+		}
+		if ((override == 0) && (chk->whoTo != net)) {
+			/* Must be from the right src unless override is set */
+			continue;
+		}
+		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
+			/* this covers this ECNE, we can remove it */
+			stcb->asoc.ecn_echo_cnt_onq--;
+			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
+			    sctp_next);
+			sctp_m_freem(chk->data);
+			chk->data = NULL;
+			stcb->asoc.ctrl_queue_cnt--;
+			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+			if (override == 0) {
+				break;
+			}
+		}
+	}
+}
+
+static void
+sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
+    struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	struct sctp_association *asoc;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+
+	SCTPDBG(SCTP_DEBUG_INPUT2,
+		"sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
+	if (stcb == NULL)
+		return;
+
+	asoc = &stcb->asoc;
+	/* process according to association state */
+	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
+		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
+		SCTPDBG(SCTP_DEBUG_INPUT2,
+			"sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
+		SCTP_TCB_UNLOCK(stcb);
+		return;
+	}
+	/* notify upper layer protocol */
+	if (stcb->sctp_socket) {
+		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
+	}
+#ifdef INVARIANTS
+	if (!TAILQ_EMPTY(&asoc->send_queue) ||
+	    !TAILQ_EMPTY(&asoc->sent_queue) ||
+	    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED)) {
+		panic("Queues are not empty when handling SHUTDOWN-COMPLETE");
+	}
+#endif
+	/* stop the timer */
+	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net,
+	                SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
+	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
+	/* free the TCB */
+	SCTPDBG(SCTP_DEBUG_INPUT2,
+		"sctp_handle_shutdown_complete: calls free-asoc\n");
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	so = SCTP_INP_SO(stcb->sctp_ep);
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	SCTP_TCB_UNLOCK(stcb);
+	SCTP_SOCKET_LOCK(so, 1);
+	SCTP_TCB_LOCK(stcb);
+	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
+	                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	return;
+}
+
+static int
+process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
+    struct sctp_nets *net, uint8_t flg)
+{
+	switch (desc->chunk_type) {
+	case SCTP_DATA:
+		/* find the TSN to resend (if possible) */
+	{
+		uint32_t tsn;
+		struct sctp_tmit_chunk *tp1;
+
+		tsn = ntohl(desc->tsn_ifany);
+		TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+			if (tp1->rec.data.tsn == tsn) {
+				/* found it */
+				break;
+			}
+			if (SCTP_TSN_GT(tp1->rec.data.tsn, tsn)) {
+				/* not found */
+				tp1 = NULL;
+				break;
+			}
+		}
+		if (tp1 == NULL) {
+			/*
+			 * Do it the other way, i.e. without paying
+			 * attention to queue seq order.
+			 */
+			SCTP_STAT_INCR(sctps_pdrpdnfnd);
+			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+				if (tp1->rec.data.tsn == tsn) {
+					/* found it */
+					break;
+				}
+			}
+		}
+		if (tp1 == NULL) {
+			SCTP_STAT_INCR(sctps_pdrptsnnf);
+		}
+		if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
+			uint8_t *ddp;
+
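+			/*
+			 * Only act on drops reported with a bad CRC or coming
+			 * from a middle box; if the peer's rwnd is closed the
+			 * drop was window-related rather than loss, so do not
+			 * mark the chunk for retransmission.
+			 */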
+			if (((flg & SCTP_BADCRC) == 0) &&
+			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
+				return (0);
+			}
+			if ((stcb->asoc.peers_rwnd == 0) &&
+			    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
+				SCTP_STAT_INCR(sctps_pdrpdiwnp);
+				return (0);
+			}
+			if (stcb->asoc.peers_rwnd == 0 &&
+			    (flg & SCTP_FROM_MIDDLE_BOX)) {
+				SCTP_STAT_INCR(sctps_pdrpdizrw);
+				return (0);
+			}
+			ddp = (uint8_t *) (mtod(tp1->data, caddr_t) +
+					   sizeof(struct sctp_data_chunk));
+			{
+				unsigned int iii;
+
+				for (iii = 0; iii < sizeof(desc->data_bytes);
+				     iii++) {
+					if (ddp[iii] != desc->data_bytes[iii]) {
+						SCTP_STAT_INCR(sctps_pdrpbadd);
+						return (-1);
+					}
+				}
+			}
+
+			if (tp1->do_rtt) {
+				/*
+				 * this guy had an RTO calculation
+				 * pending on it; cancel it
+				 */
+				if (tp1->whoTo->rto_needed == 0) {
+					tp1->whoTo->rto_needed = 1;
+				}
+				tp1->do_rtt = 0;
+			}
+			SCTP_STAT_INCR(sctps_pdrpmark);
+			if (tp1->sent != SCTP_DATAGRAM_RESEND)
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+			/*
+			 * mark it as if we were doing a FR, since
+			 * we will be getting gap ack reports behind
+			 * the info from the router.
+			 */
+			tp1->rec.data.doing_fast_retransmit = 1;
+			/*
+			 * mark the tsn with what sequences can
+			 * cause a new FR.
+			 */
+			if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+				tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+			} else {
+				tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
+			}
+
+			/* restart the timer */
+			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+					stcb, tp1->whoTo,
+			                SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
+			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
+					 stcb, tp1->whoTo);
+
+			/* fix counts and things */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
+					       tp1->whoTo->flight_size,
+					       tp1->book_size,
+					       (uint32_t)(uintptr_t)stcb,
+					       tp1->rec.data.tsn);
+			}
+			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+				sctp_flight_size_decrease(tp1);
+				sctp_total_flight_decrease(stcb, tp1);
+			}
+			tp1->sent = SCTP_DATAGRAM_RESEND;
+		} {
+			/* audit code */
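+			/*
+			 * Recount the chunks marked for retransmission and,
+			 * if the running counter disagrees, resynchronize it
+			 * (unless full auditing is compiled in).
+			 */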
+			unsigned int audit;
+
+			audit = 0;
+			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
+				if (tp1->sent == SCTP_DATAGRAM_RESEND)
+					audit++;
+			}
+			TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
+				      sctp_next) {
+				if (tp1->sent == SCTP_DATAGRAM_RESEND)
+					audit++;
+			}
+			if (audit != stcb->asoc.sent_queue_retran_cnt) {
+				SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
+					    audit, stcb->asoc.sent_queue_retran_cnt);
+#ifndef SCTP_AUDITING_ENABLED
+				stcb->asoc.sent_queue_retran_cnt = audit;
+#endif
+			}
+		}
+	}
+	break;
+	case SCTP_ASCONF:
+	{
+		struct sctp_tmit_chunk *asconf;
+
+		TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
+			      sctp_next) {
+			if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
+				break;
+			}
+		}
+		if (asconf) {
+			if (asconf->sent != SCTP_DATAGRAM_RESEND)
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+			asconf->sent = SCTP_DATAGRAM_RESEND;
+			asconf->snd_count--;
+		}
+	}
+	break;
+	case SCTP_INITIATION:
+		/* resend the INIT */
+		stcb->asoc.dropped_special_cnt++;
+		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
+			/*
+			 * If we can get it in within a few attempts we do
+			 * this; otherwise we let the timer fire.
+			 */
+			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
+					stcb, net,
+			                SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
+			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
+		}
+		break;
+	case SCTP_SELECTIVE_ACK:
+	case SCTP_NR_SELECTIVE_ACK:
+		/* resend the sack */
+		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+		break;
+	case SCTP_HEARTBEAT_REQUEST:
+		/* resend a demand HB */
+		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
+			/* Only retransmit if we KNOW we won't destroy the tcb */
+			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+		}
+		break;
+	case SCTP_SHUTDOWN:
+		sctp_send_shutdown(stcb, net);
+		break;
+	case SCTP_SHUTDOWN_ACK:
+		sctp_send_shutdown_ack(stcb, net);
+		break;
+	case SCTP_COOKIE_ECHO:
+	{
+		struct sctp_tmit_chunk *cookie;
+
+		cookie = NULL;
+		TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
+			      sctp_next) {
+			if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+				break;
+			}
+		}
+		if (cookie) {
+			if (cookie->sent != SCTP_DATAGRAM_RESEND)
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+			cookie->sent = SCTP_DATAGRAM_RESEND;
+			sctp_stop_all_cookie_timers(stcb);
+		}
+	}
+	break;
+	case SCTP_COOKIE_ACK:
+		sctp_send_cookie_ack(stcb);
+		break;
+	case SCTP_ASCONF_ACK:
+		/* resend last asconf ack */
+		sctp_send_asconf_ack(stcb);
+		break;
+	case SCTP_IFORWARD_CUM_TSN:
+	case SCTP_FORWARD_CUM_TSN:
+		send_forward_tsn(stcb, &stcb->asoc);
+		break;
+		/* can't do anything with these */
+	case SCTP_PACKET_DROPPED:
+	case SCTP_INITIATION_ACK:	/* this should not happen */
+	case SCTP_HEARTBEAT_ACK:
+	case SCTP_ABORT_ASSOCIATION:
+	case SCTP_OPERATION_ERROR:
+	case SCTP_SHUTDOWN_COMPLETE:
+	case SCTP_ECN_ECHO:
+	case SCTP_ECN_CWR:
+	default:
+		break;
+	}
+	return (0);
+}
+
+void
+sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
+{
+	uint32_t i;
+	uint16_t temp;
+
+	/*
+	 * We set things to 0xffffffff since this is the last delivered sequence
+	 * and we will be sending in 0 after the reset.
+	 */
+
+	if (number_entries) {
+		for (i = 0; i < number_entries; i++) {
+			temp = ntohs(list[i]);
+			if (temp >= stcb->asoc.streamincnt) {
+				continue;
+			}
+			stcb->asoc.strmin[temp].last_mid_delivered = 0xffffffff;
+		}
+	} else {
+		list = NULL;
+		for (i = 0; i < stcb->asoc.streamincnt; i++) {
+			stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
+		}
+	}
+	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
+}
+
+static void
+sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
+{
+	uint32_t i;
+	uint16_t temp;
+
+	if (number_entries > 0) {
+		for (i = 0; i < number_entries; i++) {
+			temp = ntohs(list[i]);
+			if (temp >= stcb->asoc.streamoutcnt) {
+				/* no such stream */
+				continue;
+			}
+			stcb->asoc.strmout[temp].next_mid_ordered = 0;
+			stcb->asoc.strmout[temp].next_mid_unordered = 0;
+		}
+	} else {
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			stcb->asoc.strmout[i].next_mid_ordered = 0;
+			stcb->asoc.strmout[i].next_mid_unordered = 0;
+		}
+	}
+	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
+}
+
+static void
+sctp_reset_clear_pending(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t *list)
+{
+	uint32_t i;
+	uint16_t temp;
+
+	if (number_entries > 0) {
+		for (i = 0; i < number_entries; i++) {
+			temp = ntohs(list[i]);
+			if (temp >= stcb->asoc.streamoutcnt) {
+				/* no such stream */
+				continue;
+			}
+			stcb->asoc.strmout[temp].state = SCTP_STREAM_OPEN;
+		}
+	} else {
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			stcb->asoc.strmout[i].state = SCTP_STREAM_OPEN;
+		}
+	}
+}
+
+
+struct sctp_stream_reset_request *
+sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
+{
+	struct sctp_association *asoc;
+	struct sctp_chunkhdr *ch;
+	struct sctp_stream_reset_request *r;
+	struct sctp_tmit_chunk *chk;
+	int len, clen;
+
+	asoc = &stcb->asoc;
+	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+		asoc->stream_reset_outstanding = 0;
+		return (NULL);
+	}
+	if (stcb->asoc.str_reset == NULL) {
+		asoc->stream_reset_outstanding = 0;
+		return (NULL);
+	}
+	chk = stcb->asoc.str_reset;
+	if (chk->data == NULL) {
+		return (NULL);
+	}
+	if (bchk) {
+		/* he wants a copy of the chk pointer */
+		*bchk = chk;
+	}
+	clen = chk->send_size;
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	r = (struct sctp_stream_reset_request *)(ch + 1);
+	if (ntohl(r->request_seq) == seq) {
+		/* found it */
+		return (r);
+	}
+	len = SCTP_SIZE32(ntohs(r->ph.param_length));
+	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
+		/* move to the next one, there can only be a max of two */
+		r = (struct sctp_stream_reset_request *)((caddr_t)r + len);
+		if (ntohl(r->request_seq) == seq) {
+			return (r);
+		}
+	}
+	/* that seq is not here */
+	return (NULL);
+}
+
+static void
+sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;
+
+	if (stcb->asoc.str_reset == NULL) {
+		return;
+	}
+	asoc = &stcb->asoc;
+
+	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb,
+	                chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28);
+	TAILQ_REMOVE(&asoc->control_send_queue,
+	    chk,
+	    sctp_next);
+	if (chk->data) {
+		sctp_m_freem(chk->data);
+		chk->data = NULL;
+	}
+	asoc->ctrl_queue_cnt--;
+	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+	/*sa_ignore NO_NULL_CHK*/
+	stcb->asoc.str_reset = NULL;
+}
+
+
+static int
+sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
+				  uint32_t seq, uint32_t action,
+				  struct sctp_stream_reset_response *respin)
+{
+	uint16_t type;
+	int lparm_len;
+	struct sctp_association *asoc = &stcb->asoc;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_stream_reset_request *req_param;
+	struct sctp_stream_reset_out_request *req_out_param;
+	struct sctp_stream_reset_in_request *req_in_param;
+	uint32_t number_entries;
+
+	if (asoc->stream_reset_outstanding == 0) {
+		/* duplicate */
+		return (0);
+	}
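+	/*
+	 * Only a response that matches our outstanding request sequence
+	 * number is processed; anything else is ignored here.
+	 */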
+	if (seq == stcb->asoc.str_reset_seq_out) {
+		req_param = sctp_find_stream_reset(stcb, seq, &chk);
+		if (req_param != NULL) {
+			stcb->asoc.str_reset_seq_out++;
+			type = ntohs(req_param->ph.param_type);
+			lparm_len = ntohs(req_param->ph.param_length);
+			if (type == SCTP_STR_RESET_OUT_REQUEST) {
+				int no_clear = 0;
+
+				req_out_param = (struct sctp_stream_reset_out_request *)req_param;
+				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
+				asoc->stream_reset_out_is_outstanding = 0;
+				if (asoc->stream_reset_outstanding)
+					asoc->stream_reset_outstanding--;
+				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
+					/* do it */
+					sctp_reset_out_streams(stcb, number_entries, req_out_param->list_of_streams);
+				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+				} else if (action == SCTP_STREAM_RESET_RESULT_IN_PROGRESS) {
+					/* Set it up so we don't stop retransmitting */
+					asoc->stream_reset_outstanding++;
+					stcb->asoc.str_reset_seq_out--;
+					asoc->stream_reset_out_is_outstanding = 1;
+					no_clear = 1;
+				} else {
+					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, req_out_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+				}
+				if (no_clear == 0) {
+					sctp_reset_clear_pending(stcb, number_entries, req_out_param->list_of_streams);
+				}
+			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
+				req_in_param = (struct sctp_stream_reset_in_request *)req_param;
+				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
+				if (asoc->stream_reset_outstanding)
+					asoc->stream_reset_outstanding--;
+				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
+							number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
+					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
+							number_entries, req_in_param->list_of_streams, SCTP_SO_NOT_LOCKED);
+				}
+			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
+				/* Ok we now may have more streams */
+				int num_stream;
+
+				num_stream = stcb->asoc.strm_pending_add_size;
+				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
+					/* TSNH */
+					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
+				}
+				stcb->asoc.strm_pending_add_size = 0;
+				if (asoc->stream_reset_outstanding)
+					asoc->stream_reset_outstanding--;
+				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
+					/* Put the new streams into effect */
+					int i;
+					for ( i = asoc->streamoutcnt; i< (asoc->streamoutcnt + num_stream); i++) {
+						asoc->strmout[i].state = SCTP_STREAM_OPEN;
+					}
+					asoc->streamoutcnt += num_stream;
+					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
+				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+								     SCTP_STREAM_CHANGE_DENIED);
+				} else {
+					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+								     SCTP_STREAM_CHANGE_FAILED);
+				}
+			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
+				if (asoc->stream_reset_outstanding)
+					asoc->stream_reset_outstanding--;
+				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+								     SCTP_STREAM_CHANGE_DENIED);
+				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
+					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
+								     SCTP_STREAM_CHANGE_FAILED);
+				}
+			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
+				/**
+				 * a) Adopt the new in tsn.
+				 * b) reset the map
+				 * c) Adopt the new out-tsn
+				 */
+				struct sctp_stream_reset_response_tsn *resp;
+				struct sctp_forward_tsn_chunk fwdtsn;
+				int abort_flag = 0;
+				if (respin == NULL) {
+					/* huh ? */
+					return (0);
+				}
+				if (ntohs(respin->ph.param_length) < sizeof(struct sctp_stream_reset_response_tsn)) {
+					return (0);
+				}
+				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
+					resp = (struct sctp_stream_reset_response_tsn *)respin;
+					asoc->stream_reset_outstanding--;
+					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
+					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
+					if (abort_flag) {
+						return (1);
+					}
+					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+					}
+
+					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
+					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
+					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
+
+					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
+					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
+
+					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
+					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
+
+					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
+				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
+					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
+								     SCTP_ASSOC_RESET_DENIED);
+				} else {
+					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
+								     SCTP_ASSOC_RESET_FAILED);
+				}
+			}
+			/* get rid of the request and get the request flags */
+			if (asoc->stream_reset_outstanding == 0) {
+				sctp_clean_up_stream_reset(stcb);
+			}
+		}
+	}
+	if (asoc->stream_reset_outstanding == 0) {
+		sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
+	}
+	return (0);
+}
+
+static void
+sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
+    struct sctp_tmit_chunk *chk,
+    struct sctp_stream_reset_in_request *req, int trunc)
+{
+	uint32_t seq;
+	int len, i;
+	int number_entries;
+	uint16_t temp;
+
+	/*
+	 * The peer wants me to send a str-reset for my outgoing stream
+	 * sequences, provided seq_in is right.
+	 */
+	struct sctp_association *asoc = &stcb->asoc;
+
+	seq = ntohl(req->request_seq);
+	if (asoc->str_reset_seq_in == seq) {
+		asoc->last_reset_action[1] = asoc->last_reset_action[0];
+		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else if (trunc) {
+			/* Can't do it, since they exceeded our buffer size  */
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
+			len = ntohs(req->ph.param_length);
+			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
+			if (number_entries) {
+				for (i = 0; i < number_entries; i++) {
+					temp = ntohs(req->list_of_streams[i]);
+					if (temp >= stcb->asoc.streamoutcnt) {
+						asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+						goto bad_boy;
+					}
+					req->list_of_streams[i] = temp;
+				}
+				for (i = 0; i < number_entries; i++) {
+					if (stcb->asoc.strmout[req->list_of_streams[i]].state == SCTP_STREAM_OPEN) {
+						stcb->asoc.strmout[req->list_of_streams[i]].state = SCTP_STREAM_RESET_PENDING;
+					}
+				}
+			} else {
+				/* It's all of them */
+				for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN)
+						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
+				}
+			}
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+		} else {
+			/* Can't do it, since we have sent one out */
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
+		}
+	bad_boy:
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+		asoc->str_reset_seq_in++;
+	} else if (asoc->str_reset_seq_in - 1 == seq) {
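+		/*
+		 * one seq back, just echo back last action since my
+		 * response was lost.
+		 */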
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+	} else if (asoc->str_reset_seq_in - 2 == seq) {
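+		/*
+		 * two seq back, just echo back last action since my
+		 * response was lost.
+		 */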
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+	} else {
+		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+	}
+	sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_NOT_LOCKED);
+}
+
+static int
+sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
+    struct sctp_tmit_chunk *chk,
+    struct sctp_stream_reset_tsn_request *req)
+{
+	/* reset all in and out and update the tsn */
+	/*
+	 * A) reset my str-seq's on in and out. B) Select a receive next,
+	 * and set cum-ack to it; also process this selected number as a
+	 * fwd-tsn. C) set in the response my next sending seq.
+	 */
+	struct sctp_forward_tsn_chunk fwdtsn;
+	struct sctp_association *asoc = &stcb->asoc;
+	int abort_flag = 0;
+	uint32_t seq;
+
+	seq = ntohl(req->request_seq);
+	if (asoc->str_reset_seq_in == seq) {
+		asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0];
+		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else {
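+			/*
+			 * Build a synthetic FORWARD-TSN covering everything
+			 * received so far, advance our TSN state by
+			 * SCTP_STREAM_RESET_TSN_DELTA, clear the mapping
+			 * arrays and reset both the incoming and outgoing
+			 * stream sequences.
+			 */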
+			fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
+			fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+			fwdtsn.ch.chunk_flags = 0;
+			fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
+			sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
+			if (abort_flag) {
+				return (1);
+			}
+			asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
+				sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
+			}
+			asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
+			asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1;
+			memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
+			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
+			atomic_add_int(&asoc->sending_seq, 1);
+			/* save off historical data for retrans */
+			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
+			asoc->last_sending_seq[0] = asoc->sending_seq;
+			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
+			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
+			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
+			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
+		}
+		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
+		                                 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
+		asoc->str_reset_seq_in++;
+	} else if (asoc->str_reset_seq_in - 1 == seq) {
+		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
+		                                 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
+	} else if (asoc->str_reset_seq_in - 2 == seq) {
+		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
+		                                 asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
+	} else {
+		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+	}
+	return (0);
+}
+
+static void
+sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
+    struct sctp_tmit_chunk *chk,
+    struct sctp_stream_reset_out_request *req, int trunc)
+{
+	uint32_t seq, tsn;
+	int number_entries, len;
+	struct sctp_association *asoc = &stcb->asoc;
+
+	seq = ntohl(req->request_seq);
+
+	/* now if it's not a duplicate we process it */
+	if (asoc->str_reset_seq_in == seq) {
+		len = ntohs(req->ph.param_length);
+		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
+		/*
+		 * the sender is resetting; handle the list issue: we must
+		 * a) verify whether we can do the reset now; if so, no
+		 * problem. b) If we can't do the reset yet, we must copy the
+		 * request, c) queue it, and set up the data-in processor to
+		 * trigger it when needed, dequeuing all the queued data.
+		 */
+		tsn = ntohl(req->send_reset_at_tsn);
+
+		/* move the reset action back one */
+		asoc->last_reset_action[1] = asoc->last_reset_action[0];
+		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else if (trunc) {
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
+			/* we can do it now */
+			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+		} else {
+			/*
+			 * we must queue it up and wait for the TSNs at or
+			 * before tsn to arrive
+			 */
+			struct sctp_stream_reset_list *liste;
+			int siz;
+
+			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
+			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
+				    siz, SCTP_M_STRESET);
+			if (liste == NULL) {
+				/* gak out of memory */
+				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+				return;
+			}
+			liste->seq = seq;
+			liste->tsn = tsn;
+			liste->number_entries = number_entries;
+			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
+			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_IN_PROGRESS;
+		}
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+		asoc->str_reset_seq_in++;
+	} else if ((asoc->str_reset_seq_in - 1) == seq) {
+		/*
+		 * one seq back, just echo back last action since my
+		 * response was lost.
+		 */
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+	} else if ((asoc->str_reset_seq_in - 2) == seq) {
+		/*
+		 * two seq back, just echo back last action since my
+		 * response was lost.
+		 */
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+	} else {
+		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+	}
+}
+
+static void
+sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
+			       struct sctp_stream_reset_add_strm  *str_add)
+{
+	/*
+	 * Peer is requesting to add more streams.
+	 * If it's within our max-streams we will
+	 * allow it.
+	 */
+	uint32_t num_stream, i;
+	uint32_t seq;
+	struct sctp_association *asoc = &stcb->asoc;
+	struct sctp_queued_to_read *ctl, *nctl;
+
+	/* Get the number. */
+	seq = ntohl(str_add->request_seq);
+	num_stream = ntohs(str_add->number_of_streams);
+	/* Now what would be the new total? */
+	if (asoc->str_reset_seq_in == seq) {
+		num_stream += stcb->asoc.streamincnt;
+		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
+		           (num_stream > 0xffff)) {
+			/* We must reject it; they ask for too many */
+  denied:
+			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else {
+			/* Ok, we can do that :-) */
+			struct sctp_stream_in *oldstrm;
+
+			/* save off the old */
+			oldstrm = stcb->asoc.strmin;
+			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
+			            (num_stream * sizeof(struct sctp_stream_in)),
+			            SCTP_M_STRMI);
+			if (stcb->asoc.strmin == NULL) {
+				stcb->asoc.strmin = oldstrm;
+				goto denied;
+			}
+			/* copy off the old data */
+			for (i = 0; i < stcb->asoc.streamincnt; i++) {
+				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
+				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
+				stcb->asoc.strmin[i].sid = i;
+				stcb->asoc.strmin[i].last_mid_delivered = oldstrm[i].last_mid_delivered;
+				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
+				stcb->asoc.strmin[i].pd_api_started = oldstrm[i].pd_api_started;
+				/* now anything on those queues? */
+				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next_instrm, nctl) {
+					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next_instrm);
+					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next_instrm);
+				}
+				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].uno_inqueue, next_instrm, nctl) {
+					TAILQ_REMOVE(&oldstrm[i].uno_inqueue, ctl, next_instrm);
+					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].uno_inqueue, ctl, next_instrm);
+				}
+			}
+			/* Init the new streams */
+			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
+				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
+				TAILQ_INIT(&stcb->asoc.strmin[i].uno_inqueue);
+				stcb->asoc.strmin[i].sid = i;
+				stcb->asoc.strmin[i].last_mid_delivered = 0xffffffff;
+				stcb->asoc.strmin[i].pd_api_started = 0;
+				stcb->asoc.strmin[i].delivery_started = 0;
+			}
+			SCTP_FREE(oldstrm, SCTP_M_STRMI);
+			/* update the size */
+			stcb->asoc.streamincnt = num_stream;
+			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
+		}
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+		asoc->str_reset_seq_in++;
+	} else if ((asoc->str_reset_seq_in - 1) == seq) {
+		/*
+		 * one seq back, just echo back last action since my
+		 * response was lost.
+		 */
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+	} else if ((asoc->str_reset_seq_in - 2) == seq) {
+		/*
+		 * two seq back, just echo back last action since my
+		 * response was lost.
+		 */
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+	} else {
+		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+
+	}
+}
+
+static void
+sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
+				   struct sctp_stream_reset_add_strm  *str_add)
+{
+	/*
+	 * Peer is requesting to add more streams.
+	 * If it's within our max-streams we will
+	 * allow it.
+	 */
+	uint16_t num_stream;
+	uint32_t seq;
+	struct sctp_association *asoc = &stcb->asoc;
+
+	/* Get the number. */
+	seq = ntohl(str_add->request_seq);
+	num_stream = ntohs(str_add->number_of_streams);
+	/* Now what would be the new total? */
+	if (asoc->str_reset_seq_in == seq) {
+		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
+		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+		} else if (stcb->asoc.stream_reset_outstanding) {
+			/* We must reject it; we have something pending */
+			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
+		} else {
+			/* Ok, we can do that :-) */
+			int mychk;
+			mychk = stcb->asoc.streamoutcnt;
+			mychk += num_stream;
+			if (mychk < 0x10000) {
+				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
+				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 1, num_stream, 0, 1)) {
+					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+				}
+			} else {
+				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
+			}
+		}
+		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
+		asoc->str_reset_seq_in++;
+	} else if ((asoc->str_reset_seq_in - 1) == seq) {
+		/*
+		 * one seq back, just echo back last action since my
+		 * response was lost.
+		 */
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
+	} else if ((asoc->str_reset_seq_in - 2) == seq) {
+		/*
+		 * two seq back, just echo back last action since my
+		 * response was lost.
+		 */
+		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
+	} else {
+		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
+	}
+}
+
+#if !defined(__Panda__)
+#ifdef __GNUC__
+__attribute__ ((noinline))
+#endif
+#endif
+static int
+sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
+			 struct sctp_chunkhdr *ch_req)
+{
+	uint16_t remaining_length, param_len, ptype;
+	struct sctp_paramhdr pstore;
+	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];
+	uint32_t seq = 0;
+	int num_req = 0;
+	int trunc = 0;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_chunkhdr *ch;
+	struct sctp_paramhdr *ph;
+	int ret_code = 0;
+	int num_param = 0;
+
+	/* now it may be a reset or a reset-response */
+	remaining_length = ntohs(ch_req->chunk_length) - sizeof(struct sctp_chunkhdr);
+
+	/* setup for adding the response */
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		return (ret_code);
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->no_fr_allowed = 0;
+	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
+	chk->book_size_scale = 0;
+	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (chk->data == NULL) {
+	strres_nochunk:
+		if (chk->data) {
+			sctp_m_freem(chk->data);
+			chk->data = NULL;
+		}
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		return (ret_code);
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+	/* setup chunk parameters */
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->whoTo = NULL;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	ch->chunk_type = SCTP_STREAM_RESET;
+	ch->chunk_flags = 0;
+	ch->chunk_length = htons(chk->send_size);
+	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+	offset += sizeof(struct sctp_chunkhdr);
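+	/*
+	 * Walk each parameter in the incoming chunk; every request processed
+	 * appends a result parameter to the response chunk built above.
+	 */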
+	while (remaining_length >= sizeof(struct sctp_paramhdr)) {
+		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *)&pstore);
+		if (ph == NULL) {
+			/* TSNH */
+			break;
+		}
+		param_len = ntohs(ph->param_length);
+		if ((param_len > remaining_length) ||
+		    (param_len < (sizeof(struct sctp_paramhdr) + sizeof(uint32_t)))) {
+			/* bad parameter length */
+			break;
+		}
+		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, sizeof(cstore)),
+							   (uint8_t *)&cstore);
+		if (ph == NULL) {
+			/* TSNH */
+			break;
+		}
+		ptype = ntohs(ph->param_type);
+		num_param++;
+		if (param_len > sizeof(cstore)) {
+			trunc = 1;
+		} else {
+			trunc = 0;
+		}
+		if (num_param > SCTP_MAX_RESET_PARAMS) {
+			/* hit the max number of parameters already; sorry.. */
+			break;
+		}
+		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
+			struct sctp_stream_reset_out_request *req_out;
+
+			if (param_len < sizeof(struct sctp_stream_reset_out_request)) {
+				break;
+			}
+			req_out = (struct sctp_stream_reset_out_request *)ph;
+			num_req++;
+			if (stcb->asoc.stream_reset_outstanding) {
+				seq = ntohl(req_out->response_seq);
+				if (seq == stcb->asoc.str_reset_seq_out) {
+					/* implicit ack */
+					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
+				}
+			}
+			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
+		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
+			struct sctp_stream_reset_add_strm  *str_add;
+
+			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
+				break;
+			}
+			str_add = (struct sctp_stream_reset_add_strm  *)ph;
+			num_req++;
+			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
+		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
+			struct sctp_stream_reset_add_strm  *str_add;
+
+			if (param_len < sizeof(struct sctp_stream_reset_add_strm)) {
+				break;
+			}
+			str_add = (struct sctp_stream_reset_add_strm  *)ph;
+			num_req++;
+			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
+		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
+			struct sctp_stream_reset_in_request *req_in;
+
+			num_req++;
+			req_in = (struct sctp_stream_reset_in_request *)ph;
+			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
+		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
+			struct sctp_stream_reset_tsn_request *req_tsn;
+
+			num_req++;
+			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
+			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
+				ret_code = 1;
+				goto strres_nochunk;
+			}
+			/* no more */
+			break;
+		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
+			struct sctp_stream_reset_response *resp;
+			uint32_t result;
+
+			if (param_len < sizeof(struct sctp_stream_reset_response)) {
+				break;
+			}
+			resp = (struct sctp_stream_reset_response *)ph;
+			seq = ntohl(resp->response_seq);
+			result = ntohl(resp->result);
+			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
+				ret_code = 1;
+				goto strres_nochunk;
+			}
+		} else {
+			break;
+		}
+		offset += SCTP_SIZE32(param_len);
+		if (remaining_length >= SCTP_SIZE32(param_len)) {
+			remaining_length -= SCTP_SIZE32(param_len);
+		} else {
+			remaining_length = 0;
+		}
+	}
+	if (num_req == 0) {
+		/* we have no response; free the stuff */
+		goto strres_nochunk;
+	}
+	/* ok we have a chunk to link in */
+	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
+			  chk,
+			  sctp_next);
+	stcb->asoc.ctrl_queue_cnt++;
+	return (ret_code);
+}
+
+/*
+ * Handle a router's or endpoint's report of a packet loss. There are two
+ * ways to handle this: either we get the whole packet and must dissect it
+ * ourselves (possibly with truncation and/or corruption), or it is a summary
+ * from a middle box that did the dissecting for us.
+ */
+static void
+sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp,
+    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit)
+{
+	uint32_t bottle_bw, on_queue;
+	uint16_t trunc_len;
+	unsigned int chlen;
+	unsigned int at;
+	struct sctp_chunk_desc desc;
+	struct sctp_chunkhdr *ch;
+
+	chlen = ntohs(cp->ch.chunk_length);
+	chlen -= sizeof(struct sctp_pktdrop_chunk);
+	/* XXX possible chlen underflow */
+	if (chlen == 0) {
+		ch = NULL;
+		if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)
+			SCTP_STAT_INCR(sctps_pdrpbwrpt);
+	} else {
+		ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr));
+		chlen -= sizeof(struct sctphdr);
+		/* XXX possible chlen underflow */
+		memset(&desc, 0, sizeof(desc));
+	}
+	trunc_len = (uint16_t) ntohs(cp->trunc_len);
+	if (trunc_len > limit) {
+		trunc_len = limit;
+	}
+
+	/* now the chunks themselves */
+	while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) {
+		desc.chunk_type = ch->chunk_type;
+		/* get amount we need to move */
+		at = ntohs(ch->chunk_length);
+		if (at < sizeof(struct sctp_chunkhdr)) {
+			/* corrupt chunk, maybe at the end? */
+			SCTP_STAT_INCR(sctps_pdrpcrupt);
+			break;
+		}
+		if (trunc_len == 0) {
+			/* we are supposed to have all of it */
+			if (at > chlen) {
+				/* corrupt skip it */
+				SCTP_STAT_INCR(sctps_pdrpcrupt);
+				break;
+			}
+		} else {
+			/* is there enough of it left ? */
+			if (desc.chunk_type == SCTP_DATA) {
+				if (chlen < (sizeof(struct sctp_data_chunk) +
+				    sizeof(desc.data_bytes))) {
+					break;
+				}
+			} else {
+				if (chlen < sizeof(struct sctp_chunkhdr)) {
+					break;
+				}
+			}
+		}
+		if (desc.chunk_type == SCTP_DATA) {
+			/* can we get out the tsn? */
+			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
+				SCTP_STAT_INCR(sctps_pdrpmbda);
+
+			if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) {
+				/* yep */
+				struct sctp_data_chunk *dcp;
+				uint8_t *ddp;
+				unsigned int iii;
+
+				dcp = (struct sctp_data_chunk *)ch;
+				ddp = (uint8_t *) (dcp + 1);
+				for (iii = 0; iii < sizeof(desc.data_bytes); iii++) {
+					desc.data_bytes[iii] = ddp[iii];
+				}
+				desc.tsn_ifany = dcp->dp.tsn;
+			} else {
+				/* nope we are done. */
+				SCTP_STAT_INCR(sctps_pdrpnedat);
+				break;
+			}
+		} else {
+			if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX))
+				SCTP_STAT_INCR(sctps_pdrpmbct);
+		}
+
+		if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) {
+			SCTP_STAT_INCR(sctps_pdrppdbrk);
+			break;
+		}
+		if (SCTP_SIZE32(at) > chlen) {
+			break;
+		}
+		chlen -= SCTP_SIZE32(at);
+		if (chlen < sizeof(struct sctp_chunkhdr)) {
+			/* done, none left */
+			break;
+		}
+		ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at));
+	}
+	/* Now update any rwnd --- possibly */
+	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) {
+		/* From a peer, we get a rwnd report */
+		uint32_t a_rwnd;
+
+		SCTP_STAT_INCR(sctps_pdrpfehos);
+
+		bottle_bw = ntohl(cp->bottle_bw);
+		on_queue = ntohl(cp->current_onq);
+		if (bottle_bw && on_queue) {
+			/* a rwnd report is in here */
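+			/*
+			 * The peer's usable window is roughly bottle_bw minus
+			 * what is already queued there; subtract our bytes in
+			 * flight (clamping at zero) to get the new peers_rwnd.
+			 */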
+			if (bottle_bw > on_queue)
+				a_rwnd = bottle_bw - on_queue;
+			else
+				a_rwnd = 0;
+
+			if (a_rwnd == 0)
+				stcb->asoc.peers_rwnd = 0;
+			else {
+				if (a_rwnd > stcb->asoc.total_flight) {
+					stcb->asoc.peers_rwnd =
+					    a_rwnd - stcb->asoc.total_flight;
+				} else {
+					stcb->asoc.peers_rwnd = 0;
+				}
+				if (stcb->asoc.peers_rwnd <
+				    stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+					/* SWS sender side engages */
+					stcb->asoc.peers_rwnd = 0;
+				}
+			}
+		}
+	} else {
+		SCTP_STAT_INCR(sctps_pdrpfmbox);
+	}
+
+	/* now middle boxes in sat networks get a cwnd bump */
+	if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) &&
+	    (stcb->asoc.sat_t3_loss_recovery == 0) &&
+	    (stcb->asoc.sat_network)) {
+		/*
+		 * This is debatable, but for sat networks it makes sense.
+		 * Note that if a T3 timer has gone off, we will prohibit any
+		 * changes to cwnd until we exit the t3 loss recovery.
+		 */
+		stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb,
+			net, cp, &bottle_bw, &on_queue);
+	}
+}
+
+/*
+ * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to
+ * still contain IP/SCTP header - stcb: is the tcb found for this packet -
+ * offset: offset into the mbuf chain to first chunkhdr - length: is the
+ * length of the complete packet outputs: - length: modified to remaining
+ * length after control processing - netp: modified to new sctp_nets after
+ * cookie-echo processing - return NULL to discard the packet (ie. no asoc,
+ * bad packet,...) otherwise return the tcb for this packet
+ */
+#if !defined(__Panda__)
+#ifdef __GNUC__
+__attribute__ ((noinline))
+#endif
+#endif
+static struct sctp_tcb *
+sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
+    struct sockaddr *src, struct sockaddr *dst,
+    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
+#if defined(__FreeBSD__)
+    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+    uint32_t vrf_id, uint16_t port)
+{
+	struct sctp_association *asoc;
+	struct mbuf *op_err;
+	char msg[SCTP_DIAG_INFO_LEN];
+	uint32_t vtag_in;
+	int num_chunks = 0;	/* number of control chunks processed */
+	uint32_t chk_length;
+	int ret;
+	int abort_no_unlock = 0;
+	int ecne_seen = 0;
+	/*
+	 * How big should this be, and should it be alloc'd? Let's try the
+	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
+	 * until we get into jumbo grams and such..
+	 */
+	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
+	struct sctp_tcb *locked_tcb = stcb;
+	int got_auth = 0;
+	uint32_t auth_offset = 0, auth_len = 0;
+	int auth_skipped = 0;
+	int asconf_cnt = 0;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+
+	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
+		iphlen, *offset, length, (void *)stcb);
+
+	/* validate chunk header length... */
+	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
+		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
+			ntohs(ch->chunk_length));
+		if (locked_tcb) {
+			SCTP_TCB_UNLOCK(locked_tcb);
+		}
+		return (NULL);
+	}
+	/*
+	 * validate the verification tag
+	 */
+	vtag_in = ntohl(sh->v_tag);
+
+	if (locked_tcb) {
+		SCTP_TCB_LOCK_ASSERT(locked_tcb);
+	}
+	if (ch->chunk_type == SCTP_INITIATION) {
+		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
+			ntohs(ch->chunk_length), vtag_in);
+		if (vtag_in != 0) {
+			/* protocol error- silently discard... */
+			SCTP_STAT_INCR(sctps_badvtag);
+			if (locked_tcb) {
+				SCTP_TCB_UNLOCK(locked_tcb);
+			}
+			return (NULL);
+		}
+	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
+		/*
+		 * If there is no stcb, skip the AUTH chunk and process it
+		 * later after a stcb is found (to validate that the lookup
+		 * was valid).
+		 */
+		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
+		    (stcb == NULL) &&
+		    (inp->auth_supported == 1)) {
+			/* save this chunk for later processing */
+			auth_skipped = 1;
+			auth_offset = *offset;
+			auth_len = ntohs(ch->chunk_length);
+
+			/* (temporarily) move past this chunk */
+			*offset += SCTP_SIZE32(auth_len);
+			if (*offset >= length) {
+				/* no more data left in the mbuf chain */
+				*offset = length;
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+								   sizeof(struct sctp_chunkhdr), chunk_buf);
+		}
+		if (ch == NULL) {
+			/* Help */
+			*offset = length;
+			if (locked_tcb) {
+				SCTP_TCB_UNLOCK(locked_tcb);
+			}
+			return (NULL);
+		}
+		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
+			goto process_control_chunks;
+		}
+		/*
+		 * first check if it's an ASCONF with an unknown src addr;
+		 * we need to look inside to find the association
+		 */
+		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
+			struct sctp_chunkhdr *asconf_ch = ch;
+			uint32_t asconf_offset = 0, asconf_len = 0;
+
+			/* inp's refcount may be reduced */
+			SCTP_INP_INCR_REF(inp);
+
+			asconf_offset = *offset;
+			do {
+				asconf_len = ntohs(asconf_ch->chunk_length);
+				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
+					break;
+				stcb = sctp_findassociation_ep_asconf(m,
+				                                      *offset,
+				                                      dst,
+				                                      sh, &inp, netp, vrf_id);
+				if (stcb != NULL)
+					break;
+				asconf_offset += SCTP_SIZE32(asconf_len);
+				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
+										  sizeof(struct sctp_chunkhdr), chunk_buf);
+			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
+			if (stcb == NULL) {
+				/*
+				 * reduce inp's refcount if not reduced in
+				 * sctp_findassociation_ep_asconf().
+				 */
+				SCTP_INP_DECR_REF(inp);
+			} else {
+				locked_tcb = stcb;
+			}
+
+			/* now go back and verify any auth chunk to be sure */
+			if (auth_skipped && (stcb != NULL)) {
+				struct sctp_auth_chunk *auth;
+
+				auth = (struct sctp_auth_chunk *)
+					sctp_m_getptr(m, auth_offset,
+						      auth_len, chunk_buf);
+				got_auth = 1;
+				auth_skipped = 0;
+				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
+								       auth_offset)) {
+					/* auth HMAC failed so dump it */
+					*offset = length;
+					if (locked_tcb) {
+						SCTP_TCB_UNLOCK(locked_tcb);
+					}
+					return (NULL);
+				} else {
+					/* remaining chunks are HMAC checked */
+					stcb->asoc.authenticated = 1;
+				}
+			}
+		}
+		if (stcb == NULL) {
+			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             msg);
+			/* no association, so it's out of the blue... */
+			sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__)
+			                 mflowtype, mflowid, inp->fibnum,
+#endif
+					 vrf_id, port);
+			*offset = length;
+			if (locked_tcb) {
+				SCTP_TCB_UNLOCK(locked_tcb);
+			}
+			return (NULL);
+		}
+		asoc = &stcb->asoc;
+		/* ABORT and SHUTDOWN can use either v_tag... */
+		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
+		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
+		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
+			/* Take the T-bit always into account. */
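+			/*
+			 * Without the T-bit the tag must match our own vtag;
+			 * with the T-bit set it must match the peer's vtag
+			 * (reflected tag).
+			 */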
+			if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) &&
+			     (vtag_in == asoc->my_vtag)) ||
+			    (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) &&
+			     (vtag_in == asoc->peer_vtag))) {
+				/* this is valid */
+			} else {
+				/* drop this packet... */
+				SCTP_STAT_INCR(sctps_badvtag);
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+			if (vtag_in != asoc->my_vtag) {
+				/*
+				 * this could be a stale SHUTDOWN-ACK or the
+				 * peer never got the SHUTDOWN-COMPLETE and
+				 * is still hung; we have started a new asoc
+				 * but it won't complete until the shutdown
+				 * is completed
+				 */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+				                             msg);
+				sctp_handle_ootb(m, iphlen, *offset, src, dst,
+				                 sh, inp, op_err,
+#if defined(__FreeBSD__)
+				                 mflowtype, mflowid, fibnum,
+#endif
+				                 vrf_id, port);
+				return (NULL);
+			}
+		} else {
+			/* for all other chunks, vtag must match */
+			if (vtag_in != asoc->my_vtag) {
+				/* invalid vtag... */
+				SCTPDBG(SCTP_DEBUG_INPUT3,
+					"invalid vtag: %xh, expect %xh\n",
+					vtag_in, asoc->my_vtag);
+				SCTP_STAT_INCR(sctps_badvtag);
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+		}
+	}			/* end if !SCTP_COOKIE_ECHO */
+	/*
+	 * process all control chunks...
+	 */
+	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
+	     (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) ||
+	     (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
+	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+		/* implied cookie-ack.. we must have lost the ack */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+				       stcb->asoc.overall_error_count,
+				       0,
+				       SCTP_FROM_SCTP_INPUT,
+				       __LINE__);
+		}
+		stcb->asoc.overall_error_count = 0;
+		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
+				       *netp);
+	}
+
+ process_control_chunks:
+	while (IS_SCTP_CONTROL(ch)) {
+		/* validate chunk length */
+		chk_length = ntohs(ch->chunk_length);
+		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
+			ch->chunk_type, chk_length);
+		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
+		if (chk_length < sizeof(*ch) ||
+		    (*offset + (int)chk_length) > length) {
+			*offset = length;
+			if (locked_tcb) {
+				SCTP_TCB_UNLOCK(locked_tcb);
+			}
+			return (NULL);
+		}
+		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
+		/*
+		 * INIT-ACK only gets the init ack "header" portion only
+		 * because we don't have to process the peer's COOKIE. All
+		 * others get a complete chunk.
+		 */
+		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
+		    (ch->chunk_type == SCTP_INITIATION)) {
+			/* get an init-ack chunk */
+			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+								   sizeof(struct sctp_init_ack_chunk), chunk_buf);
+			if (ch == NULL) {
+				*offset = length;
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+		} else {
+			/* For cookies and all other chunks. */
+			if (chk_length > sizeof(chunk_buf)) {
+				/*
+				 * use just the size of the chunk buffer
+				 * so the front part of our chunks fit in
+				 * contiguous space up to the chunk buffer
+				 * size (508 bytes).
+				 * For chunks that need to get more than that
+				 * they must use the sctp_m_getptr() function
+				 * or other means (e.g. know how to parse mbuf
+				 * chains). Cookies do this already.
+				 */
+				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+									   (sizeof(chunk_buf) - 4),
+									   chunk_buf);
+				if (ch == NULL) {
+					*offset = length;
+					if (locked_tcb) {
+						SCTP_TCB_UNLOCK(locked_tcb);
+					}
+					return (NULL);
+				}
+			} else {
+				/* We can fit it all */
+				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+								   chk_length, chunk_buf);
+				if (ch == NULL) {
+					SCTP_PRINTF("sctp_process_control: Can't get all the data...\n");
+					*offset = length;
+					if (locked_tcb) {
+						SCTP_TCB_UNLOCK(locked_tcb);
+					}
+					return (NULL);
+				}
+			}
+		}
+		num_chunks++;
+		/* Save off the last place we got a control from */
+		if (stcb != NULL) {
+			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
+				/*
+				 * allow last_control to be NULL if
+				 * ASCONF... ASCONF processing will find the
+				 * right net later
+				 */
+				if ((netp != NULL) && (*netp != NULL))
+					stcb->asoc.last_control_chunk_from = *netp;
+			}
+		}
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_audit_log(0xB0, ch->chunk_type);
+#endif
+
+		/* check to see if this chunk required auth, but isn't */
+		if ((stcb != NULL) &&
+		    (stcb->asoc.auth_supported == 1) &&
+		    sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) &&
+		    !stcb->asoc.authenticated) {
+			/* "silently" ignore */
+			SCTP_STAT_INCR(sctps_recvauthmissing);
+			goto next_chunk;
+		}
+		switch (ch->chunk_type) {
+		case SCTP_INITIATION:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
+			/* The INIT chunk must be the only chunk. */
+			if ((num_chunks > 1) ||
+			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
+				/* RFC 4960 requires that no ABORT is sent */
+				*offset = length;
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+			/* Honor our resource limit. */
+			if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) {
+				op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+				sctp_abort_association(inp, stcb, m, iphlen,
+						       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+				                       mflowtype, mflowid,
+#endif
+				                       vrf_id, port);
+				*offset = length;
+				return (NULL);
+			}
+			sctp_handle_init(m, iphlen, *offset, src, dst, sh,
+			                 (struct sctp_init_chunk *)ch, inp,
+			                 stcb, *netp, &abort_no_unlock,
+#if defined(__FreeBSD__)
+			                 mflowtype, mflowid,
+#endif
+			                 vrf_id, port);
+			*offset = length;
+			if ((!abort_no_unlock) && (locked_tcb)) {
+				SCTP_TCB_UNLOCK(locked_tcb);
+			}
+			return (NULL);
+			break;
+		case SCTP_PAD_CHUNK:
+			break;
+		case SCTP_INITIATION_ACK:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+				/* We are not interested anymore */
+				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+					;
+				} else {
+					if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
+						/* Very unlikely */
+						SCTP_TCB_UNLOCK(locked_tcb);
+					}
+					*offset = length;
+					if (stcb) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+						so = SCTP_INP_SO(inp);
+						atomic_add_int(&stcb->asoc.refcnt, 1);
+						SCTP_TCB_UNLOCK(stcb);
+						SCTP_SOCKET_LOCK(so, 1);
+						SCTP_TCB_LOCK(stcb);
+						atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+						                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+						SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+					}
+					return (NULL);
+				}
+			}
+			/* The INIT-ACK chunk must be the only chunk. */
+			if ((num_chunks > 1) ||
+			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
+				*offset = length;
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+			if ((netp) && (*netp)) {
+				ret = sctp_handle_init_ack(m, iphlen, *offset,
+				                           src, dst, sh,
+				                           (struct sctp_init_ack_chunk *)ch,
+				                           stcb, *netp,
+				                           &abort_no_unlock,
+#if defined(__FreeBSD__)
+				                           mflowtype, mflowid,
+#endif
+				                           vrf_id);
+			} else {
+				ret = -1;
+			}
+			*offset = length;
+			if (abort_no_unlock) {
+				return (NULL);
+			}
+			/*
+			 * Special case, I must call the output routine to
+			 * get the cookie echoed
+			 */
+			if ((stcb != NULL) && (ret == 0)) {
+				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+			}
+			if (locked_tcb) {
+				SCTP_TCB_UNLOCK(locked_tcb);
+			}
+			return (NULL);
+			break;
+		case SCTP_SELECTIVE_ACK:
+			{
+				struct sctp_sack_chunk *sack;
+				int abort_now = 0;
+				uint32_t a_rwnd, cum_ack;
+				uint16_t num_seg, num_dup;
+				uint8_t flags;
+				int offset_seg, offset_dup;
+
+				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
+				SCTP_STAT_INCR(sctps_recvsacks);
+				if (stcb == NULL) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n");
+					break;
+				}
+				if (chk_length < sizeof(struct sctp_sack_chunk)) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n");
+					break;
+				}
+				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+					/*-
+					 * If we have sent a shutdown-ack, we will pay no
+					 * attention to a sack sent in to us since
+					 * we don't care anymore.
+					 */
+					break;
+				}
+				sack = (struct sctp_sack_chunk *)ch;
+				flags = ch->chunk_flags;
+				cum_ack = ntohl(sack->sack.cum_tsn_ack);
+				num_seg = ntohs(sack->sack.num_gap_ack_blks);
+				num_dup = ntohs(sack->sack.num_dup_tsns);
+				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
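+				/*
+				 * The chunk length must exactly match the fixed
+				 * SACK header plus the advertised gap-ack blocks
+				 * and duplicate TSN reports; otherwise ignore it.
+				 */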
+				if (sizeof(struct sctp_sack_chunk) +
+				    num_seg * sizeof(struct sctp_gap_ack_block) +
+				    num_dup * sizeof(uint32_t) != chk_length) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n");
+					break;
+				}
+				offset_seg = *offset + sizeof(struct sctp_sack_chunk);
+				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
+				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
+				        cum_ack, num_seg, a_rwnd);
+				stcb->asoc.seen_a_sack_this_pkt = 1;
+				if ((stcb->asoc.pr_sctp_cnt == 0) &&
+				    (num_seg == 0) &&
+				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
+				    (stcb->asoc.saw_sack_with_frags == 0) &&
+				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
+				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
+					) {
+					/* We have a SIMPLE sack with no prior segments and
+					 * data on the sent queue to be acked. Use the faster
+					 * path sack processing. We also allow window update
+					 * sacks with no missing segments to go this way too.
+					 */
+					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen);
+				} else {
+					if (netp && *netp)
+						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
+								 num_seg, 0, num_dup, &abort_now, flags,
+								 cum_ack, a_rwnd, ecne_seen);
+				}
+				if (abort_now) {
+					/* ABORT signal from sack processing */
+					*offset = length;
+					return (NULL);
+				}
+				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+				    (stcb->asoc.stream_queue_cnt == 0)) {
+					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb,  0, NULL, SCTP_SO_NOT_LOCKED);
+				}
+			}
+			break;
+		/* EY - nr_sack:  If the received chunk is an nr_sack chunk */
+		case SCTP_NR_SELECTIVE_ACK:
+			{
+				struct sctp_nr_sack_chunk *nr_sack;
+				int abort_now = 0;
+				uint32_t a_rwnd, cum_ack;
+				uint16_t num_seg, num_nr_seg, num_dup;
+				uint8_t flags;
+				int offset_seg, offset_dup;
+
+				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n");
+				SCTP_STAT_INCR(sctps_recvsacks);
+				if (stcb == NULL) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n");
+					break;
+				}
+				if (stcb->asoc.nrsack_supported == 0) {
+					goto unknown_chunk;
+				}
+				if (chk_length < sizeof(struct sctp_nr_sack_chunk)) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n");
+					break;
+				}
+				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
+					/*-
+					 * If we have sent a shutdown-ack, we will pay no
+					 * attention to a sack sent in to us since
+					 * we don't care anymore.
+					 */
+					break;
+				}
+				nr_sack = (struct sctp_nr_sack_chunk *)ch;
+				flags = ch->chunk_flags;
+				cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack);
+				num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks);
+				num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks);
+				num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns);
+				a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd);
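+				/*
+				 * As with a plain SACK, the chunk length must
+				 * account for both the gap-ack and nr-gap-ack
+				 * blocks plus the duplicate TSN reports.
+				 */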
+				if (sizeof(struct sctp_nr_sack_chunk) +
+				    (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) +
+				    num_dup * sizeof(uint32_t) != chk_length) {
+					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n");
+					break;
+				}
+				offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk);
+				offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block);
+				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
+				        cum_ack, num_seg, a_rwnd);
+				stcb->asoc.seen_a_sack_this_pkt = 1;
+				if ((stcb->asoc.pr_sctp_cnt == 0) &&
+				    (num_seg == 0) && (num_nr_seg == 0) &&
+				    SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) &&
+				    (stcb->asoc.saw_sack_with_frags == 0) &&
+				    (stcb->asoc.saw_sack_with_nr_frags == 0) &&
+				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+					/*
+					 * We have a SIMPLE sack having no
+					 * prior segments and data on sent
+					 * queue to be acked. Use the
+					 * faster path sack processing. We
+					 * also allow window update sacks
+					 * with no missing segments to go
+					 * this way too.
+					 */
+					sctp_express_handle_sack(stcb, cum_ack, a_rwnd,
+					                         &abort_now, ecne_seen);
+				} else {
+					if (netp && *netp)
+						sctp_handle_sack(m, offset_seg, offset_dup, stcb,
+						                 num_seg, num_nr_seg, num_dup, &abort_now, flags,
+						                 cum_ack, a_rwnd, ecne_seen);
+				}
+				if (abort_now) {
+					/* ABORT signal from sack processing */
+					*offset = length;
+					return (NULL);
+				}
+				if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+				    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+				    (stcb->asoc.stream_queue_cnt == 0)) {
+					sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb,  0, NULL, SCTP_SO_NOT_LOCKED);
+				}
+			}
+			break;
+
+		case SCTP_HEARTBEAT_REQUEST:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
+			if ((stcb) && netp && *netp) {
+				SCTP_STAT_INCR(sctps_recvheartbeat);
+				sctp_send_heartbeat_ack(stcb, m, *offset,
+							chk_length, *netp);
+
+				/* He's alive so give him credit */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+						       stcb->asoc.overall_error_count,
+						       0,
+						       SCTP_FROM_SCTP_INPUT,
+						       __LINE__);
+				}
+				stcb->asoc.overall_error_count = 0;
+			}
+			break;
+		case SCTP_HEARTBEAT_ACK:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
+			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
+				/* It's not ours */
+				*offset = length;
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+			/* He's alive so give him credit */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+					       stcb->asoc.overall_error_count,
+					       0,
+					       SCTP_FROM_SCTP_INPUT,
+					       __LINE__);
+			}
+			stcb->asoc.overall_error_count = 0;
+			SCTP_STAT_INCR(sctps_recvheartbeatack);
+			if (netp && *netp)
+				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
+							  stcb, *netp);
+			break;
+		case SCTP_ABORT_ASSOCIATION:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
+				(void *)stcb);
+			if ((stcb) && netp && *netp)
+				sctp_handle_abort((struct sctp_abort_chunk *)ch,
+						  stcb, *netp);
+			*offset = length;
+			return (NULL);
+			break;
+		case SCTP_SHUTDOWN:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
+				(void *)stcb);
+			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
+				*offset = length;
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+			if (netp && *netp) {
+				int abort_flag = 0;
+
+				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
+						     stcb, *netp, &abort_flag);
+				if (abort_flag) {
+					*offset = length;
+					return (NULL);
+				}
+			}
+			break;
+		case SCTP_SHUTDOWN_ACK:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb);
+			if ((stcb) && (netp) && (*netp))
+				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
+			*offset = length;
+			return (NULL);
+			break;
+
+		case SCTP_OPERATION_ERROR:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
+			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {
+				*offset = length;
+				return (NULL);
+			}
+			break;
+		case SCTP_COOKIE_ECHO:
+			SCTPDBG(SCTP_DEBUG_INPUT3,
+				"SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb);
+			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+				;
+			} else {
+				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+					/* We are not interested anymore */
+				abend:
+					if (stcb) {
+						SCTP_TCB_UNLOCK(stcb);
+					}
+					*offset = length;
+					return (NULL);
+				}
+			}
+			/*
+			 * First, are we accepting? We check this again here
+			 * since it is possible that a previous endpoint that
+			 * WAS listening responded to an INIT-ACK and then
+			 * closed; we then opened and bound and are now no
+			 * longer listening.
+			 */
+
+			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+				    (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) {
+					op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
+					sctp_abort_association(inp, stcb, m, iphlen,
+					                       src, dst, sh, op_err,
+#if defined(__FreeBSD__)
+					                       mflowtype, mflowid,
+#endif
+					                       vrf_id, port);
+				}
+				*offset = length;
+				return (NULL);
+			} else {
+				struct mbuf *ret_buf;
+				struct sctp_inpcb *linp;
+				if (stcb) {
+					linp = NULL;
+				} else {
+					linp = inp;
+				}
+
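+				/*
+				 * Take the association-create lock only when no
+				 * TCB exists yet, since processing this
+				 * COOKIE-ECHO may set up a new association on
+				 * the listening endpoint.
+				 */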
+				if (linp) {
+					SCTP_ASOC_CREATE_LOCK(linp);
+					if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+					    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+						SCTP_ASOC_CREATE_UNLOCK(linp);
+						goto abend;
+					}
+				}
+
+				if (netp) {
+					ret_buf =
+						sctp_handle_cookie_echo(m, iphlen,
+						                        *offset,
+						                        src, dst,
+						                        sh,
+						                        (struct sctp_cookie_echo_chunk *)ch,
+						                        &inp, &stcb, netp,
+						                        auth_skipped,
+						                        auth_offset,
+						                        auth_len,
+						                        &locked_tcb,
+#if defined(__FreeBSD__)
+						                        mflowtype,
+						                        mflowid,
+#endif
+						                        vrf_id,
+						                        port);
+				} else {
+					ret_buf = NULL;
+				}
+				if (linp) {
+					SCTP_ASOC_CREATE_UNLOCK(linp);
+				}
+				if (ret_buf == NULL) {
+					if (locked_tcb) {
+						SCTP_TCB_UNLOCK(locked_tcb);
+					}
+					SCTPDBG(SCTP_DEBUG_INPUT3,
+						"GAK, null buffer\n");
+					*offset = length;
+					return (NULL);
+				}
+				/* if AUTH skipped, see if it verified... */
+				if (auth_skipped) {
+					got_auth = 1;
+					auth_skipped = 0;
+				}
+				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+					/*
+					 * Restart the timer if we have
+					 * pending data
+					 */
+					struct sctp_tmit_chunk *chk;
+
+					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+					sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
+				}
+			}
+			break;
+		case SCTP_COOKIE_ACK:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb);
+			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+				/* We are not interested anymore */
+				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
+					;
+				} else if (stcb) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					so = SCTP_INP_SO(inp);
+					atomic_add_int(&stcb->asoc.refcnt, 1);
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_SOCKET_LOCK(so, 1);
+					SCTP_TCB_LOCK(stcb);
+					atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+					                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+					*offset = length;
+					return (NULL);
+				}
+			}
+			/* He's alive so give him credit */
+			if ((stcb) && netp && *netp) {
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+						       stcb->asoc.overall_error_count,
+						       0,
+						       SCTP_FROM_SCTP_INPUT,
+						       __LINE__);
+				}
+				stcb->asoc.overall_error_count = 0;
+				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch,stcb, *netp);
+			}
+			break;
+		case SCTP_ECN_ECHO:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
+			/* He's alive so give him credit */
+			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
+				/* It's not ours */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+			if (stcb) {
+				if (stcb->asoc.ecn_supported == 0) {
+					goto unknown_chunk;
+				}
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+						       stcb->asoc.overall_error_count,
+						       0,
+						       SCTP_FROM_SCTP_INPUT,
+						       __LINE__);
+				}
+				stcb->asoc.overall_error_count = 0;
+				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
+						     stcb);
+				ecne_seen = 1;
+			}
+			break;
+		case SCTP_ECN_CWR:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
+			/* He's alive so give him credit */
+			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
+				/* It's not ours */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+			if (stcb) {
+				if (stcb->asoc.ecn_supported == 0) {
+					goto unknown_chunk;
+				}
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+						       stcb->asoc.overall_error_count,
+						       0,
+						       SCTP_FROM_SCTP_INPUT,
+						       __LINE__);
+				}
+				stcb->asoc.overall_error_count = 0;
+				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp);
+			}
+			break;
+		case SCTP_SHUTDOWN_COMPLETE:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb);
+			/* must be first and only chunk */
+			if ((num_chunks > 1) ||
+			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
+				*offset = length;
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				return (NULL);
+			}
+			if ((stcb) && netp && *netp) {
+				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
+							      stcb, *netp);
+			}
+			*offset = length;
+			return (NULL);
+			break;
+		case SCTP_ASCONF:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
+			/* He's alive so give him credit */
+			if (stcb) {
+				if (stcb->asoc.asconf_supported == 0) {
+					goto unknown_chunk;
+				}
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+						       stcb->asoc.overall_error_count,
+						       0,
+						       SCTP_FROM_SCTP_INPUT,
+						       __LINE__);
+				}
+				stcb->asoc.overall_error_count = 0;
+				sctp_handle_asconf(m, *offset, src,
+						   (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
+				asconf_cnt++;
+			}
+			break;
+		case SCTP_ASCONF_ACK:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
+			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
+				/* It's not ours */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+			if ((stcb) && netp && *netp) {
+				if (stcb->asoc.asconf_supported == 0) {
+					goto unknown_chunk;
+				}
+				/* He's alive so give him credit */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+						       stcb->asoc.overall_error_count,
+						       0,
+						       SCTP_FROM_SCTP_INPUT,
+						       __LINE__);
+				}
+				stcb->asoc.overall_error_count = 0;
+				sctp_handle_asconf_ack(m, *offset,
+						       (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
+				if (abort_no_unlock)
+					return (NULL);
+			}
+			break;
+		case SCTP_FORWARD_CUM_TSN:
+		case SCTP_IFORWARD_CUM_TSN:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
+			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
+				/* It's not ours */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+
+			/* He's alive so give him credit */
+			if (stcb) {
+				int abort_flag = 0;
+
+				if (stcb->asoc.prsctp_supported == 0) {
+					goto unknown_chunk;
+				}
+				stcb->asoc.overall_error_count = 0;
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+						       stcb->asoc.overall_error_count,
+						       0,
+						       SCTP_FROM_SCTP_INPUT,
+						       __LINE__);
+				}
+				*fwd_tsn_seen = 1;
+				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+					/* We are not interested anymore */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					so = SCTP_INP_SO(inp);
+					atomic_add_int(&stcb->asoc.refcnt, 1);
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_SOCKET_LOCK(so, 1);
+					SCTP_TCB_LOCK(stcb);
+					atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+					                      SCTP_FROM_SCTP_INPUT + SCTP_LOC_31);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+					SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+					*offset = length;
+					return (NULL);
+				}
+				/*
+				 * For sending a SACK this looks like DATA
+				 * chunks.
+				 */
+				stcb->asoc.last_data_chunk_from = stcb->asoc.last_control_chunk_from;
+				sctp_handle_forward_tsn(stcb,
+							(struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
+				if (abort_flag) {
+					*offset = length;
+					return (NULL);
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+							       stcb->asoc.overall_error_count,
+							       0,
+							       SCTP_FROM_SCTP_INPUT,
+							       __LINE__);
+					}
+					stcb->asoc.overall_error_count = 0;
+				}
+
+			}
+			break;
+		case SCTP_STREAM_RESET:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
+			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
+				/* It's not ours */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+			if (stcb->asoc.reconfig_supported == 0) {
+				goto unknown_chunk;
+			}
+			if (sctp_handle_stream_reset(stcb, m, *offset, ch)) {
+				/* stop processing */
+				*offset = length;
+				return (NULL);
+			}
+			break;
+		case SCTP_PACKET_DROPPED:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
+			/* re-get it all please */
+			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
+				/* It's not ours */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+
+
+			if (ch && (stcb) && netp && (*netp)) {
+				if (stcb->asoc.pktdrop_supported == 0) {
+					goto unknown_chunk;
+				}
+				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
+							   stcb, *netp,
+							   min(chk_length, (sizeof(chunk_buf) - 4)));
+
+			}
+
+			break;
+		case SCTP_AUTHENTICATION:
+			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
+			if (stcb == NULL) {
+				/* save the first AUTH for later processing */
+				if (auth_skipped == 0) {
+					auth_offset = *offset;
+					auth_len = chk_length;
+					auth_skipped = 1;
+				}
+				/* skip this chunk (temporarily) */
+				goto next_chunk;
+			}
+			if (stcb->asoc.auth_supported == 0) {
+				goto unknown_chunk;
+			}
+			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
+			    (chk_length > (sizeof(struct sctp_auth_chunk) +
+					   SCTP_AUTH_DIGEST_LEN_MAX))) {
+				/* It's not ours */
+				if (locked_tcb) {
+					SCTP_TCB_UNLOCK(locked_tcb);
+				}
+				*offset = length;
+				return (NULL);
+			}
+			if (got_auth == 1) {
+				/* skip this chunk... it's already auth'd */
+				goto next_chunk;
+			}
+			got_auth = 1;
+			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
+							     m, *offset)) {
+				/* auth HMAC failed so dump the packet */
+				*offset = length;
+				return (stcb);
+			} else {
+				/* remaining chunks are HMAC checked */
+				stcb->asoc.authenticated = 1;
+			}
+			break;
+
+		default:
+		unknown_chunk:
+			/* it's an unknown chunk! */
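+			/*
+			 * The two high-order bits of the chunk type say how an
+			 * unrecognized chunk is handled: 0x40 asks us to report
+			 * it in an "unrecognized chunk" error cause, and 0x80
+			 * lets us skip it and keep processing the packet.
+			 */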
+			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
+				struct sctp_gen_error_cause *cause;
+				int len;
+
+				op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
+				                               0, M_NOWAIT, 1, MT_DATA);
+				if (op_err != NULL) {
+					len = min(SCTP_SIZE32(chk_length), (uint32_t)(length - *offset));
+					cause = mtod(op_err, struct sctp_gen_error_cause *);
+					cause->code =  htons(SCTP_CAUSE_UNRECOG_CHUNK);
+					cause->length = htons((uint16_t)(len + sizeof(struct sctp_gen_error_cause)));
+					SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
+					SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, len, M_NOWAIT);
+					if (SCTP_BUF_NEXT(op_err) != NULL) {
+#ifdef SCTP_MBUF_LOGGING
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+							sctp_log_mbc(SCTP_BUF_NEXT(op_err), SCTP_MBUF_ICOPY);
+						}
+#endif
+						sctp_queue_op_err(stcb, op_err);
+					} else {
+						sctp_m_freem(op_err);
+					}
+				}
+			}
+			if ((ch->chunk_type & 0x80) == 0) {
+				/* discard this packet */
+				*offset = length;
+				return (stcb);
+			}	/* else skip this bad chunk and continue... */
+			break;
+		}		/* switch (ch->chunk_type) */
+
+
+	next_chunk:
+		/* get the next chunk */
+		*offset += SCTP_SIZE32(chk_length);
+		if (*offset >= length) {
+			/* no more data left in the mbuf chain */
+			break;
+		}
+		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
+							   sizeof(struct sctp_chunkhdr), chunk_buf);
+		if (ch == NULL) {
+			if (locked_tcb) {
+				SCTP_TCB_UNLOCK(locked_tcb);
+			}
+			*offset = length;
+			return (NULL);
+		}
+	}			/* while */
+
+	if (asconf_cnt > 0 && stcb != NULL) {
+		sctp_send_asconf_ack(stcb);
+	}
+	return (stcb);
+}
+
+
+/*
+ * common input chunk processing (v4 and v6)
+ */
+void
+sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length,
+                             struct sockaddr *src, struct sockaddr *dst,
+                             struct sctphdr *sh, struct sctp_chunkhdr *ch,
+#if !defined(SCTP_WITH_NO_CSUM)
+                             uint8_t compute_crc,
+#endif
+                             uint8_t ecn_bits,
+#if defined(__FreeBSD__)
+                             uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+                             uint32_t vrf_id, uint16_t port)
+{
+	uint32_t high_tsn;
+	int fwd_tsn_seen = 0, data_processed = 0;
+	struct mbuf *m = *mm, *op_err;
+	char msg[SCTP_DIAG_INFO_LEN];
+	int un_sent;
+	int cnt_ctrl_ready = 0;
+	struct sctp_inpcb *inp = NULL, *inp_decr = NULL;
+	struct sctp_tcb *stcb = NULL;
+	struct sctp_nets *net = NULL;
+#if defined(__Userspace__)
+	struct socket *upcall_socket = NULL;
+#endif
+	SCTP_STAT_INCR(sctps_recvdatagrams);
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_audit_log(0xE0, 1);
+	sctp_auditing(0, inp, stcb, net);
+#endif
+#if !defined(SCTP_WITH_NO_CSUM)
+	if (compute_crc != 0) {
+		uint32_t check, calc_check;
+
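+		/*
+		 * The CRC32c covers the common header and all chunks and is
+		 * computed with the checksum field zeroed; the received value
+		 * is restored afterwards so later code still sees it.
+		 */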
+		check = sh->checksum;
+		sh->checksum = 0;
+		calc_check = sctp_calculate_cksum(m, iphlen);
+		sh->checksum = check;
+		if (calc_check != check) {
+			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x  m:%p mlen:%d iphlen:%d\n",
+			        calc_check, check, (void *)m, length, iphlen);
+			stcb = sctp_findassociation_addr(m, offset, src, dst,
+			                                 sh, ch, &inp, &net, vrf_id);
+#if defined(INET) || defined(INET6)
+			if ((ch->chunk_type != SCTP_INITIATION) &&
+			    (net != NULL) && (net->port != port)) {
+				if (net->port == 0) {
+					/* UDP encapsulation turned on. */
+					net->mtu -= sizeof(struct udphdr);
+					if (stcb->asoc.smallest_mtu > net->mtu) {
+						sctp_pathmtu_adjustment(stcb, net->mtu);
+					}
+				} else if (port == 0) {
+					/* UDP encapsulation turned off. */
+					net->mtu += sizeof(struct udphdr);
+					/* XXX Update smallest_mtu */
+				}
+				net->port = port;
+			}
+#endif
+#if defined(__FreeBSD__)
+			if (net != NULL) {
+				net->flowtype = mflowtype;
+				net->flowid = mflowid;
+			}
+#endif
+			if ((inp != NULL) && (stcb != NULL)) {
+				sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1);
+				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
+			} else if ((inp != NULL) && (stcb == NULL)) {
+				inp_decr = inp;
+			}
+			SCTP_STAT_INCR(sctps_badsum);
+			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
+			goto out;
+		}
+	}
+#endif
+	/* Destination port of 0 is illegal, based on RFC4960. */
+	if (sh->dest_port == 0) {
+		SCTP_STAT_INCR(sctps_hdrops);
+		goto out;
+	}
+	stcb = sctp_findassociation_addr(m, offset, src, dst,
+	                                 sh, ch, &inp, &net, vrf_id);
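+	/*
+	 * If the remote UDP encapsulation port changed, adjust the path MTU
+	 * for the UDP header being added or removed and remember the new
+	 * port on this net.
+	 */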
+#if defined(INET) || defined(INET6)
+	if ((ch->chunk_type != SCTP_INITIATION) &&
+	    (net != NULL) && (net->port != port)) {
+		if (net->port == 0) {
+			/* UDP encapsulation turned on. */
+			net->mtu -= sizeof(struct udphdr);
+			if (stcb->asoc.smallest_mtu > net->mtu) {
+				sctp_pathmtu_adjustment(stcb, net->mtu);
+			}
+		} else if (port == 0) {
+			/* UDP encapsulation turned off. */
+			net->mtu += sizeof(struct udphdr);
+			/* XXX Update smallest_mtu */
+		}
+		net->port = port;
+	}
+#endif
+#if defined(__FreeBSD__)
+	if (net != NULL) {
+		net->flowtype = mflowtype;
+		net->flowid = mflowid;
+	}
+#endif
+	if (inp == NULL) {
+		SCTP_STAT_INCR(sctps_noport);
+#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
+		if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) {
+			goto out;
+		}
+#endif
+		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
+			sctp_send_shutdown_complete2(src, dst, sh,
+#if defined(__FreeBSD__)
+			                             mflowtype, mflowid, fibnum,
+#endif
+			                             vrf_id, port);
+			goto out;
+		}
+		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
+			goto out;
+		}
+		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) {
+			if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
+			    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
+			     (ch->chunk_type != SCTP_INIT))) {
+				op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+				                             "Out of the blue");
+				sctp_send_abort(m, iphlen, src, dst,
+				                sh, 0, op_err,
+#if defined(__FreeBSD__)
+				                mflowtype, mflowid, fibnum,
+#endif
+				                vrf_id, port);
+			}
+		}
+		goto out;
+	} else if (stcb == NULL) {
+		inp_decr = inp;
+	}
+#ifdef IPSEC
+	/*-
+	 * I very much doubt any of the IPSEC stuff will work but I have no
+	 * idea, so I will leave it in place.
+	 */
+	if (inp != NULL) {
+		switch (dst->sa_family) {
+#ifdef INET
+		case AF_INET:
+			if (ipsec4_in_reject(m, &inp->ip_inp.inp)) {
+				SCTP_STAT_INCR(sctps_hdrops);
+				goto out;
+			}
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			if (ipsec6_in_reject(m, &inp->ip_inp.inp)) {
+				SCTP_STAT_INCR(sctps_hdrops);
+				goto out;
+			}
+			break;
+#endif
+		default:
+			break;
+		}
+	}
+#endif
+	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
+		(void *)m, iphlen, offset, length, (void *)stcb);
+	if (stcb) {
+		/* always clear this before beginning a packet */
+		stcb->asoc.authenticated = 0;
+		stcb->asoc.seen_a_sack_this_pkt = 0;
+		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
+			(void *)stcb, stcb->asoc.state);
+
+		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
+		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
+			/*-
+			 * If we hit here, we had a ref count
+			 * up when the assoc was aborted and the
+			 * timer is clearing out the assoc; we should
+			 * NOT respond to any packet. It's OOTB.
+			 */
+			SCTP_TCB_UNLOCK(stcb);
+			stcb = NULL;
+			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             msg);
+			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__)
+			                 mflowtype, mflowid, inp->fibnum,
+#endif
+			                 vrf_id, port);
+			goto out;
+		}
+
+	}
+#if defined(__Userspace__)
+	if (stcb && !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		if (stcb->sctp_socket != NULL) {
+			if (stcb->sctp_socket->so_head != NULL) {
+				upcall_socket = stcb->sctp_socket->so_head;
+			} else {
+				upcall_socket = stcb->sctp_socket;
+			}
+			SOCK_LOCK(upcall_socket);
+			soref(upcall_socket);
+			SOCK_UNLOCK(upcall_socket);
+		}
+	}
+#endif
+	if (IS_SCTP_CONTROL(ch)) {
+		/* process the control portion of the SCTP packet */
+		/* sa_ignore NO_NULL_CHK */
+		stcb = sctp_process_control(m, iphlen, &offset, length,
+		                            src, dst, sh, ch,
+		                            inp, stcb, &net, &fwd_tsn_seen,
+#if defined(__FreeBSD__)
+		                            mflowtype, mflowid, fibnum,
+#endif
+		                            vrf_id, port);
+		if (stcb) {
+			/* This covers us if the cookie-echo was there
+			 * and it changes our INP.
+			 */
+			inp = stcb->sctp_ep;
+#if defined(INET) || defined(INET6)
+			if ((ch->chunk_type != SCTP_INITIATION) &&
+			    (net != NULL) && (net->port != port)) {
+				if (net->port == 0) {
+					/* UDP encapsulation turned on. */
+					net->mtu -= sizeof(struct udphdr);
+					if (stcb->asoc.smallest_mtu > net->mtu) {
+						sctp_pathmtu_adjustment(stcb, net->mtu);
+					}
+				} else if (port == 0) {
+					/* UDP encapsulation turned off. */
+					net->mtu += sizeof(struct udphdr);
+					/* XXX Update smallest_mtu */
+				}
+				net->port = port;
+			}
+#endif
+		}
+	} else {
+		/*
+		 * no control chunks, so pre-process DATA chunks (these
+		 * checks are taken care of by control processing)
+		 */
+
+		/*
+		 * if DATA only packet, and auth is required, then punt...
+		 * can't have authenticated without any AUTH (control)
+		 * chunks
+		 */
+		if ((stcb != NULL) &&
+		    (stcb->asoc.auth_supported == 1) &&
+		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
+			/* "silently" ignore */
+			SCTP_STAT_INCR(sctps_recvauthmissing);
+			goto out;
+		}
+		if (stcb == NULL) {
+			/* out of the blue DATA chunk */
+			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             msg);
+			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__)
+			                 mflowtype, mflowid, fibnum,
+#endif
+					 vrf_id, port);
+			goto out;
+		}
+		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
+			/* v_tag mismatch! */
+			SCTP_STAT_INCR(sctps_badvtag);
+			goto out;
+		}
+	}
+
+	if (stcb == NULL) {
+		/*
+		 * no valid TCB for this packet, or we found it's a bad
+		 * packet while processing control, or we're done with this
+		 * packet (done or skip rest of data), so we drop it...
+		 */
+		goto out;
+	}
+#if defined(__Userspace__)
+	if (stcb && upcall_socket == NULL && !(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		if (stcb->sctp_socket != NULL) {
+			if (stcb->sctp_socket->so_head != NULL) {
+				upcall_socket = stcb->sctp_socket->so_head;
+			} else {
+				upcall_socket = stcb->sctp_socket;
+			}
+			SOCK_LOCK(upcall_socket);
+			soref(upcall_socket);
+			SOCK_UNLOCK(upcall_socket);
+		}
+	}
+#endif
+	/*
+	 * DATA chunk processing
+	 */
+	/* plow through the data chunks while length > offset */
+
+	/*
+	 * Rest should be DATA only.  Check authentication state if AUTH for
+	 * DATA is required.
+	 */
+	if ((length > offset) &&
+	    (stcb != NULL) &&
+	    (stcb->asoc.auth_supported == 1) &&
+	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
+	    !stcb->asoc.authenticated) {
+		/* "silently" ignore */
+		SCTP_STAT_INCR(sctps_recvauthmissing);
+		SCTPDBG(SCTP_DEBUG_AUTH1,
+			"Data chunk requires AUTH, skipped\n");
+		goto trigger_send;
+	}
+	if (length > offset) {
+		int retval;
+
+		/*
+		 * First check to make sure our state is correct. We would
+		 * not get here unless we really did have a tag, so we don't
+		 * abort if this happens, just dump the chunk silently.
+		 */
+		switch (SCTP_GET_STATE(&stcb->asoc)) {
+		case SCTP_STATE_COOKIE_ECHOED:
+			/*
+			 * We consider that data with a valid tag in this
+			 * state shows us the cookie-ack was lost; imply
+			 * that it was there.
+			 */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
+					       stcb->asoc.overall_error_count,
+					       0,
+					       SCTP_FROM_SCTP_INPUT,
+					       __LINE__);
+			}
+			stcb->asoc.overall_error_count = 0;
+			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
+			break;
+		case SCTP_STATE_COOKIE_WAIT:
+			/*
+			 * We consider OOTB any data sent during asoc setup.
+			 */
+			snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s", __FILE__, __LINE__, __func__);
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             msg);
+			sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err,
+#if defined(__FreeBSD__)
+			                 mflowtype, mflowid, inp->fibnum,
+#endif
+					 vrf_id, port);
+			goto out;
+			/*sa_ignore NOTREACHED*/
+			break;
+		case SCTP_STATE_EMPTY:	/* should not happen */
+		case SCTP_STATE_INUSE:	/* should not happen */
+		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
+		case SCTP_STATE_SHUTDOWN_ACK_SENT:
+		default:
+			goto out;
+			/*sa_ignore NOTREACHED*/
+			break;
+		case SCTP_STATE_OPEN:
+		case SCTP_STATE_SHUTDOWN_SENT:
+			break;
+		}
+		/* plow through the data chunks while length > offset */
+		retval = sctp_process_data(mm, iphlen, &offset, length,
+		                           inp, stcb, net, &high_tsn);
+		if (retval == 2) {
+			/*
+			 * The association aborted, NO UNLOCK needed since
+			 * the association is destroyed.
+			 */
+			stcb = NULL;
+			goto out;
+		}
+		data_processed = 1;
+		/*
+		 * Anything important needs to have been m_copy'ed in
+		 * process_data
+		 */
+	}
+
+	/* take care of ecn */
+	if ((data_processed == 1) &&
+	    (stcb->asoc.ecn_supported == 1) &&
+	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
+		/* Yep, we need to add an ECNE */
+		sctp_send_ecn_echo(stcb, net, high_tsn);
+	}
+
+	if ((data_processed == 0) && (fwd_tsn_seen)) {
+		int was_a_gap;
+		uint32_t highest_tsn;
+
+		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
+			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
+		} else {
+			highest_tsn = stcb->asoc.highest_tsn_inside_map;
+		}
+		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
+		stcb->asoc.send_sack = 1;
+		sctp_sack_check(stcb, was_a_gap);
+	} else if (fwd_tsn_seen) {
+		stcb->asoc.send_sack = 1;
+	}
+	/* trigger send of any chunks in queue... */
+trigger_send:
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_audit_log(0xE0, 2);
+	sctp_auditing(1, inp, stcb, net);
+#endif
+	SCTPDBG(SCTP_DEBUG_INPUT1,
+		"Check for chunk output prw:%d tqe:%d tf=%d\n",
+		stcb->asoc.peers_rwnd,
+		TAILQ_EMPTY(&stcb->asoc.control_send_queue),
+		stcb->asoc.total_flight);
+	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
+	}
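+	/*
+	 * Kick the output path if control chunks or ASCONFs are queued, a
+	 * stream reset is pending, or there is unsent user data and either
+	 * the peer still advertises window or nothing is in flight (so a
+	 * window probe can go out).
+	 */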
+	if (!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue) ||
+	    cnt_ctrl_ready ||
+	    stcb->asoc.trigger_reset ||
+	    ((un_sent) &&
+	     (stcb->asoc.peers_rwnd > 0 ||
+	      (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
+		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
+		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
+	}
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_audit_log(0xE0, 3);
+	sctp_auditing(2, inp, stcb, net);
+#endif
+ out:
+	if (stcb != NULL) {
+		SCTP_TCB_UNLOCK(stcb);
+	}
+#if defined(__Userspace__)
+	if (upcall_socket != NULL) {
+		if (upcall_socket->so_upcall != NULL) {
+			(*upcall_socket->so_upcall)(upcall_socket, upcall_socket->so_upcallarg, M_NOWAIT);
+		}
+		ACCEPT_LOCK();
+		SOCK_LOCK(upcall_socket);
+		sorele(upcall_socket);
+	}
+#endif
+	if (inp_decr != NULL) {
+		/* reduce ref-count */
+		SCTP_INP_WLOCK(inp_decr);
+		SCTP_INP_DECR_REF(inp_decr);
+		SCTP_INP_WUNLOCK(inp_decr);
+	}
+	return;
+}
+
+#ifdef INET
+#if !defined(__Userspace__)
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+void
+sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port)
+#elif defined(__Panda__)
+void
+sctp_input(pakhandle_type i_pak)
+#else
+void
+#if __STDC__
+sctp_input(struct mbuf *i_pak,...)
+#else
+sctp_input(i_pak, va_alist)
+	struct mbuf *i_pak;
+#endif
+#endif
+{
+	struct mbuf *m;
+	int iphlen;
+	uint32_t vrf_id = 0;
+	uint8_t ecn_bits;
+	struct sockaddr_in src, dst;
+	struct ip *ip;
+	struct sctphdr *sh;
+	struct sctp_chunkhdr *ch;
+	int length, offset;
+#if !defined(SCTP_WITH_NO_CSUM)
+	uint8_t compute_crc;
+#endif
+#if defined(__FreeBSD__)
+	uint32_t mflowid;
+	uint8_t mflowtype;
+	uint16_t fibnum;
+#endif
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__))
+	uint16_t port = 0;
+#endif
+
+#if defined(__Panda__)
+	/* This is Evil, but it's the only way to make panda work right. */
+	iphlen = sizeof(struct ip);
+#else
+	iphlen = off;
+#endif
+	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
+		SCTP_RELEASE_PKT(i_pak);
+		return;
+	}
+	m = SCTP_HEADER_TO_CHAIN(i_pak);
+#ifdef __Panda__
+	SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
+	(void)SCTP_RELEASE_HEADER(i_pak);
+#endif
+#ifdef SCTP_MBUF_LOGGING
+	/* Log in any input mbufs */
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mbc(m, SCTP_MBUF_INPUT);
+	}
+#endif
+#ifdef SCTP_PACKET_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+		sctp_packet_log(m);
+	}
+#endif
+#if defined(__FreeBSD__)
+#if __FreeBSD_version > 1000049
+	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n",
+	        m->m_pkthdr.len,
+	        if_name(m->m_pkthdr.rcvif),
+	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
+#elif __FreeBSD_version >= 800000
+	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+	        m->m_pkthdr.len,
+	        if_name(m->m_pkthdr.rcvif),
+	        m->m_pkthdr.csum_flags);
+#else
+	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+	        m->m_pkthdr.len,
+	        m->m_pkthdr.rcvif->if_xname,
+	        m->m_pkthdr.csum_flags);
+#endif
+#endif
+#if defined(__APPLE__)
+	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+	        "sctp_input(): Packet of length %d received on %s%d with csum_flags 0x%x.\n",
+	        m->m_pkthdr.len,
+	        m->m_pkthdr.rcvif->if_name,
+	        m->m_pkthdr.rcvif->if_unit,
+	        m->m_pkthdr.csum_flags);
+#endif
+#if defined(__Windows__)
+	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+	        "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+	        m->m_pkthdr.len,
+	        m->m_pkthdr.rcvif->if_xname,
+	        m->m_pkthdr.csum_flags);
+#endif
+#if defined(__FreeBSD__)
+	mflowid = m->m_pkthdr.flowid;
+	mflowtype = M_HASHTYPE_GET(m);
+	fibnum = M_GETFIB(m);
+#endif
+	SCTP_STAT_INCR(sctps_recvpackets);
+	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
+	/* Get IP, SCTP, and first chunk header together in the first mbuf. */
+	offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+	if (SCTP_BUF_LEN(m) < offset) {
+		if ((m = m_pullup(m, offset)) == NULL) {
+			SCTP_STAT_INCR(sctps_hdrops);
+			return;
+		}
+	}
+	ip = mtod(m, struct ip *);
+	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
+	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
+	offset -= sizeof(struct sctp_chunkhdr);
+	memset(&src, 0, sizeof(struct sockaddr_in));
+	src.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	src.sin_len = sizeof(struct sockaddr_in);
+#endif
+	src.sin_port = sh->src_port;
+	src.sin_addr = ip->ip_src;
+	memset(&dst, 0, sizeof(struct sockaddr_in));
+	dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+	dst.sin_port = sh->dest_port;
+	dst.sin_addr = ip->ip_dst;
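+	/*
+	 * Platforms differ in whether ip_len has already been byte-swapped
+	 * and whether it still includes the IP header, so normalize it to
+	 * the total packet length in host byte order.
+	 */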
+#if defined(__Windows__)
+	NTOHS(ip->ip_len);
+#endif
+#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
+	ip->ip_len = ntohs(ip->ip_len);
+#endif
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 1000000
+	length = ntohs(ip->ip_len);
+#else
+	length = ip->ip_len + iphlen;
+#endif
+#elif defined(__APPLE__)
+	length = ip->ip_len + iphlen;
+#elif defined(__Userspace__)
+#if defined(__Userspace_os_Linux) || defined(__Userspace_os_Windows)
+	length = ip->ip_len;
+#else
+	length = ip->ip_len + iphlen;
+#endif
+#else
+	length = ip->ip_len;
+#endif
+	/* Validate mbuf chain length with IP payload length. */
+	if (SCTP_HEADER_LEN(m) != length) {
+		SCTPDBG(SCTP_DEBUG_INPUT1,
+		        "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m));
+		SCTP_STAT_INCR(sctps_hdrops);
+		goto out;
+	}
+	/* SCTP does not allow broadcasts or multicasts */
+	if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) {
+		goto out;
+	}
+	if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) {
+		goto out;
+	}
+	ecn_bits = ip->ip_tos;
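+	/*
+	 * Unless checksum verification is compiled out, decide whether the
+	 * CRC32c still needs to be computed in software: it can be skipped
+	 * when the NIC has already validated it or, optionally, for
+	 * loopback traffic.
+	 */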
+#if defined(SCTP_WITH_NO_CSUM)
+	SCTP_STAT_INCR(sctps_recvnocrc);
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+	if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) {
+		SCTP_STAT_INCR(sctps_recvhwcrc);
+		compute_crc = 0;
+	} else {
+#else
+	if (SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+	    ((src.sin_addr.s_addr == dst.sin_addr.s_addr) ||
+	     (SCTP_IS_IT_LOOPBACK(m)))) {
+		SCTP_STAT_INCR(sctps_recvnocrc);
+		compute_crc = 0;
+	} else {
+#endif
+		SCTP_STAT_INCR(sctps_recvswcrc);
+		compute_crc = 1;
+	}
+#endif
+	sctp_common_input_processing(&m, iphlen, offset, length,
+	                             (struct sockaddr *)&src,
+	                             (struct sockaddr *)&dst,
+	                             sh, ch,
+#if !defined(SCTP_WITH_NO_CSUM)
+	                             compute_crc,
+#endif
+	                             ecn_bits,
+#if defined(__FreeBSD__)
+	                             mflowtype, mflowid, fibnum,
+#endif
+	                             vrf_id, port);
+ out:
+	if (m) {
+		sctp_m_freem(m);
+	}
+	return;
+}
+
+#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
+extern int *sctp_cpuarry;
+#endif
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
+int
+sctp_input(struct mbuf **mp, int *offp, int proto SCTP_UNUSED)
+{
+	struct mbuf *m;
+	int off;
+
+	m = *mp;
+	off = *offp;
+#else
+void
+sctp_input(struct mbuf *m, int off)
+{
+#endif
+#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
+	if (mp_ncpus > 1) {
+		struct ip *ip;
+		struct sctphdr *sh;
+		int offset;
+		int cpu_to_use;
+		uint32_t flowid, tag;
+
+		if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+			flowid = m->m_pkthdr.flowid;
+		} else {
+			/* No flow id was built by the lower layers,
+			 * so create one here.
+			 */
+			offset = off + sizeof(struct sctphdr);
+			if (SCTP_BUF_LEN(m) < offset) {
+				if ((m = m_pullup(m, offset)) == NULL) {
+					SCTP_STAT_INCR(sctps_hdrops);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
+					return (IPPROTO_DONE);
+#else
+					return;
+#endif
+				}
+			}
+			ip = mtod(m, struct ip *);
+			sh = (struct sctphdr *)((caddr_t)ip + off);
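+			/*
+			 * Derive a flow id from the verification tag and the
+			 * port pair so that all packets of an association end
+			 * up on the same core.
+			 */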
+			tag = htonl(sh->v_tag);
+			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
+			m->m_pkthdr.flowid = flowid;
+			M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE_HASH);
+		}
+		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
+		sctp_queue_to_mcore(m, off, cpu_to_use);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
+		return (IPPROTO_DONE);
+#else
+		return;
+#endif
+	}
+#endif
+	sctp_input_with_port(m, off, 0);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
+	return (IPPROTO_DONE);
+#endif
+}
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_input.h b/usrsctplib/netinet/sctp_input.h
new file mode 100755
index 0000000..bc4cf42
--- /dev/null
+++ b/usrsctplib/netinet/sctp_input.h
@@ -0,0 +1,66 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_INPUT_H_
+#define _NETINET_SCTP_INPUT_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+void
+sctp_common_input_processing(struct mbuf **, int, int, int,
+                             struct sockaddr *, struct sockaddr *,
+                             struct sctphdr *, struct sctp_chunkhdr *,
+#if !defined(SCTP_WITH_NO_CSUM)
+                             uint8_t,
+#endif
+                             uint8_t,
+#if defined(__FreeBSD__)
+                             uint8_t, uint32_t, uint16_t,
+#endif
+                             uint32_t, uint16_t);
+
+struct sctp_stream_reset_request *
+sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq,
+    struct sctp_tmit_chunk **bchk);
+
+void sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries,
+    uint16_t *list);
+
+
+int sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked);
+
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_lock_userspace.h b/usrsctplib/netinet/sctp_lock_userspace.h
new file mode 100755
index 0000000..83a565c
--- /dev/null
+++ b/usrsctplib/netinet/sctp_lock_userspace.h
@@ -0,0 +1,251 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2008-2012, by Brad Penoff. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+#ifndef _NETINET_SCTP_LOCK_EMPTY_H_
+#define _NETINET_SCTP_LOCK_EMPTY_H_
+
+/*
+ * Empty Lock declarations for all other platforms. Pre-process away to
+ * nothing.
+ */
+
+/* __Userspace__ putting lock macros in same order as sctp_lock_bsd.h ...*/
+
+#define SCTP_IPI_COUNT_INIT()
+
+#define SCTP_STATLOG_INIT_LOCK()
+#define SCTP_STATLOG_LOCK()
+#define SCTP_STATLOG_UNLOCK()
+#define SCTP_STATLOG_DESTROY()
+
+#define SCTP_INP_INFO_LOCK_DESTROY()
+
+#define SCTP_INP_INFO_LOCK_INIT()
+#define SCTP_INP_INFO_RLOCK()
+#define SCTP_INP_INFO_WLOCK()
+#define SCTP_INP_INFO_TRYLOCK() 1
+#define SCTP_INP_INFO_RUNLOCK()
+#define SCTP_INP_INFO_WUNLOCK()
+
+#define SCTP_WQ_ADDR_INIT()
+#define SCTP_WQ_ADDR_DESTROY()
+#define SCTP_WQ_ADDR_LOCK()
+#define SCTP_WQ_ADDR_UNLOCK()
+
+
+#define SCTP_IPI_ADDR_INIT()
+#define SCTP_IPI_ADDR_DESTROY()
+#define SCTP_IPI_ADDR_RLOCK()
+#define SCTP_IPI_ADDR_WLOCK()
+#define SCTP_IPI_ADDR_RUNLOCK()
+#define SCTP_IPI_ADDR_WUNLOCK()
+
+#define SCTP_IPI_ITERATOR_WQ_INIT()
+#define SCTP_IPI_ITERATOR_WQ_DESTROY()
+#define SCTP_IPI_ITERATOR_WQ_LOCK()
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK()
+
+
+#define SCTP_IP_PKTLOG_INIT()
+#define SCTP_IP_PKTLOG_LOCK()
+#define SCTP_IP_PKTLOG_UNLOCK()
+#define SCTP_IP_PKTLOG_DESTROY()
+
+
+
+#define SCTP_INP_READ_INIT(_inp)
+#define SCTP_INP_READ_DESTROY(_inp)
+#define SCTP_INP_READ_LOCK(_inp)
+#define SCTP_INP_READ_UNLOCK(_inp)
+
+#define SCTP_INP_LOCK_INIT(_inp)
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
+#define SCTP_INP_LOCK_DESTROY(_inp)
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
+
+
+#define SCTP_INP_RLOCK(_inp)
+#define SCTP_INP_WLOCK(_inp)
+
+#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_SEND_LOCK(_tcb)
+#define SCTP_TCB_SEND_UNLOCK(_tcb)
+
+#define SCTP_INP_INCR_REF(_inp)
+#define SCTP_INP_DECR_REF(_inp)
+
+#define SCTP_ASOC_CREATE_LOCK(_inp)
+
+#define SCTP_INP_RUNLOCK(_inp)
+#define SCTP_INP_WUNLOCK(_inp)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp)
+
+
+#define SCTP_TCB_LOCK_INIT(_tcb)
+#define SCTP_TCB_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_LOCK(_tcb)
+#define SCTP_TCB_TRYLOCK(_tcb) 1
+#define SCTP_TCB_UNLOCK(_tcb)
+#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+
+
+
+#define SCTP_ITERATOR_LOCK_INIT()
+#define SCTP_ITERATOR_LOCK()
+#define SCTP_ITERATOR_UNLOCK()
+#define SCTP_ITERATOR_LOCK_DESTROY()
+
+
+
+#define SCTP_INCR_EP_COUNT() \
+                do { \
+		       sctppcbinfo.ipi_count_ep++; \
+	        } while (0)
+
+#define SCTP_DECR_EP_COUNT() \
+                do { \
+		       sctppcbinfo.ipi_count_ep--; \
+	        } while (0)
+
+#define SCTP_INCR_ASOC_COUNT() \
+                do { \
+	               sctppcbinfo.ipi_count_asoc++; \
+	        } while (0)
+
+#define SCTP_DECR_ASOC_COUNT() \
+                do { \
+	               sctppcbinfo.ipi_count_asoc--; \
+	        } while (0)
+
+#define SCTP_INCR_LADDR_COUNT() \
+                do { \
+	               sctppcbinfo.ipi_count_laddr++; \
+	        } while (0)
+
+#define SCTP_DECR_LADDR_COUNT() \
+                do { \
+	               sctppcbinfo.ipi_count_laddr--; \
+	        } while (0)
+
+#define SCTP_INCR_RADDR_COUNT() \
+                do { \
+ 	               sctppcbinfo.ipi_count_raddr++; \
+	        } while (0)
+
+#define SCTP_DECR_RADDR_COUNT() \
+                do { \
+ 	               sctppcbinfo.ipi_count_raddr--; \
+	        } while (0)
+
+#define SCTP_INCR_CHK_COUNT() \
+                do { \
+  	               sctppcbinfo.ipi_count_chunk++; \
+	        } while (0)
+
+#define SCTP_DECR_CHK_COUNT() \
+                do { \
+  	               sctppcbinfo.ipi_count_chunk--; \
+	        } while (0)
+
+#define SCTP_INCR_READQ_COUNT() \
+                do { \
+		       sctppcbinfo.ipi_count_readq++; \
+	        } while (0)
+
+#define SCTP_DECR_READQ_COUNT() \
+                do { \
+		       sctppcbinfo.ipi_count_readq--; \
+	        } while (0)
+
+#define SCTP_INCR_STRMOQ_COUNT() \
+                do { \
+		       sctppcbinfo.ipi_count_strmoq++; \
+	        } while (0)
+
+#define SCTP_DECR_STRMOQ_COUNT() \
+                do { \
+		       sctppcbinfo.ipi_count_strmoq--; \
+	        } while (0)
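+
+/* Note: with the lock macros above compiled away, these counter updates are
+ * plain, unsynchronized increments/decrements; presumably this is only safe
+ * when the stack is driven from a single thread.
+ */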
+
+
+/* not sure if __Userspace__ needs these (but copied nonetheless...) */
+#if defined(SCTP_SO_LOCK_TESTING)
+#define SCTP_INP_SO(sctpinp)	(sctpinp)->ip_inp.inp.inp_socket
+#define SCTP_SOCKET_LOCK(so, refcnt)
+#define SCTP_SOCKET_UNLOCK(so, refcnt)
+#endif
+
+
+/* these were in sctp_lock_empty.h but aren't in sctp_lock_bsd.h ... */
+#if 0
+#define SCTP_IPI_ADDR_LOCK()
+#define SCTP_IPI_ADDR_UNLOCK()
+#endif
+
+
+/* These were in sctp_lock_empty.h because they were commented out within
+ *  user_include/user_socketvar.h.  If they are NOT commented out in
+ *  user_socketvar.h (because that seems the more natural place for them
+ *  to live), then change this "if" to 0.  Keep the "if" as 1 if these ARE
+ *  indeed commented out in user_socketvar.h.
+ *
+ * This modularity is kept so this file can easily be chosen as an alternative
+ *  to SCTP_PROCESS_LEVEL_LOCKS.  If one defines SCTP_PROCESS_LEVEL_LOCKS in
+ *  user_include/opt_sctp.h, then the file sctp_process_lock.h (which we didn't
+ *  implement) is used, and that declares these locks already (so using
+ *  SCTP_PROCESS_LEVEL_LOCKS *requires* that these definitions be commented out
+ *  in user_socketvar.h).
+ */
+#if 1
+#define SOCK_LOCK(_so)
+#define SOCK_UNLOCK(_so)
+#define SOCKBUF_LOCK(_so_buf)
+#define SOCKBUF_UNLOCK(_so_buf)
+#define SOCKBUF_LOCK_ASSERT(_so_buf)
+#endif
+
+#endif
diff --git a/usrsctplib/netinet/sctp_os.h b/usrsctplib/netinet/sctp_os.h
new file mode 100755
index 0000000..4888381
--- /dev/null
+++ b/usrsctplib/netinet/sctp_os.h
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_os.h 235828 2012-05-23 11:26:28Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_OS_H_
+#define _NETINET_SCTP_OS_H_
+
+/*
+ * General kernel memory allocation:
+ *  SCTP_MALLOC(element, type, size, name)
+ *  SCTP_FREE(element)
+ * Kernel memory allocation for "soname"- memory must be zeroed.
+ *  SCTP_MALLOC_SONAME(name, type, size)
+ *  SCTP_FREE_SONAME(name)
+ */
+
+/*
+ * Zone(pool) allocation routines: MUST be defined for each OS.
+ *  zone = zone/pool pointer.
+ *  name = string name of the zone/pool.
+ *  size = size of each zone/pool element.
+ *  number = number of elements in zone/pool.
+ *  type = structure type to allocate
+ *
+ * sctp_zone_t
+ * SCTP_ZONE_INIT(zone, name, size, number)
+ * SCTP_ZONE_GET(zone, type)
+ * SCTP_ZONE_FREE(zone, element)
+ * SCTP_ZONE_DESTROY(zone)
+ */
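+
+/*
+ * Illustrative usage sketch (hypothetical zone name and element type; the
+ * concrete macro expansions come from the per-OS headers included below):
+ *
+ *   sctp_zone_t zone;
+ *   SCTP_ZONE_INIT(zone, "sctp_example", sizeof(struct sctp_paramhdr), 1024);
+ *   struct sctp_paramhdr *ph = SCTP_ZONE_GET(zone, struct sctp_paramhdr);
+ *   if (ph != NULL)
+ *           SCTP_ZONE_FREE(zone, ph);
+ *   SCTP_ZONE_DESTROY(zone);
+ */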
+
+#if defined(__FreeBSD__)
+#include <netinet/sctp_os_bsd.h>
+#else
+#define MODULE_GLOBAL(_B) (_B)
+#endif
+
+#if defined(__Userspace__)
+#include <netinet/sctp_os_userspace.h>
+#endif
+
+#if defined(__APPLE__)
+#include <netinet/sctp_os_macosx.h>
+#endif
+
+#if defined(__Panda__)
+#include <ip/sctp/sctp_os_iox.h>
+#endif
+
+#if defined(__Windows__)
+#include <netinet/sctp_os_windows.h>
+#endif
+
+/* All os's must implement this address gatherer. If
+ * no VRF's exist, then vrf 0 is the only one and all
+ * addresses and ifn's live here.
+ */
+#define SCTP_DEFAULT_VRF 0
+void sctp_init_vrf_list(int vrfid);
+
+#endif
diff --git a/usrsctplib/netinet/sctp_os_userspace.h b/usrsctplib/netinet/sctp_os_userspace.h
new file mode 100755
index 0000000..fd3e0d7
--- /dev/null
+++ b/usrsctplib/netinet/sctp_os_userspace.h
@@ -0,0 +1,1163 @@
+/*-
+ * Copyright (c) 2006-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2008-2011, by Brad Penoff. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __sctp_os_userspace_h__
+#define __sctp_os_userspace_h__
+/*
+ * Userspace includes
+ * All the opt_xxx.h files are placed in the kernel build directory.
+ * We will place them in userspace stack build directory.
+ */
+
+#include <errno.h>
+
+#if defined(__Userspace_os_Windows)
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <iphlpapi.h>
+#include <Mswsock.h>
+#include <Windows.h>
+#include "user_environment.h"
+typedef CRITICAL_SECTION userland_mutex_t;
+#if WINVER < 0x0600
+enum {
+	C_SIGNAL = 0,
+	C_BROADCAST = 1,
+	C_MAX_EVENTS = 2
+};
+typedef struct
+{
+	u_int waiters_count;
+	CRITICAL_SECTION waiters_count_lock;
+	HANDLE events_[C_MAX_EVENTS];
+} userland_cond_t;
+void InitializeXPConditionVariable(userland_cond_t *);
+void DeleteXPConditionVariable(userland_cond_t *);
+int SleepXPConditionVariable(userland_cond_t *, userland_mutex_t *);
+void WakeAllXPConditionVariable(userland_cond_t *);
+#define InitializeConditionVariable(cond) InitializeXPConditionVariable(cond)
+#define DeleteConditionVariable(cond) DeleteXPConditionVariable(cond)
+#define SleepConditionVariableCS(cond, mtx, time) SleepXPConditionVariable(cond, mtx)
+#define WakeAllConditionVariable(cond) WakeAllXPConditionVariable(cond)
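+/*
+ * Pre-Vista Windows lacks the native CONDITION_VARIABLE type, so the
+ * userland_cond_t above emulates one with a waiter count protected by a
+ * critical section plus separate signal/broadcast events, and the macros
+ * map the Vista-style names onto those XP helpers.
+ */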
+#else
+#define DeleteConditionVariable(cond)
+typedef CONDITION_VARIABLE userland_cond_t;
+#endif
+typedef HANDLE userland_thread_t;
+#define ADDRESS_FAMILY	unsigned __int8
+#define IPVERSION  4
+#define MAXTTL     255
+/* VS2010 comes with stdint.h */
+#if _MSC_VER >= 1600
+#include <stdint.h>
+#else
+#define uint64_t   unsigned __int64
+#define uint32_t   unsigned __int32
+#define int32_t    __int32
+#define uint16_t   unsigned __int16
+#define int16_t    __int16
+#define uint8_t    unsigned __int8
+#define int8_t     __int8
+#endif
+#ifndef _SIZE_T_DEFINED
+#define size_t     __int32
+#endif
+#define u_long     unsigned __int64
+#define u_int      unsigned __int32
+#define u_int32_t  unsigned __int32
+#define u_int16_t  unsigned __int16
+#define u_int8_t   unsigned __int8
+#define u_char     unsigned char
+#define n_short    unsigned __int16
+#define u_short    unsigned __int16
+#define n_time     unsigned __int32
+#define sa_family_t unsigned __int8
+#define ssize_t    __int64
+#define __func__	__FUNCTION__
+
+#ifndef EWOULDBLOCK
+#define EWOULDBLOCK             WSAEWOULDBLOCK
+#endif
+#ifndef EINPROGRESS
+#define EINPROGRESS             WSAEINPROGRESS
+#endif
+#ifndef EALREADY
+#define EALREADY                WSAEALREADY
+#endif
+#ifndef ENOTSOCK
+#define ENOTSOCK                WSAENOTSOCK
+#endif
+#ifndef EDESTADDRREQ
+#define EDESTADDRREQ            WSAEDESTADDRREQ
+#endif
+#ifndef EMSGSIZE
+#define EMSGSIZE                WSAEMSGSIZE
+#endif
+#ifndef EPROTOTYPE
+#define EPROTOTYPE              WSAEPROTOTYPE
+#endif
+#ifndef ENOPROTOOPT
+#define ENOPROTOOPT             WSAENOPROTOOPT
+#endif
+#ifndef EPROTONOSUPPORT
+#define EPROTONOSUPPORT         WSAEPROTONOSUPPORT
+#endif
+#ifndef ESOCKTNOSUPPORT
+#define ESOCKTNOSUPPORT         WSAESOCKTNOSUPPORT
+#endif
+#ifndef EOPNOTSUPP
+#define EOPNOTSUPP              WSAEOPNOTSUPP
+#endif
+#ifndef ENOTSUP
+#define ENOTSUP                 WSAEOPNOTSUPP
+#endif
+#ifndef EPFNOSUPPORT
+#define EPFNOSUPPORT            WSAEPFNOSUPPORT
+#endif
+#ifndef EAFNOSUPPORT
+#define EAFNOSUPPORT            WSAEAFNOSUPPORT
+#endif
+#ifndef EADDRINUSE
+#define EADDRINUSE              WSAEADDRINUSE
+#endif
+#ifndef EADDRNOTAVAIL
+#define EADDRNOTAVAIL           WSAEADDRNOTAVAIL
+#endif
+#ifndef ENETDOWN
+#define ENETDOWN                WSAENETDOWN
+#endif
+#ifndef ENETUNREACH
+#define ENETUNREACH             WSAENETUNREACH
+#endif
+#ifndef ENETRESET
+#define ENETRESET               WSAENETRESET
+#endif
+#ifndef ECONNABORTED
+#define ECONNABORTED            WSAECONNABORTED
+#endif
+#ifndef ECONNRESET
+#define ECONNRESET              WSAECONNRESET
+#endif
+#ifndef ENOBUFS
+#define ENOBUFS                 WSAENOBUFS
+#endif
+#ifndef EISCONN
+#define EISCONN                 WSAEISCONN
+#endif
+#ifndef ENOTCONN
+#define ENOTCONN                WSAENOTCONN
+#endif
+#ifndef ESHUTDOWN
+#define ESHUTDOWN               WSAESHUTDOWN
+#endif
+#ifndef ETOOMANYREFS
+#define ETOOMANYREFS            WSAETOOMANYREFS
+#endif
+#ifndef ETIMEDOUT
+#define ETIMEDOUT               WSAETIMEDOUT
+#endif
+#ifndef ECONNREFUSED
+#define ECONNREFUSED            WSAECONNREFUSED
+#endif
+#ifndef ELOOP
+#define ELOOP                   WSAELOOP
+#endif
+#ifndef EHOSTDOWN
+#define EHOSTDOWN               WSAEHOSTDOWN
+#endif
+#ifndef EHOSTUNREACH
+#define EHOSTUNREACH            WSAEHOSTUNREACH
+#endif
+#ifndef EPROCLIM
+#define EPROCLIM                WSAEPROCLIM
+#endif
+#ifndef EUSERS
+#define EUSERS                  WSAEUSERS
+#endif
+#ifndef EDQUOT
+#define EDQUOT                  WSAEDQUOT
+#endif
+#ifndef ESTALE
+#define ESTALE                  WSAESTALE
+#endif
+#ifndef EREMOTE
+#define EREMOTE                 WSAEREMOTE
+#endif
+
+typedef char* caddr_t;
+
+#define bzero(buf, len) memset(buf, 0, len)
+#define bcopy(srcKey, dstKey, len) memcpy(dstKey, srcKey, len)
+#if _MSC_VER < 1900
+#define snprintf(data, size, format, ...) _snprintf_s(data, size, _TRUNCATE, format, __VA_ARGS__)
+#endif
+#define inline __inline
+#define __inline__ __inline
+#define	MSG_EOR		0x8		/* data completes record */
+#define	MSG_DONTWAIT	0x80		/* this message should be nonblocking */
+
+#ifdef CMSG_DATA
+#undef CMSG_DATA
+#endif
+/*
+ * The following definitions should apply iff WINVER < 0x0600
+ * but that check doesn't work in all cases. So be more pedantic...
+ */
+#define CMSG_DATA(x) WSA_CMSG_DATA(x)
+#define CMSG_ALIGN(x) WSA_CMSGDATA_ALIGN(x)
+#ifndef CMSG_FIRSTHDR
+#define CMSG_FIRSTHDR(x) WSA_CMSG_FIRSTHDR(x)
+#endif
+#ifndef CMSG_NXTHDR
+#define CMSG_NXTHDR(x, y) WSA_CMSG_NXTHDR(x, y)
+#endif
+#ifndef CMSG_SPACE
+#define CMSG_SPACE(x) WSA_CMSG_SPACE(x)
+#endif
+#ifndef CMSG_LEN
+#define CMSG_LEN(x) WSA_CMSG_LEN(x)
+#endif
+
+/****  from sctp_os_windows.h ***************/
+#define SCTP_IFN_IS_IFT_LOOP(ifn)	((ifn)->ifn_type == IFT_LOOP)
+#define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP)
+
+/*
+ * Access to IFN's to help with src-addr-selection
+ */
+/* This could return VOID if the index works but for BSD we provide both. */
+#define SCTP_GET_IFN_VOID_FROM_ROUTE(ro) \
+	((ro)->ro_rt != NULL ? (ro)->ro_rt->rt_ifp : NULL)
+#define SCTP_ROUTE_HAS_VALID_IFN(ro) \
+	((ro)->ro_rt && (ro)->ro_rt->rt_ifp)
+/******************************************/
+
+#define SCTP_GET_IF_INDEX_FROM_ROUTE(ro) 1 /* compiles...  TODO use routing socket to determine */
+
+#define BIG_ENDIAN 1
+#define LITTLE_ENDIAN 0
+#ifdef WORDS_BIGENDIAN
+#define BYTE_ORDER BIG_ENDIAN
+#else
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+
+#else /* !defined(Userspace_os_Windows) */
+#include <sys/socket.h>
+#if defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Linux) || defined(__Userspace_os_NetBSD) || defined(__Userspace_os_OpenBSD) || defined(__Userspace_os_NaCl)
+#include <pthread.h>
+#endif
+typedef pthread_mutex_t userland_mutex_t;
+typedef pthread_cond_t userland_cond_t;
+typedef pthread_t userland_thread_t;
+#endif
+
+#if defined(__Userspace_os_Windows) || defined(__Userspace_os_NaCl)
+
+#define IFNAMSIZ 64
+
+#define random() rand()
+#define srandom(s) srand(s)
+
+#define timeradd(tvp, uvp, vvp)                                    \
+	do {                                                       \
+		(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec;     \
+		(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec;  \
+		if ((vvp)->tv_usec >= 1000000) {                   \
+			(vvp)->tv_sec++;                           \
+			(vvp)->tv_usec -= 1000000;                 \
+		}                                                  \
+	} while (0)
+
+#define timersub(tvp, uvp, vvp)                                    \
+	do {                                                       \
+		(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;     \
+		(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;  \
+		if ((vvp)->tv_usec < 0) {                          \
+			(vvp)->tv_sec--;                           \
+			(vvp)->tv_usec += 1000000;                 \
+		}                                                  \
+	} while (0)
+
+/*#include <packon.h>
+#pragma pack(push, 1)*/
+struct ip {
+	u_char    ip_hl:4, ip_v:4;
+	u_char    ip_tos;
+	u_short   ip_len;
+	u_short   ip_id;
+	u_short   ip_off;
+#define IP_RP 0x8000
+#define IP_DF 0x4000
+#define IP_MF 0x2000
+#define IP_OFFMASK 0x1fff
+	u_char    ip_ttl;
+	u_char    ip_p;
+	u_short   ip_sum;
+	struct in_addr ip_src, ip_dst;
+};
+
+struct ifaddrs {
+	struct ifaddrs  *ifa_next;
+	char		*ifa_name;
+	unsigned int		 ifa_flags;
+	struct sockaddr	*ifa_addr;
+	struct sockaddr	*ifa_netmask;
+	struct sockaddr	*ifa_dstaddr;
+	void		*ifa_data;
+};
+
+struct udphdr {
+	uint16_t uh_sport;
+	uint16_t uh_dport;
+	uint16_t uh_ulen;
+	uint16_t uh_sum;
+};
+
+struct iovec {
+	size_t len;
+	char *buf;
+};
+
+#define iov_base buf
+#define iov_len	len
+
+struct ifa_msghdr {
+	uint16_t         ifam_msglen;
+	unsigned char    ifam_version;
+	unsigned char    ifam_type;
+	uint32_t         ifam_addrs;
+	uint32_t         ifam_flags;
+	uint16_t         ifam_index;
+	uint32_t         ifam_metric;
+};
+
+struct ifdevmtu {
+	int ifdm_current;
+	int ifdm_min;
+	int ifdm_max;
+};
+
+struct ifkpi {
+	unsigned int  ifk_module_id;
+	unsigned int  ifk_type;
+	union {
+		void *ifk_ptr;
+		int ifk_value;
+	} ifk_data;
+};
+
+struct ifreq {
+	char    ifr_name[16];
+	union {
+		struct sockaddr ifru_addr;
+		struct sockaddr ifru_dstaddr;
+		struct sockaddr ifru_broadaddr;
+		short  ifru_flags;
+		int ifru_metric;
+		int ifru_mtu;
+		int ifru_phys;
+		int ifru_media;
+		int    ifru_intval;
+		char*  ifru_data;
+		struct ifdevmtu ifru_devmtu;
+		struct ifkpi  ifru_kpi;
+		uint32_t ifru_wake_flags;
+	} ifr_ifru;
+#define ifr_addr        ifr_ifru.ifru_addr
+#define ifr_dstaddr     ifr_ifru.ifru_dstaddr
+#define ifr_broadaddr   ifr_ifru.ifru_broadaddr
+#define ifr_flags       ifr_ifru.ifru_flags[0]
+#define ifr_prevflags   ifr_ifru.ifru_flags[1]
+#define ifr_metric      ifr_ifru.ifru_metric
+#define ifr_mtu         ifr_ifru.ifru_mtu
+#define ifr_phys        ifr_ifru.ifru_phys
+#define ifr_media       ifr_ifru.ifru_media
+#define ifr_data        ifr_ifru.ifru_data
+#define ifr_devmtu      ifr_ifru.ifru_devmtu
+#define ifr_intval      ifr_ifru.ifru_intval
+#define ifr_kpi         ifr_ifru.ifru_kpi
+#define ifr_wake_flags  ifr_ifru.ifru_wake_flags
+};
+
+#endif
+
+#if defined(__Userspace_os_Windows)
+int Win_getifaddrs(struct ifaddrs**);
+#define getifaddrs(interfaces)  (int)Win_getifaddrs(interfaces)
+int win_if_nametoindex(const char *);
+#define if_nametoindex(x) win_if_nametoindex(x)
+#endif
+
+#define mtx_lock(arg1)
+#define mtx_unlock(arg1)
+#define mtx_assert(arg1,arg2)
+#define MA_OWNED 7 /* sys/mutex.h typically on FreeBSD */
+#if !defined(__Userspace_os_FreeBSD)
+struct mtx {int dummy;};
+#if !defined(__Userspace_os_NetBSD)
+struct selinfo {int dummy;};
+#endif
+struct sx {int dummy;};
+#endif
+
+#include <stdio.h>
+#include <string.h>
+/* #include <sys/param.h>  in FreeBSD defines MSIZE */
+/* #include <sys/ktr.h> */
+/* #include <sys/systm.h> */
+#if defined(HAVE_SYS_QUEUE_H)
+#include <sys/queue.h>
+#else
+#include <user_queue.h>
+#endif
+#include <user_malloc.h>
+/* #include <sys/kernel.h> */
+/* #include <sys/sysctl.h> */
+/* #include <sys/protosw.h> */
+/* on FreeBSD, this results in a redefinition of SOCK(BUF)_(UN)LOCK and
+ *  unknown type of struct mtx for sb_mtx in struct sockbuf */
+#include "user_socketvar.h" /* MALLOC_DECLARE's M_PCB. Replacement for sys/socketvar.h */
+/* #include <sys/jail.h> */
+/* #include <sys/sysctl.h> */
+#include <user_environment.h>
+#include <user_atomic.h>
+#include <user_mbuf.h>
+/* #include <sys/uio.h> */
+/* #include <sys/lock.h> */
+#if defined(__FreeBSD__) && __FreeBSD_version > 602000
+#include <sys/rwlock.h>
+#endif
+/* #include <sys/kthread.h> */
+#if defined(__FreeBSD__) && __FreeBSD_version > 602000
+#include <sys/priv.h>
+#endif
+/* #include <sys/random.h> */
+/* #include <sys/limits.h> */
+/* #include <machine/cpu.h> */
+
+#if defined(__Userspace_os_Darwin)
+/* was a 0 byte file.  needed for structs if_data(64) and net_event_data */
+#include <net/if_var.h>
+#endif
+#if defined(__Userspace_os_FreeBSD)
+#include <net/if_types.h>
+/* #include <net/if_var.h> was a 0 byte file.  causes struct mtx redefinition */
+#endif
+/* OOTB only - dummy route used at the moment. should we port route to
+ *  userspace as well? */
+/* on FreeBSD, this results in a redefinition of struct route */
+/* #include <net/route.h> */
+#if !defined(__Userspace_os_Windows) && !defined(__Userspace_os_NaCl)
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+#endif
+#if defined(HAVE_NETINET_IP_ICMP_H)
+#include <netinet/ip_icmp.h>
+#else
+#include <user_ip_icmp.h>
+#endif
+/* #include <netinet/in_pcb.h> ported to userspace */
+#include <user_inpcb.h>
+
+/* for getifaddrs */
+#include <sys/types.h>
+#if !defined(__Userspace_os_Windows)
+#if defined(INET) || defined(INET6)
+#include <ifaddrs.h>
+#endif
+
+/* for ioctl */
+#include <sys/ioctl.h>
+
+/* for close, etc. */
+#include <unistd.h>
+#endif
+
+/* lots of errno's used and needed in userspace */
+
+/* for offsetof */
+#include <stddef.h>
+
+#if defined(SCTP_PROCESS_LEVEL_LOCKS) && !defined(__Userspace_os_Windows)
+/* for pthread_mutex_lock, pthread_mutex_unlock, etc. */
+#include <pthread.h>
+#endif
+
+#ifdef IPSEC
+#include <netipsec/ipsec.h>
+#include <netipsec/key.h>
+#endif				/* IPSEC */
+
+#ifdef INET6
+#if defined(__Userspace_os_FreeBSD)
+#include <sys/domain.h>
+#endif
+#ifdef IPSEC
+#include <netipsec/ipsec6.h>
+#endif
+#if !defined(__Userspace_os_Windows)
+#include <netinet/ip6.h>
+#endif
+#if defined(__Userspace_os_Darwin) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_Linux) || defined(__Userspace_os_NetBSD) || defined(__Userspace_os_OpenBSD) || defined(__Userspace_os_Windows)
+#include "user_ip6_var.h"
+#else
+#include <netinet6/ip6_var.h>
+#endif
+#if defined(__Userspace_os_FreeBSD)
+#include <netinet6/in6_pcb.h>
+#include <netinet6/ip6protosw.h>
+/* #include <netinet6/nd6.h> was a 0 byte file */
+#include <netinet6/scope6_var.h>
+#endif
+#endif /* INET6 */
+
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#endif
+
+#include "netinet/sctp_sha1.h"
+
+#if __FreeBSD_version >= 700000
+#include <netinet/ip_options.h>
+#endif
+
+#define SCTP_PRINTF(...)                                  \
+	if (SCTP_BASE_VAR(debug_printf)) {                \
+		SCTP_BASE_VAR(debug_printf)(__VA_ARGS__); \
+	}
+
+#if defined(__FreeBSD__)
+#ifndef in6pcb
+#define in6pcb		inpcb
+#endif
+#endif
+/* Declare all the malloc names for all the various mallocs */
+MALLOC_DECLARE(SCTP_M_MAP);
+MALLOC_DECLARE(SCTP_M_STRMI);
+MALLOC_DECLARE(SCTP_M_STRMO);
+MALLOC_DECLARE(SCTP_M_ASC_ADDR);
+MALLOC_DECLARE(SCTP_M_ASC_IT);
+MALLOC_DECLARE(SCTP_M_AUTH_CL);
+MALLOC_DECLARE(SCTP_M_AUTH_KY);
+MALLOC_DECLARE(SCTP_M_AUTH_HL);
+MALLOC_DECLARE(SCTP_M_AUTH_IF);
+MALLOC_DECLARE(SCTP_M_STRESET);
+MALLOC_DECLARE(SCTP_M_CMSG);
+MALLOC_DECLARE(SCTP_M_COPYAL);
+MALLOC_DECLARE(SCTP_M_VRF);
+MALLOC_DECLARE(SCTP_M_IFA);
+MALLOC_DECLARE(SCTP_M_IFN);
+MALLOC_DECLARE(SCTP_M_TIMW);
+MALLOC_DECLARE(SCTP_M_MVRF);
+MALLOC_DECLARE(SCTP_M_ITER);
+MALLOC_DECLARE(SCTP_M_SOCKOPT);
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+
+#define SCTP_GET_CYCLECOUNT get_cyclecount()
+#define SCTP_CTR6 sctp_log_trace
+
+#else
+#define SCTP_CTR6 CTR6
+#endif
+
+/* Empty ktr statement for __Userspace__ (similar to what is done for mac) */
+#define	CTR6(m, d, p1, p2, p3, p4, p5, p6)
+
+
+
+#define SCTP_BASE_INFO(__m) system_base_info.sctppcbinfo.__m
+#define SCTP_BASE_STATS system_base_info.sctpstat
+#define SCTP_BASE_STAT(__m)     system_base_info.sctpstat.__m
+#define SCTP_BASE_SYSCTL(__m) system_base_info.sctpsysctl.__m
+#define SCTP_BASE_VAR(__m) system_base_info.__m
+
+/*
+ *
+ */
+#if !defined(__Userspace_os_Darwin)
+#define USER_ADDR_NULL	(NULL)		/* FIX ME: temp */
+#endif
+
+#if defined(SCTP_DEBUG)
+#include <netinet/sctp_constants.h>
+#define SCTPDBG(level, ...)					\
+{								\
+	do {							\
+		if (SCTP_BASE_SYSCTL(sctp_debug_on) & level) {	\
+			SCTP_PRINTF(__VA_ARGS__);		\
+		}						\
+	} while (0);						\
+}
+#define SCTPDBG_ADDR(level, addr)				\
+{								\
+	do {							\
+		if (SCTP_BASE_SYSCTL(sctp_debug_on) & level ) {	\
+		    sctp_print_address(addr);			\
+		}						\
+	} while (0);						\
+}
+#else
+#define SCTPDBG(level, ...)
+#define SCTPDBG_ADDR(level, addr)
+#endif
+
+#ifdef SCTP_LTRACE_CHUNKS
+#define SCTP_LTRACE_CHK(a, b, c, d) if(sctp_logging_level & SCTP_LTRACE_CHUNK_ENABLE) CTR6(KTR_SUBSYS, "SCTP:%d[%d]:%x-%x-%x-%x", SCTP_LOG_CHUNK_PROC, 0, a, b, c, d)
+#else
+#define SCTP_LTRACE_CHK(a, b, c, d)
+#endif
+
+#ifdef SCTP_LTRACE_ERRORS
+#define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err) \
+	if (sctp_logging_level & SCTP_LTRACE_ERROR_ENABLE) \
+		SCTP_PRINTF("mbuf:%p inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \
+		            (void *)m, (void *)inp, (void *)stcb, (void *)net, file, __LINE__, err);
+#define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err) \
+	if (sctp_logging_level & SCTP_LTRACE_ERROR_ENABLE) \
+		SCTP_PRINTF("inp:%p stcb:%p net:%p file:%x line:%d error:%d\n", \
+		            (void *)inp, (void *)stcb, (void *)net, file, __LINE__, err);
+#else
+#define SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, file, err)
+#define SCTP_LTRACE_ERR_RET(inp, stcb, net, file, err)
+#endif
+
+
+/*
+ * Local address and interface list handling
+ */
+#define SCTP_MAX_VRF_ID		0
+#define SCTP_SIZE_OF_VRF_HASH	3
+#define SCTP_IFNAMSIZ		IFNAMSIZ
+#define SCTP_DEFAULT_VRFID	0
+#define SCTP_VRF_ADDR_HASH_SIZE	16
+#define SCTP_VRF_IFN_HASH_SIZE	3
+#define	SCTP_INIT_VRF_TABLEID(vrf)
+
+#if !defined(__Userspace_os_Windows)
+#define SCTP_IFN_IS_IFT_LOOP(ifn) (strncmp((ifn)->ifn_name, "lo", 2) == 0)
+/* BSD definition */
+/* #define SCTP_ROUTE_IS_REAL_LOOP(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifa && (ro)->ro_rt->rt_ifa->ifa_ifp && (ro)->ro_rt->rt_ifa->ifa_ifp->if_type == IFT_LOOP) */
+/* only used in IPv6 scenario, which isn't supported yet */
+#define SCTP_ROUTE_IS_REAL_LOOP(ro) 0
+
+/*
+ * Access to IFN's to help with src-addr-selection
+ */
+/* This could return VOID if the index works but for BSD we provide both. */
+#define SCTP_GET_IFN_VOID_FROM_ROUTE(ro) (void *)ro->ro_rt->rt_ifp
+#define SCTP_GET_IF_INDEX_FROM_ROUTE(ro) 1 /* compiles...  TODO use routing socket to determine */
+#define SCTP_ROUTE_HAS_VALID_IFN(ro) ((ro)->ro_rt && (ro)->ro_rt->rt_ifp)
+#endif
+
+/*
+ * general memory allocation
+ */
+#define SCTP_MALLOC(var, type, size, name)				\
+	do {								\
+		MALLOC(var, type, size, name, M_NOWAIT);		\
+	} while (0)
+
+#define SCTP_FREE(var, type)	FREE(var, type)
+
+#define SCTP_MALLOC_SONAME(var, type, size)				\
+	do {								\
+		MALLOC(var, type, size, M_SONAME, (M_WAITOK | M_ZERO));	\
+	} while (0)
+
+#define SCTP_FREE_SONAME(var)	FREE(var, M_SONAME)
+
+#define SCTP_PROCESS_STRUCT struct proc *
+
+/*
+ * zone allocation functions
+ */
+
+
+#if defined(SCTP_SIMPLE_ALLOCATOR)
+/*typedef size_t sctp_zone_t;*/
+#define SCTP_ZONE_INIT(zone, name, size, number) { \
+	zone = size; \
+}
+
+/* __Userspace__ SCTP_ZONE_GET: allocate element from the zone */
+#define SCTP_ZONE_GET(zone, type)  \
+        (type *)malloc(zone);
+
+
+/* __Userspace__ SCTP_ZONE_FREE: free element from the zone */
+#define SCTP_ZONE_FREE(zone, element) { \
+	free(element);  \
+}
+
+#define SCTP_ZONE_DESTROY(zone)
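+/*
+ * With SCTP_SIMPLE_ALLOCATOR the "zone" is merely the recorded element size
+ * and every SCTP_ZONE_GET/SCTP_ZONE_FREE is a plain malloc/free, so there is
+ * nothing to tear down and SCTP_ZONE_DESTROY expands to nothing.
+ */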
+#else
+/*__Userspace__
+  Compiling & linking notes: Needs libumem, which has been placed in ./user_lib
+  All userspace header files are in ./user_include. Makefile will need the
+  following.
+  CFLAGS = -I./ -Wall
+  LDFLAGS = -L./user_lib -R./user_lib -lumem
+*/
+#include "user_include/umem.h"
+
+/* __Userspace__ SCTP_ZONE_INIT: initialize the zone */
+/*
+  __Userspace__
+  No equivalent function to uma_zone_set_max added yet. (See SCTP_ZONE_INIT in sctp_os_bsd.h
+  for reference.) It may not be required, since
+  http://nixdoc.net/man-pages/FreeBSD/uma_zalloc.9.html mentions that
+  max limits may not be enforced on systems with more than one CPU.
+*/
+#define SCTP_ZONE_INIT(zone, name, size, number) { \
+	zone = umem_cache_create(name, size, 0, NULL, NULL, NULL, NULL, NULL, 0); \
+  }
+
+/* __Userspace__ SCTP_ZONE_GET: allocate element from the zone */
+#define SCTP_ZONE_GET(zone, type) \
+        (type *)umem_cache_alloc(zone, UMEM_DEFAULT);
+
+
+/* __Userspace__ SCTP_ZONE_FREE: free element from the zone */
+#define SCTP_ZONE_FREE(zone, element) \
+	umem_cache_free(zone, element);
+
+
+/* __Userspace__ SCTP_ZONE_DESTROY: destroy the zone */
+#define SCTP_ZONE_DESTROY(zone) \
+	umem_cache_destroy(zone);
+#endif
+
+/*
+ * __Userspace__ Defining sctp_hashinit_flags() and sctp_hashdestroy() for userland.
+ */
+void *sctp_hashinit_flags(int elements, struct malloc_type *type,
+                    u_long *hashmask, int flags);
+void
+sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask);
+
+void
+sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask);
+
+
+#define HASH_NOWAIT 0x00000001
+#define HASH_WAITOK 0x00000002
+
+/* M_PCB is MALLOC_DECLARE'd in sys/socketvar.h */
+#define SCTP_HASH_INIT(size, hashmark) sctp_hashinit_flags(size, M_PCB, hashmark, HASH_NOWAIT)
+
+#define SCTP_HASH_FREE(table, hashmark) sctp_hashdestroy(table, M_PCB, hashmark)
+
+#define SCTP_HASH_FREE_DESTROY(table, hashmark)  sctp_hashfreedestroy(table, M_PCB, hashmark)
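+
+/*
+ * Usage sketch (hypothetical table size): SCTP_HASH_INIT returns the freshly
+ * allocated table and writes the mask through its second argument, mirroring
+ * the kernel hashinit() contract, e.g.
+ *
+ *   u_long mask;
+ *   void *table = SCTP_HASH_INIT(31, &mask);
+ *   ...
+ *   SCTP_HASH_FREE(table, mask);
+ */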
+#define SCTP_M_COPYM	m_copym
+
+/*
+ * timers
+ */
+/* __Userspace__
+ * user_sctp_callout.h has typedef struct sctp_callout sctp_os_timer_t;
+ * which is used in the timer related functions such as
+ * SCTP_OS_TIMER_INIT etc.
+*/
+#include <netinet/sctp_callout.h>
+
+/* __Userspace__ Creating a receive thread */
+#include <user_recv_thread.h>
+
+/*__Userspace__ defining KTR_SUBSYS 1 as done in sctp_os_macosx.h */
+#define KTR_SUBSYS 1
+
+/* The packed define for 64 bit platforms */
+#if !defined(__Userspace_os_Windows)
+#define SCTP_PACKED __attribute__((packed))
+#define SCTP_UNUSED __attribute__((unused))
+#else
+#define SCTP_PACKED
+#define SCTP_UNUSED
+#endif
+
+/*
+ * Functions
+ */
+/* Mbuf manipulation and access macros  */
+#define SCTP_BUF_LEN(m) (m->m_len)
+#define SCTP_BUF_NEXT(m) (m->m_next)
+#define SCTP_BUF_NEXT_PKT(m) (m->m_nextpkt)
+#define SCTP_BUF_RESV_UF(m, size) m->m_data += size
+#define SCTP_BUF_AT(m, size) m->m_data + size
+#define SCTP_BUF_IS_EXTENDED(m) (m->m_flags & M_EXT)
+#define SCTP_BUF_EXTEND_SIZE(m) (m->m_ext.ext_size)
+#define SCTP_BUF_TYPE(m) (m->m_type)
+#define SCTP_BUF_RECVIF(m) (m->m_pkthdr.rcvif)
+#define SCTP_BUF_PREPEND	M_PREPEND
+
+#define SCTP_ALIGN_TO_END(m, len) if(m->m_flags & M_PKTHDR) { \
+                                     MH_ALIGN(m, len); \
+                                  } else if ((m->m_flags & M_EXT) == 0) { \
+                                     M_ALIGN(m, len); \
+                                  }
+
+/* We make it so that if you have up to 4 threads
+ * writing based on the default packet log size of
+ * 65 k, that would be 4 16k packets before we
+ * would hit a problem.
+ */
+#define SCTP_PKTLOG_WRITERS_NEED_LOCK 3
+
+
+/*
+ * routes, output, etc.
+ */
+
+typedef struct sctp_route	sctp_route_t;
+typedef struct sctp_rtentry	sctp_rtentry_t;
+
+static inline void sctp_userspace_rtalloc(sctp_route_t *ro)
+{
+	if (ro->ro_rt != NULL) {
+		ro->ro_rt->rt_refcnt++;
+		return;
+	}
+
+	ro->ro_rt = (sctp_rtentry_t *) malloc(sizeof(sctp_rtentry_t));
+	if (ro->ro_rt == NULL)
+		return;
+
+	/* initialize */
+	memset(ro->ro_rt, 0, sizeof(sctp_rtentry_t));
+	ro->ro_rt->rt_refcnt = 1;
+
+	/* set MTU */
+	/* TODO set this based on the ro->ro_dst, looking up MTU with routing socket */
+#if 0
+	if (userspace_rawroute == -1) {
+		userspace_rawroute = socket(AF_ROUTE, SOCK_RAW, 0);
+		if (userspace_rawroute == -1)
+			return;
+	}
+#endif
+	ro->ro_rt->rt_rmx.rmx_mtu = 1500; /* FIXME temporary solution */
+
+	/* TODO enable the ability to obtain interface index of route for
+	 *  SCTP_GET_IF_INDEX_FROM_ROUTE macro.
+	 */
+}
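+
+/*
+ * The userspace stand-in above either takes another reference on an already
+ * cached route or fabricates a dummy rtentry with a refcount of 1 and a fixed
+ * 1500-byte MTU; sctp_userspace_rtfree() below drops a reference and only
+ * frees the entry once the count reaches zero.
+ */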
+#define SCTP_RTALLOC(ro, vrf_id, fibnum) sctp_userspace_rtalloc((sctp_route_t *)ro)
+
+/* dummy rtfree needed once user_route.h is included */
+static inline void sctp_userspace_rtfree(sctp_rtentry_t *rt)
+{
+	if(rt == NULL) {
+		return;
+	}
+	if(--rt->rt_refcnt > 0) {
+		return;
+	}
+	free(rt);
+	rt = NULL;
+}
+#define rtfree(arg1) sctp_userspace_rtfree(arg1)
+
+
+/*************************/
+/*      MTU              */
+/*************************/
+int sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af);
+
+#define SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, af) sctp_userspace_get_mtu_from_ifn(ifn_index, af)
+
+#define SCTP_GATHER_MTU_FROM_ROUTE(sctp_ifa, sa, rt) ((rt != NULL) ? rt->rt_rmx.rmx_mtu : 0)
+
+#define SCTP_GATHER_MTU_FROM_INTFC(sctp_ifn)  sctp_userspace_get_mtu_from_ifn(if_nametoindex(((struct ifaddrs *) (sctp_ifn))->ifa_name), AF_INET)
+
+#define SCTP_SET_MTU_OF_ROUTE(sa, rt, mtu) do { \
+                                              if (rt != NULL) \
+                                                 rt->rt_rmx.rmx_mtu = mtu; \
+                                           } while(0)
+
+/* (de-)register interface event notifications */
+#define SCTP_REGISTER_INTERFACE(ifhandle, af)
+#define SCTP_DEREGISTER_INTERFACE(ifhandle, af)
+
+
+/*************************/
+/* These are for logging */
+/*************************/
+/* return the base ext data pointer */
+#define SCTP_BUF_EXTEND_BASE(m) (m->m_ext.ext_buf)
+/* return the refcnt of the data pointer */
+#define SCTP_BUF_EXTEND_REFCNT(m) (*m->m_ext.ref_cnt)
+/* return any buffer related flags, this is
+ * used beyond logging for apple only.
+ */
+#define SCTP_BUF_GET_FLAGS(m) (m->m_flags)
+
+/* For BSD this just accesses the M_PKTHDR length
+ * so it operates on an mbuf with hdr flag. Other
+ * O/S's may have separate packet header and mbuf
+ * chain pointers... thus the macro.
+ */
+#define SCTP_HEADER_TO_CHAIN(m) (m)
+#define SCTP_DETACH_HEADER_FROM_CHAIN(m)
+#define SCTP_HEADER_LEN(m) ((m)->m_pkthdr.len)
+#define SCTP_GET_HEADER_FOR_OUTPUT(o_pak) 0
+#define SCTP_RELEASE_HEADER(m)
+#define SCTP_RELEASE_PKT(m)	sctp_m_freem(m)
+
+#define SCTP_GET_PKT_VRFID(m, vrf_id)  ((vrf_id = SCTP_DEFAULT_VRFID) != SCTP_DEFAULT_VRFID)
+
+
+
+/* Attach the chain of data into the sendable packet. */
+#define SCTP_ATTACH_CHAIN(pak, m, packet_length) do { \
+                                                  pak = m; \
+                                                  pak->m_pkthdr.len = packet_length; \
+                          } while(0)
+
+/* Other m_pkthdr type things */
+/* FIXME need real definitions */
+#define SCTP_IS_IT_BROADCAST(dst, m) 0
+/* OOTB only #define SCTP_IS_IT_BROADCAST(dst, m) ((m->m_flags & M_PKTHDR) ? in_broadcast(dst, m->m_pkthdr.rcvif) : 0)  BSD def */
+#define SCTP_IS_IT_LOOPBACK(m) 0
+/* OOTB ONLY #define SCTP_IS_IT_LOOPBACK(m) ((m->m_flags & M_PKTHDR) && ((m->m_pkthdr.rcvif == NULL) || (m->m_pkthdr.rcvif->if_type == IFT_LOOP)))  BSD def */
+
+
+/* This converts any input packet header
+ * into the chain of data holders, for BSD
+ * its a NOP.
+ */
+
+/* get the v6 hop limit */
+#define SCTP_GET_HLIM(inp, ro) 128 /* As done for __Windows__ */
+#define IPv6_HOP_LIMIT 128
+
+/* is the endpoint v6only? */
+#define SCTP_IPV6_V6ONLY(inp)	(((struct inpcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
+/* is the socket non-blocking? */
+#define SCTP_SO_IS_NBIO(so)	((so)->so_state & SS_NBIO)
+#define SCTP_SET_SO_NBIO(so)	((so)->so_state |= SS_NBIO)
+#define SCTP_CLEAR_SO_NBIO(so)	((so)->so_state &= ~SS_NBIO)
+/* get the socket type */
+#define SCTP_SO_TYPE(so)	((so)->so_type)
+
+/* reserve sb space for a socket */
+#define SCTP_SORESERVE(so, send, recv)	soreserve(so, send, recv)
+
+/* wakeup a socket */
+#define SCTP_SOWAKEUP(so)	wakeup(&(so)->so_timeo, so)
+/* clear the socket buffer state */
+#define SCTP_SB_CLEAR(sb)	\
+	(sb).sb_cc = 0;		\
+	(sb).sb_mb = NULL;	\
+	(sb).sb_mbcnt = 0;
+
+#define SCTP_SB_LIMIT_RCV(so) so->so_rcv.sb_hiwat
+#define SCTP_SB_LIMIT_SND(so) so->so_snd.sb_hiwat
+
+/* Future zero copy wakeup/send  function */
+#define SCTP_ZERO_COPY_EVENT(inp, so)
+/* This is re-pulse ourselves for sendbuf */
+#define SCTP_ZERO_COPY_SENDQ_EVENT(inp, so)
+
+#define SCTP_READ_RANDOM(buf, len)	read_random(buf, len)
+
+#define SCTP_SHA1_CTX		struct sctp_sha1_context
+#define SCTP_SHA1_INIT		sctp_sha1_init
+#define SCTP_SHA1_UPDATE	sctp_sha1_update
+#define SCTP_SHA1_FINAL(x,y)	sctp_sha1_final((unsigned char *)x, y)
+
+/* start OOTB only stuff */
+/* TODO IFT_LOOP is in net/if_types.h on Linux */
+#define IFT_LOOP 0x18
+
+/* sctp_pcb.h */
+
+#if defined(__Userspace_os_Windows)
+#define SHUT_RD 1
+#define SHUT_WR 2
+#define SHUT_RDWR 3
+#endif
+#define PRU_FLUSH_RD SHUT_RD
+#define PRU_FLUSH_WR SHUT_WR
+#define PRU_FLUSH_RDWR SHUT_RDWR
+
+/* netinet/ip_var.h definitions are behind an if defined for _KERNEL on FreeBSD */
+#define	IP_RAWOUTPUT		0x2
+
+
+/* end OOTB only stuff */
+
+#define AF_CONN 123
+struct sockaddr_conn {
+#ifdef HAVE_SCONN_LEN
+	uint8_t sconn_len;
+	uint8_t sconn_family;
+#else
+	uint16_t sconn_family;
+#endif
+	uint16_t sconn_port;
+	void *sconn_addr;
+};
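+
+/*
+ * AF_CONN is the usrsctp-specific address family for running SCTP over an
+ * application-provided lower layer: instead of an IP address, sconn_addr
+ * carries an opaque pointer identifying the application's connection object
+ * and sconn_port carries the SCTP port.
+ */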
+
+typedef void *(*start_routine_t)(void *);
+
+extern int
+sctp_userspace_thread_create(userland_thread_t *thread, start_routine_t start_routine);
+
+void
+sctp_userspace_set_threadname(const char *name);
+
+/*
+ * SCTP protocol specific mbuf flags.
+ */
+#define	M_NOTIFICATION		M_PROTO5	/* SCTP notification */
+
+/*
+ * IP output routines
+ */
+
+/* Defining SCTP_IP_ID macro.
+   In netinet/ip_output.c, we have u_short ip_id;
+   In netinet/ip_var.h, we have extern u_short	ip_id; (enclosed within _KERNEL_)
+   See static __inline uint16_t ip_newid(void) in netinet/ip_var.h
+ */
+#define SCTP_IP_ID(inp) (ip_id)
+
+/* need sctphdr to get port in SCTP_IP_OUTPUT. sctphdr defined in sctp.h  */
+#include <netinet/sctp.h>
+extern void sctp_userspace_ip_output(int *result, struct mbuf *o_pak,
+                                     sctp_route_t *ro, void *stcb,
+                                     uint32_t vrf_id);
+
+#define SCTP_IP_OUTPUT(result, o_pak, ro, stcb, vrf_id) sctp_userspace_ip_output(&result, o_pak, ro, stcb, vrf_id);
+
+#if defined(INET6)
+extern void sctp_userspace_ip6_output(int *result, struct mbuf *o_pak,
+                                      struct route_in6 *ro, void *stcb,
+                                      uint32_t vrf_id);
+#define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, stcb, vrf_id) sctp_userspace_ip6_output(&result, o_pak, ro, stcb, vrf_id);
+#endif
+
+
+
+#if 0
+#define SCTP_IP6_OUTPUT(result, o_pak, ro, ifp, stcb, vrf_id) \
+{ \
+	if (stcb && stcb->sctp_ep) \
+		result = ip6_output(o_pak, \
+				    ((struct in6pcb *)(stcb->sctp_ep))->in6p_outputopts, \
+				    (ro), 0, 0, ifp, NULL); \
+	else \
+		result = ip6_output(o_pak, NULL, (ro), 0, 0, ifp, NULL); \
+}
+#endif
+
+struct mbuf *
+sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header, int how, int allonebuf, int type);
+
+
+/* with the current included files, this is defined in Linux but
+ *  in FreeBSD, it is behind a _KERNEL in sys/socket.h ...
+ */
+#if defined(__Userspace_os_DragonFly) || defined(__Userspace_os_FreeBSD) || defined(__Userspace_os_OpenBSD) || defined(__Userspace_os_NaCl)
+/* stolen from /usr/include/sys/socket.h */
+#define CMSG_ALIGN(n)   _ALIGN(n)
+#elif defined(__Userspace_os_NetBSD)
+#define CMSG_ALIGN(n)   (((n) + __ALIGNBYTES) & ~__ALIGNBYTES)
+#elif defined(__Userspace_os_Darwin)
+#if !defined(__DARWIN_ALIGNBYTES)
+#define	__DARWIN_ALIGNBYTES	(sizeof(__darwin_size_t) - 1)
+#endif
+
+#if !defined(__DARWIN_ALIGN)
+#define	__DARWIN_ALIGN(p)	((__darwin_size_t)((char *)(uintptr_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES)
+#endif
+
+#if !defined(__DARWIN_ALIGNBYTES32)
+#define __DARWIN_ALIGNBYTES32     (sizeof(__uint32_t) - 1)
+#endif
+
+#if !defined(__DARWIN_ALIGN32)
+#define __DARWIN_ALIGN32(p)       ((__darwin_size_t)((char *)(uintptr_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32)
+#endif
+#define CMSG_ALIGN(n)   __DARWIN_ALIGN32(n)
+#endif
+#define I_AM_HERE \
+                do { \
+			SCTP_PRINTF("%s:%d at %s\n", __FILE__, __LINE__ , __func__); \
+		} while (0)
+
+#ifndef timevalsub
+#define timevalsub(tp1, tp2)                       \
+	do {                                       \
+		(tp1)->tv_sec -= (tp2)->tv_sec;    \
+		(tp1)->tv_usec -= (tp2)->tv_usec;  \
+		if ((tp1)->tv_usec < 0) {          \
+			(tp1)->tv_sec--;           \
+			(tp1)->tv_usec += 1000000; \
+		}                                  \
+	} while (0)
+#endif
+
+#if defined(__Userspace_os_Linux)
+#if !defined(TAILQ_FOREACH_SAFE)
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar)             \
+         for ((var) = ((head)->tqh_first);                     \
+              (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+              (var) = (tvar))
+#endif
+#if !defined(LIST_FOREACH_SAFE)
+#define LIST_FOREACH_SAFE(var, head, field, tvar)              \
+         for ((var) = ((head)->lh_first);                      \
+              (var) && ((tvar) = LIST_NEXT((var), field), 1);  \
+              (var) = (tvar))
+#endif
+#endif
+#if defined(__Userspace_os_DragonFly)
+#define TAILQ_FOREACH_SAFE TAILQ_FOREACH_MUTABLE
+#define LIST_FOREACH_SAFE LIST_FOREACH_MUTABLE
+#endif
+
+#if defined(__Userspace_os_NaCl)
+#define	timercmp(tvp, uvp, cmp)						\
+	(((tvp)->tv_sec == (uvp)->tv_sec) ?				\
+	    ((tvp)->tv_usec cmp (uvp)->tv_usec) :			\
+	    ((tvp)->tv_sec cmp (uvp)->tv_sec))
+#endif
+
+#endif
diff --git a/usrsctplib/netinet/sctp_output.c b/usrsctplib/netinet/sctp_output.c
new file mode 100755
index 0000000..dc4b908
--- /dev/null
+++ b/usrsctplib/netinet/sctp_output.c
@@ -0,0 +1,15009 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 310642 2016-12-27 22:14:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#ifdef __FreeBSD__
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_bsd_addr.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp_crc32.h>
+#if defined(__Userspace_os_Linux)
+#define __FAVOR_BSD    /* (on Ubuntu at least) enables UDP header field names like BSD in RFC 768 */
+#endif
+#if defined(INET) || defined(INET6)
+#if !defined(__Userspace_os_Windows)
+#include <netinet/udp.h>
+#endif
+#endif
+#if defined(__APPLE__)
+#include <netinet/in.h>
+#endif
+#if defined(__FreeBSD__)
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+#include <netinet/udp_var.h>
+#endif
+#include <machine/in_cksum.h>
+#endif
+#if defined(__Userspace__) && defined(INET6)
+#include <netinet6/sctp6_var.h>
+#endif
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 3
+#endif
+
+#if defined(__APPLE__)
+#if !(defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD))
+#define SCTP_MAX_LINKHDR 16
+#endif
+#endif
+
+#define SCTP_MAX_GAPS_INARRAY 4
+struct sack_track {
+	uint8_t right_edge;	/* mergable on the right edge */
+	uint8_t left_edge;	/* mergable on the left edge */
+	uint8_t num_entries;
+	uint8_t spare;
+	struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
+};
+
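+/*
+ * Pre-computed gap-ack descriptors for every possible value of one byte of
+ * the SACK mapping array (bit n set meaning the n-th TSN covered by that byte
+ * was received).  Each entry lists the start/end bit offsets of the gap ack
+ * blocks for that pattern; right_edge is set when bit 0 is set and left_edge
+ * when bit 7 is set, so runs continuing into a neighbouring byte can be
+ * merged by the caller.
+ */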
+const struct sack_track sack_array[256] = {
+	{0, 0, 0, 0,		/* 0x00 */
+		{{0, 0},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 1, 0,		/* 0x01 */
+		{{0, 0},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x02 */
+		{{1, 1},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 1, 0,		/* 0x03 */
+		{{0, 1},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x04 */
+		{{2, 2},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x05 */
+		{{0, 0},
+		{2, 2},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x06 */
+		{{1, 2},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 1, 0,		/* 0x07 */
+		{{0, 2},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x08 */
+		{{3, 3},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x09 */
+		{{0, 0},
+		{3, 3},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x0a */
+		{{1, 1},
+		{3, 3},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x0b */
+		{{0, 1},
+		{3, 3},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x0c */
+		{{2, 3},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x0d */
+		{{0, 0},
+		{2, 3},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x0e */
+		{{1, 3},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 1, 0,		/* 0x0f */
+		{{0, 3},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x10 */
+		{{4, 4},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x11 */
+		{{0, 0},
+		{4, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x12 */
+		{{1, 1},
+		{4, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x13 */
+		{{0, 1},
+		{4, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x14 */
+		{{2, 2},
+		{4, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x15 */
+		{{0, 0},
+		{2, 2},
+		{4, 4},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x16 */
+		{{1, 2},
+		{4, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x17 */
+		{{0, 2},
+		{4, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x18 */
+		{{3, 4},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x19 */
+		{{0, 0},
+		{3, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x1a */
+		{{1, 1},
+		{3, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x1b */
+		{{0, 1},
+		{3, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x1c */
+		{{2, 4},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x1d */
+		{{0, 0},
+		{2, 4},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x1e */
+		{{1, 4},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 1, 0,		/* 0x1f */
+		{{0, 4},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x20 */
+		{{5, 5},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x21 */
+		{{0, 0},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x22 */
+		{{1, 1},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x23 */
+		{{0, 1},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x24 */
+		{{2, 2},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x25 */
+		{{0, 0},
+		{2, 2},
+		{5, 5},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x26 */
+		{{1, 2},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x27 */
+		{{0, 2},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x28 */
+		{{3, 3},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x29 */
+		{{0, 0},
+		{3, 3},
+		{5, 5},
+		{0, 0}
+		}
+	},
+	{0, 0, 3, 0,		/* 0x2a */
+		{{1, 1},
+		{3, 3},
+		{5, 5},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x2b */
+		{{0, 1},
+		{3, 3},
+		{5, 5},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x2c */
+		{{2, 3},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x2d */
+		{{0, 0},
+		{2, 3},
+		{5, 5},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x2e */
+		{{1, 3},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x2f */
+		{{0, 3},
+		{5, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x30 */
+		{{4, 5},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x31 */
+		{{0, 0},
+		{4, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x32 */
+		{{1, 1},
+		{4, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x33 */
+		{{0, 1},
+		{4, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x34 */
+		{{2, 2},
+		{4, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x35 */
+		{{0, 0},
+		{2, 2},
+		{4, 5},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x36 */
+		{{1, 2},
+		{4, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x37 */
+		{{0, 2},
+		{4, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x38 */
+		{{3, 5},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x39 */
+		{{0, 0},
+		{3, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x3a */
+		{{1, 1},
+		{3, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x3b */
+		{{0, 1},
+		{3, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x3c */
+		{{2, 5},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x3d */
+		{{0, 0},
+		{2, 5},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x3e */
+		{{1, 5},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 1, 0,		/* 0x3f */
+		{{0, 5},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x40 */
+		{{6, 6},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x41 */
+		{{0, 0},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x42 */
+		{{1, 1},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x43 */
+		{{0, 1},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x44 */
+		{{2, 2},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x45 */
+		{{0, 0},
+		{2, 2},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x46 */
+		{{1, 2},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x47 */
+		{{0, 2},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x48 */
+		{{3, 3},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x49 */
+		{{0, 0},
+		{3, 3},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 3, 0,		/* 0x4a */
+		{{1, 1},
+		{3, 3},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x4b */
+		{{0, 1},
+		{3, 3},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x4c */
+		{{2, 3},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x4d */
+		{{0, 0},
+		{2, 3},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x4e */
+		{{1, 3},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x4f */
+		{{0, 3},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x50 */
+		{{4, 4},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x51 */
+		{{0, 0},
+		{4, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 3, 0,		/* 0x52 */
+		{{1, 1},
+		{4, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x53 */
+		{{0, 1},
+		{4, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 3, 0,		/* 0x54 */
+		{{2, 2},
+		{4, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{1, 0, 4, 0,		/* 0x55 */
+		{{0, 0},
+		{2, 2},
+		{4, 4},
+		{6, 6}
+		}
+	},
+	{0, 0, 3, 0,		/* 0x56 */
+		{{1, 2},
+		{4, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x57 */
+		{{0, 2},
+		{4, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x58 */
+		{{3, 4},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x59 */
+		{{0, 0},
+		{3, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 3, 0,		/* 0x5a */
+		{{1, 1},
+		{3, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x5b */
+		{{0, 1},
+		{3, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x5c */
+		{{2, 4},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x5d */
+		{{0, 0},
+		{2, 4},
+		{6, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x5e */
+		{{1, 4},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x5f */
+		{{0, 4},
+		{6, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x60 */
+		{{5, 6},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x61 */
+		{{0, 0},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x62 */
+		{{1, 1},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x63 */
+		{{0, 1},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x64 */
+		{{2, 2},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x65 */
+		{{0, 0},
+		{2, 2},
+		{5, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x66 */
+		{{1, 2},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x67 */
+		{{0, 2},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x68 */
+		{{3, 3},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x69 */
+		{{0, 0},
+		{3, 3},
+		{5, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 3, 0,		/* 0x6a */
+		{{1, 1},
+		{3, 3},
+		{5, 6},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x6b */
+		{{0, 1},
+		{3, 3},
+		{5, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x6c */
+		{{2, 3},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x6d */
+		{{0, 0},
+		{2, 3},
+		{5, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x6e */
+		{{1, 3},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x6f */
+		{{0, 3},
+		{5, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x70 */
+		{{4, 6},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x71 */
+		{{0, 0},
+		{4, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x72 */
+		{{1, 1},
+		{4, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x73 */
+		{{0, 1},
+		{4, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x74 */
+		{{2, 2},
+		{4, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 3, 0,		/* 0x75 */
+		{{0, 0},
+		{2, 2},
+		{4, 6},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x76 */
+		{{1, 2},
+		{4, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x77 */
+		{{0, 2},
+		{4, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x78 */
+		{{3, 6},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x79 */
+		{{0, 0},
+		{3, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 2, 0,		/* 0x7a */
+		{{1, 1},
+		{3, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x7b */
+		{{0, 1},
+		{3, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x7c */
+		{{2, 6},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 2, 0,		/* 0x7d */
+		{{0, 0},
+		{2, 6},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 0, 1, 0,		/* 0x7e */
+		{{1, 6},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 0, 1, 0,		/* 0x7f */
+		{{0, 6},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 1, 0,		/* 0x80 */
+		{{7, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0x81 */
+		{{0, 0},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x82 */
+		{{1, 1},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0x83 */
+		{{0, 1},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x84 */
+		{{2, 2},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x85 */
+		{{0, 0},
+		{2, 2},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x86 */
+		{{1, 2},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0x87 */
+		{{0, 2},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x88 */
+		{{3, 3},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x89 */
+		{{0, 0},
+		{3, 3},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0x8a */
+		{{1, 1},
+		{3, 3},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x8b */
+		{{0, 1},
+		{3, 3},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x8c */
+		{{2, 3},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x8d */
+		{{0, 0},
+		{2, 3},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x8e */
+		{{1, 3},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0x8f */
+		{{0, 3},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x90 */
+		{{4, 4},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x91 */
+		{{0, 0},
+		{4, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0x92 */
+		{{1, 1},
+		{4, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x93 */
+		{{0, 1},
+		{4, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0x94 */
+		{{2, 2},
+		{4, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 4, 0,		/* 0x95 */
+		{{0, 0},
+		{2, 2},
+		{4, 4},
+		{7, 7}
+		}
+	},
+	{0, 1, 3, 0,		/* 0x96 */
+		{{1, 2},
+		{4, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x97 */
+		{{0, 2},
+		{4, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x98 */
+		{{3, 4},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x99 */
+		{{0, 0},
+		{3, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0x9a */
+		{{1, 1},
+		{3, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x9b */
+		{{0, 1},
+		{3, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x9c */
+		{{2, 4},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0x9d */
+		{{0, 0},
+		{2, 4},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0x9e */
+		{{1, 4},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0x9f */
+		{{0, 4},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xa0 */
+		{{5, 5},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xa1 */
+		{{0, 0},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xa2 */
+		{{1, 1},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xa3 */
+		{{0, 1},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xa4 */
+		{{2, 2},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 4, 0,		/* 0xa5 */
+		{{0, 0},
+		{2, 2},
+		{5, 5},
+		{7, 7}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xa6 */
+		{{1, 2},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xa7 */
+		{{0, 2},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xa8 */
+		{{3, 3},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 4, 0,		/* 0xa9 */
+		{{0, 0},
+		{3, 3},
+		{5, 5},
+		{7, 7}
+		}
+	},
+	{0, 1, 4, 0,		/* 0xaa */
+		{{1, 1},
+		{3, 3},
+		{5, 5},
+		{7, 7}
+		}
+	},
+	{1, 1, 4, 0,		/* 0xab */
+		{{0, 1},
+		{3, 3},
+		{5, 5},
+		{7, 7}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xac */
+		{{2, 3},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 4, 0,		/* 0xad */
+		{{0, 0},
+		{2, 3},
+		{5, 5},
+		{7, 7}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xae */
+		{{1, 3},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xaf */
+		{{0, 3},
+		{5, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xb0 */
+		{{4, 5},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xb1 */
+		{{0, 0},
+		{4, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xb2 */
+		{{1, 1},
+		{4, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xb3 */
+		{{0, 1},
+		{4, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xb4 */
+		{{2, 2},
+		{4, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 4, 0,		/* 0xb5 */
+		{{0, 0},
+		{2, 2},
+		{4, 5},
+		{7, 7}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xb6 */
+		{{1, 2},
+		{4, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xb7 */
+		{{0, 2},
+		{4, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xb8 */
+		{{3, 5},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xb9 */
+		{{0, 0},
+		{3, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xba */
+		{{1, 1},
+		{3, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xbb */
+		{{0, 1},
+		{3, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xbc */
+		{{2, 5},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xbd */
+		{{0, 0},
+		{2, 5},
+		{7, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xbe */
+		{{1, 5},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xbf */
+		{{0, 5},
+		{7, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 1, 0,		/* 0xc0 */
+		{{6, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xc1 */
+		{{0, 0},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xc2 */
+		{{1, 1},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xc3 */
+		{{0, 1},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xc4 */
+		{{2, 2},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xc5 */
+		{{0, 0},
+		{2, 2},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xc6 */
+		{{1, 2},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xc7 */
+		{{0, 2},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xc8 */
+		{{3, 3},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xc9 */
+		{{0, 0},
+		{3, 3},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xca */
+		{{1, 1},
+		{3, 3},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xcb */
+		{{0, 1},
+		{3, 3},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xcc */
+		{{2, 3},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xcd */
+		{{0, 0},
+		{2, 3},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xce */
+		{{1, 3},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xcf */
+		{{0, 3},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xd0 */
+		{{4, 4},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xd1 */
+		{{0, 0},
+		{4, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xd2 */
+		{{1, 1},
+		{4, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xd3 */
+		{{0, 1},
+		{4, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xd4 */
+		{{2, 2},
+		{4, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 4, 0,		/* 0xd5 */
+		{{0, 0},
+		{2, 2},
+		{4, 4},
+		{6, 7}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xd6 */
+		{{1, 2},
+		{4, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xd7 */
+		{{0, 2},
+		{4, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xd8 */
+		{{3, 4},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xd9 */
+		{{0, 0},
+		{3, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xda */
+		{{1, 1},
+		{3, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xdb */
+		{{0, 1},
+		{3, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xdc */
+		{{2, 4},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xdd */
+		{{0, 0},
+		{2, 4},
+		{6, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xde */
+		{{1, 4},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xdf */
+		{{0, 4},
+		{6, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 1, 0,		/* 0xe0 */
+		{{5, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xe1 */
+		{{0, 0},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xe2 */
+		{{1, 1},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xe3 */
+		{{0, 1},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xe4 */
+		{{2, 2},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xe5 */
+		{{0, 0},
+		{2, 2},
+		{5, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xe6 */
+		{{1, 2},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xe7 */
+		{{0, 2},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xe8 */
+		{{3, 3},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xe9 */
+		{{0, 0},
+		{3, 3},
+		{5, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 3, 0,		/* 0xea */
+		{{1, 1},
+		{3, 3},
+		{5, 7},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xeb */
+		{{0, 1},
+		{3, 3},
+		{5, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xec */
+		{{2, 3},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xed */
+		{{0, 0},
+		{2, 3},
+		{5, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xee */
+		{{1, 3},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xef */
+		{{0, 3},
+		{5, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 1, 0,		/* 0xf0 */
+		{{4, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xf1 */
+		{{0, 0},
+		{4, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xf2 */
+		{{1, 1},
+		{4, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xf3 */
+		{{0, 1},
+		{4, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xf4 */
+		{{2, 2},
+		{4, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 3, 0,		/* 0xf5 */
+		{{0, 0},
+		{2, 2},
+		{4, 7},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xf6 */
+		{{1, 2},
+		{4, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xf7 */
+		{{0, 2},
+		{4, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 1, 0,		/* 0xf8 */
+		{{3, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xf9 */
+		{{0, 0},
+		{3, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 2, 0,		/* 0xfa */
+		{{1, 1},
+		{3, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xfb */
+		{{0, 1},
+		{3, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 1, 0,		/* 0xfc */
+		{{2, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 2, 0,		/* 0xfd */
+		{{0, 0},
+		{2, 7},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{0, 1, 1, 0,		/* 0xfe */
+		{{1, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	},
+	{1, 1, 1, 0,		/* 0xff */
+		{{0, 7},
+		{0, 0},
+		{0, 0},
+		{0, 0}
+		}
+	}
+};
+
+
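+/*
+ * Decide whether ifa may be used as a source given the scoping rules in
+ * scope: the address family must be legal for the association, unspecified
+ * addresses are rejected, and loopback, private IPv4, and site-local IPv6
+ * addresses are only allowed when the matching scope flag is set (IPv6
+ * link-local addresses are never in scope here). Returns 1 if usable,
+ * 0 otherwise.
+ */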
+int
+sctp_is_address_in_scope(struct sctp_ifa *ifa,
+                         struct sctp_scoping *scope,
+                         int do_update)
+{
+	if ((scope->loopback_scope == 0) &&
+	    (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
+		/*
+		 * skip loopback if not in scope
+		 */
+		return (0);
+	}
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		if (scope->ipv4_addr_legal) {
+			struct sockaddr_in *sin;
+
+			sin = &ifa->address.sin;
+			if (sin->sin_addr.s_addr == 0) {
+				/* not in scope, unspecified */
+				return (0);
+			}
+			if ((scope->ipv4_local_scope == 0) &&
+			    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+				/* private address not in scope */
+				return (0);
+			}
+		} else {
+			return (0);
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		if (scope->ipv6_addr_legal) {
+			struct sockaddr_in6 *sin6;
+
+#if !defined(__Panda__)
+			/* Must update the flags, which unfortunately
+			 * means any IFA locks must now be applied here.
+			 */
+			if (do_update) {
+				sctp_gather_internal_ifa_flags(ifa);
+			}
+#endif
+			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+				return (0);
+			}
+			/* ok to use deprecated addresses? */
+			sin6 = &ifa->address.sin6;
+			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				/* skip unspecified addresses */
+				return (0);
+			}
+			if (		/* (local_scope == 0) && */
+			    (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
+				return (0);
+			}
+			if ((scope->site_scope == 0) &&
+			    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+				return (0);
+			}
+		} else {
+			return (0);
+		}
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		if (!scope->conn_addr_legal) {
+			return (0);
+		}
+		break;
+#endif
+	default:
+		return (0);
+	}
+	return (1);
+}
+
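+/*
+ * Append an IPv4 or IPv6 address parameter for ifa to the mbuf chain m,
+ * allocating an additional mbuf when there is not enough trailing space.
+ * On success the mbuf holding the parameter is returned and *len (if
+ * non-NULL) is increased by the parameter length; for other address
+ * families, or if no mbuf can be allocated, m is returned unchanged.
+ */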
+static struct mbuf *
+sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa, uint16_t *len)
+{
+#if defined(INET) || defined(INET6)
+	struct sctp_paramhdr *parmh;
+	struct mbuf *mret;
+	uint16_t plen;
+#endif
+
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		plen = (uint16_t)sizeof(struct sctp_ipv4addr_param);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		plen = (uint16_t)sizeof(struct sctp_ipv6addr_param);
+		break;
+#endif
+	default:
+		return (m);
+	}
+#if defined(INET) || defined(INET6)
+	if (M_TRAILINGSPACE(m) >= plen) {
+		/* easy side we just drop it on the end */
+		parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
+		mret = m;
+	} else {
+		/* Need more space */
+		mret = m;
+		while (SCTP_BUF_NEXT(mret) != NULL) {
+			mret = SCTP_BUF_NEXT(mret);
+		}
+		SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(plen, 0, M_NOWAIT, 1, MT_DATA);
+		if (SCTP_BUF_NEXT(mret) == NULL) {
+			/* We are hosed, can't add more addresses */
+			return (m);
+		}
+		mret = SCTP_BUF_NEXT(mret);
+		parmh = mtod(mret, struct sctp_paramhdr *);
+	}
+	/* now add the parameter */
+	switch (ifa->address.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+	{
+		struct sctp_ipv4addr_param *ipv4p;
+		struct sockaddr_in *sin;
+
+		sin = &ifa->address.sin;
+		ipv4p = (struct sctp_ipv4addr_param *)parmh;
+		parmh->param_type = htons(SCTP_IPV4_ADDRESS);
+		parmh->param_length = htons(plen);
+		ipv4p->addr = sin->sin_addr.s_addr;
+		SCTP_BUF_LEN(mret) += plen;
+		break;
+	}
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sctp_ipv6addr_param *ipv6p;
+		struct sockaddr_in6 *sin6;
+
+		sin6 = &ifa->address.sin6;
+		ipv6p = (struct sctp_ipv6addr_param *)parmh;
+		parmh->param_type = htons(SCTP_IPV6_ADDRESS);
+		parmh->param_length = htons(plen);
+		memcpy(ipv6p->addr, &sin6->sin6_addr,
+		    sizeof(ipv6p->addr));
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+		/* clear embedded scope in the address */
+		in6_clearscope((struct in6_addr *)ipv6p->addr);
+#endif
+		SCTP_BUF_LEN(mret) += plen;
+		break;
+	}
+#endif
+	default:
+		return (m);
+	}
+	if (len != NULL) {
+		*len += plen;
+	}
+	return (mret);
+#endif
+}
+
+
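+/*
+ * Append the local address parameters to the INIT/INIT-ACK chunk held in
+ * m_at. For a bound-all endpoint the candidates come from the VRF's
+ * interface lists, otherwise from the endpoint's bound address list. In
+ * both cases addresses are only listed when more than one is usable, so a
+ * single-homed endpoint lets the peer take the address from the packet
+ * itself.
+ */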
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                           struct sctp_scoping *scope,
+			   struct mbuf *m_at, int cnt_inits_to,
+			   uint16_t *padding_len, uint16_t *chunk_len)
+{
+	struct sctp_vrf *vrf = NULL;
+	int cnt, limit_out = 0, total_count;
+	uint32_t vrf_id;
+
+	vrf_id = inp->def_vrf_id;
+	SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		SCTP_IPI_ADDR_RUNLOCK();
+		return (m_at);
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		struct sctp_ifa *sctp_ifap;
+		struct sctp_ifn *sctp_ifnp;
+
+		cnt = cnt_inits_to;
+		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
+			limit_out = 1;
+			cnt = SCTP_ADDRESS_LIMIT;
+			goto skip_count;
+		}
+		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+			if ((scope->loopback_scope == 0) &&
+			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
+				/*
+				 * Skip loopback devices if loopback_scope
+				 * not set
+				 */
+				continue;
+			}
+			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+				if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
+				    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+				                      &sctp_ifap->address.sin.sin_addr) != 0)) {
+					continue;
+				}
+#endif
+#ifdef INET6
+				if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
+				    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+				                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
+					continue;
+				}
+#endif
+#endif
+				if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
+					continue;
+				}
+#if defined(__Userspace__)
+				if (sctp_ifap->address.sa.sa_family == AF_CONN) {
+					continue;
+				}
+#endif
+				if (sctp_is_address_in_scope(sctp_ifap, scope, 1) == 0) {
+					continue;
+				}
+				cnt++;
+				if (cnt > SCTP_ADDRESS_LIMIT) {
+					break;
+				}
+			}
+			if (cnt > SCTP_ADDRESS_LIMIT) {
+				break;
+			}
+		}
+	skip_count:
+		if (cnt > 1) {
+			total_count = 0;
+			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
+				cnt = 0;
+				if ((scope->loopback_scope == 0) &&
+				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
+					/*
+					 * Skip loopback devices if
+					 * loopback_scope not set
+					 */
+					continue;
+				}
+				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+					if ((sctp_ifap->address.sa.sa_family == AF_INET) &&
+					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+					                      &sctp_ifap->address.sin.sin_addr) != 0)) {
+						continue;
+					}
+#endif
+#ifdef INET6
+					if ((sctp_ifap->address.sa.sa_family == AF_INET6) &&
+					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+					                      &sctp_ifap->address.sin6.sin6_addr) != 0)) {
+						continue;
+					}
+#endif
+#endif
+					if (sctp_is_addr_restricted(stcb, sctp_ifap)) {
+						continue;
+					}
+#if defined(__Userspace__)
+					if (sctp_ifap->address.sa.sa_family == AF_CONN) {
+						continue;
+					}
+#endif
+					if (sctp_is_address_in_scope(sctp_ifap,
+								     scope, 0) == 0) {
+						continue;
+					}
+					if ((chunk_len != NULL) &&
+					    (padding_len != NULL) &&
+					    (*padding_len > 0)) {
+						memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
+						SCTP_BUF_LEN(m_at) += *padding_len;
+						*chunk_len += *padding_len;
+						*padding_len = 0;
+					}
+					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap, chunk_len);
+					if (limit_out) {
+						cnt++;
+						total_count++;
+						if (cnt >= 2) {
+							/* two from each address */
+							break;
+						}
+						if (total_count > SCTP_ADDRESS_LIMIT) {
+							/* No more addresses */
+							break;
+						}
+					}
+				}
+			}
+		}
+	} else {
+		struct sctp_laddr *laddr;
+
+		cnt = cnt_inits_to;
+		/* First, how many ? */
+		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+			if (laddr->ifa == NULL) {
+				continue;
+			}
+			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
+				/* Address being deleted by the system, don't
+				 * list.
+				 */
+				continue;
+			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+				/* Address being deleted on this ep,
+				 * don't list.
+				 */
+				continue;
+			}
+#if defined(__Userspace__)
+			if (laddr->ifa->address.sa.sa_family == AF_CONN) {
+				continue;
+			}
+#endif
+			if (sctp_is_address_in_scope(laddr->ifa,
+						     scope, 1) == 0) {
+				continue;
+			}
+			cnt++;
+		}
+		/*
+		 * To get through a NAT we only list addresses if we have
+		 * more than one. That way if you just bind a single address
+		 * we let the source of the init dictate our address.
+		 */
+		if (cnt > 1) {
+			cnt = cnt_inits_to;
+			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+				if (laddr->ifa == NULL) {
+					continue;
+				}
+				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+					continue;
+				}
+#if defined(__Userspace__)
+				if (laddr->ifa->address.sa.sa_family == AF_CONN) {
+					continue;
+				}
+#endif
+				if (sctp_is_address_in_scope(laddr->ifa,
+							     scope, 0) == 0) {
+					continue;
+				}
+				if ((chunk_len != NULL) &&
+				    (padding_len != NULL) &&
+				    (*padding_len > 0)) {
+					memset(mtod(m_at, caddr_t) + *chunk_len, 0, *padding_len);
+					SCTP_BUF_LEN(m_at) += *padding_len;
+					*chunk_len += *padding_len;
+					*padding_len = 0;
+				}
+				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa, chunk_len);
+				cnt++;
+				if (cnt >= SCTP_ADDRESS_LIMIT) {
+					break;
+				}
+			}
+		}
+	}
+	SCTP_IPI_ADDR_RUNLOCK();
+	return (m_at);
+}
+
+static struct sctp_ifa *
+sctp_is_ifa_addr_preferred(struct sctp_ifa *ifa,
+			   uint8_t dest_is_loop,
+			   uint8_t dest_is_priv,
+			   sa_family_t fam)
+{
+	uint8_t dest_is_global = 0;
+	/* dest_is_priv is true if destination is a private address */
+	/* dest_is_loop is true if destination is a loopback addresses */
+
+	/**
+	 * Here we determine if it is a preferred address. A preferred address
+	 * means it is the same scope or higher scope than the destination.
+	 * L = loopback, P = private, G = global
+	 * -----------------------------------------
+	 *    src    |  dest | result
+	 *  ----------------------------------------
+	 *     L     |    L  |    yes
+	 *  -----------------------------------------
+	 *     P     |    L  |    yes-v4 no-v6
+	 *  -----------------------------------------
+	 *     G     |    L  |    yes-v4 no-v6
+	 *  -----------------------------------------
+	 *     L     |    P  |    no
+	 *  -----------------------------------------
+	 *     P     |    P  |    yes
+	 *  -----------------------------------------
+	 *     G     |    P  |    no
+	 *   -----------------------------------------
+	 *     L     |    G  |    no
+	 *   -----------------------------------------
+	 *     P     |    G  |    no
+	 *    -----------------------------------------
+	 *     G     |    G  |    yes
+	 *    -----------------------------------------
+	 */
+
+	if (ifa->address.sa.sa_family != fam) {
+		/* forget mismatched family */
+		return (NULL);
+	}
+	if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
+		dest_is_global = 1;
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Is destination preferred:");
+	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &ifa->address.sa);
+	/* Ok the address may be ok */
+#ifdef INET6
+	if (fam == AF_INET6) {
+		/* ok to use deprecated addresses? no lets not! */
+		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:1\n");
+			return (NULL);
+		}
+		if (ifa->src_is_priv && !ifa->src_is_loop) {
+			if (dest_is_loop) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:2\n");
+				return (NULL);
+			}
+		}
+		if (ifa->src_is_glob) {
+			if (dest_is_loop) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:3\n");
+				return (NULL);
+			}
+		}
+	}
+#endif
+	/* Now that we know what is what, implement our table.
+	 * This could in theory be done slicker (it used to be), but this
+	 * is straightforward and easier to validate :-)
+	 */
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "src_loop:%d src_priv:%d src_glob:%d\n",
+		ifa->src_is_loop, ifa->src_is_priv, ifa->src_is_glob);
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dest_loop:%d dest_priv:%d dest_glob:%d\n",
+		dest_is_loop, dest_is_priv, dest_is_global);
+
+	if ((ifa->src_is_loop) && (dest_is_priv)) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:4\n");
+		return (NULL);
+	}
+	if ((ifa->src_is_glob) && (dest_is_priv)) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:5\n");
+		return (NULL);
+	}
+	if ((ifa->src_is_loop) && (dest_is_global)) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:6\n");
+		return (NULL);
+	}
+	if ((ifa->src_is_priv) && (dest_is_global)) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "NO:7\n");
+		return (NULL);
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "YES\n");
+	/* it's a preferred address */
+	return (ifa);
+}
+
+static struct sctp_ifa *
+sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
+			    uint8_t dest_is_loop,
+			    uint8_t dest_is_priv,
+			    sa_family_t fam)
+{
+	uint8_t dest_is_global = 0;
+
+	/**
+	 * Here we determine if it is an acceptable address. An acceptable
+	 * address means it is the same scope or higher scope, but we can
+	 * allow for NAT, which means it is OK to have a global dest and a
+	 * private src.
+	 *
+	 * L = loopback, P = private, G = global
+	 * -----------------------------------------
+	 *  src    |  dest | result
+	 * -----------------------------------------
+	 *   L     |   L   |    yes
+	 *  -----------------------------------------
+	 *   P     |   L   |    yes-v4 no-v6
+	 *  -----------------------------------------
+	 *   G     |   L   |    yes
+	 * -----------------------------------------
+	 *   L     |   P   |    no
+	 * -----------------------------------------
+	 *   P     |   P   |    yes
+	 * -----------------------------------------
+	 *   G     |   P   |    yes - May not work
+	 * -----------------------------------------
+	 *   L     |   G   |    no
+	 * -----------------------------------------
+	 *   P     |   G   |    yes - May not work
+	 * -----------------------------------------
+	 *   G     |   G   |    yes
+	 * -----------------------------------------
+	 */
+
+	if (ifa->address.sa.sa_family != fam) {
+		/* forget non-matching family */
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa_fam:%d fam:%d\n",
+			ifa->address.sa.sa_family, fam);
+		return (NULL);
+	}
+	/* Ok the address may be ok */
+	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, &ifa->address.sa);
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst_is_loop:%d dest_is_priv:%d\n",
+		dest_is_loop, dest_is_priv);
+	if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
+		dest_is_global = 1;
+	}
+#ifdef INET6
+	if (fam == AF_INET6) {
+		/* ok to use deprecated addresses? */
+		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+			return (NULL);
+		}
+		if (ifa->src_is_priv) {
+			/* Special case, linklocal to loop */
+			if (dest_is_loop)
+				return (NULL);
+		}
+	}
+#endif
+	/*
+	 * Now that we know what is what, implement our table.
+	 * This could in theory be done slicker (it used to be), but this
+	 * is straightforward and easier to validate :-)
+	 */
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_priv:%d\n",
+		ifa->src_is_loop,
+		dest_is_priv);
+	if ((ifa->src_is_loop == 1) && (dest_is_priv)) {
+		return (NULL);
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "ifa->src_is_loop:%d dest_is_glob:%d\n",
+		ifa->src_is_loop,
+		dest_is_global);
+	if ((ifa->src_is_loop == 1) && (dest_is_global)) {
+		return (NULL);
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "address is acceptable\n");
+	/* it's an acceptable address */
+	return (ifa);
+}
+
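+/*
+ * Return 1 if ifa is on the association's restricted address list,
+ * 0 otherwise (including when no TCB is given).
+ */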
+int
+sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+	struct sctp_laddr *laddr;
+
+	if (stcb == NULL) {
+		/* There are no restrictions, no TCB :-) */
+		return (0);
+	}
+	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
+		if (laddr->ifa == NULL) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+				__func__);
+			continue;
+		}
+		if (laddr->ifa == ifa) {
+			/* Yes it is on the list */
+			return (1);
+		}
+	}
+	return (0);
+}
+
+
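+/*
+ * Return 1 if ifa is bound to the endpoint and not pending an address
+ * change (laddr->action == 0), 0 otherwise.
+ */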
+int
+sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
+{
+	struct sctp_laddr *laddr;
+
+	if (ifa == NULL)
+		return (0);
+	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+		if (laddr->ifa == NULL) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+				__func__);
+			continue;
+		}
+		if ((laddr->ifa == ifa) && laddr->action == 0)
+			/* same pointer */
+			return (1);
+	}
+	return (0);
+}
+
+
+
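+/*
+ * Source selection for a subset-bound endpoint without an association:
+ * first try a preferred bound address on the interface the route emits on,
+ * then rotate through the endpoint's address list looking for a preferred
+ * address, and finally for a merely acceptable one. The chosen ifa is
+ * returned with its refcount bumped, or NULL if nothing qualifies.
+ */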
+static struct sctp_ifa *
+sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
+			      sctp_route_t *ro,
+			      uint32_t vrf_id,
+			      int non_asoc_addr_ok,
+			      uint8_t dest_is_priv,
+			      uint8_t dest_is_loop,
+			      sa_family_t fam)
+{
+	struct sctp_laddr *laddr, *starting_point;
+	void *ifn;
+	int resettotop = 0;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa, *sifa;
+	struct sctp_vrf *vrf;
+	uint32_t ifn_index;
+
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL)
+		return (NULL);
+
+	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+	/*
+	 * First question: is the ifn we will emit on in our list? If so, we
+	 * want such an address. Note that we first look for a
+	 * preferred address.
+	 */
+	if (sctp_ifn) {
+		/* is a preferred one on the interface we route out? */
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
+				continue;
+			}
+#endif
+#ifdef INET6
+			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+				continue;
+			}
+#endif
+#endif
+			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+			    (non_asoc_addr_ok == 0))
+				continue;
+			sifa = sctp_is_ifa_addr_preferred(sctp_ifa,
+							  dest_is_loop,
+							  dest_is_priv, fam);
+			if (sifa == NULL)
+				continue;
+			if (sctp_is_addr_in_ep(inp, sifa)) {
+				atomic_add_int(&sifa->refcount, 1);
+				return (sifa);
+			}
+		}
+	}
+	/*
+	 * OK, now we need to find one on the list of the addresses.
+	 * We couldn't get one on the emitting interface, so let's first
+	 * find a preferred one. Failing that, an acceptable one;
+	 * otherwise we return NULL.
+	 */
+	starting_point = inp->next_addr_touse;
+ once_again:
+	if (inp->next_addr_touse == NULL) {
+		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
+		resettotop = 1;
+	}
+	for (laddr = inp->next_addr_touse; laddr;
+	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+		if (laddr->ifa == NULL) {
+			/* address has been removed */
+			continue;
+		}
+		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+			/* address is being deleted */
+			continue;
+		}
+		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop,
+						  dest_is_priv, fam);
+		if (sifa == NULL)
+			continue;
+		atomic_add_int(&sifa->refcount, 1);
+		return (sifa);
+	}
+	if (resettotop == 0) {
+		inp->next_addr_touse = NULL;
+		goto once_again;
+	}
+
+	inp->next_addr_touse = starting_point;
+	resettotop = 0;
+ once_again_too:
+	if (inp->next_addr_touse == NULL) {
+		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
+		resettotop = 1;
+	}
+
+	/* ok, what about an acceptable address in the inp */
+	for (laddr = inp->next_addr_touse; laddr;
+	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+		if (laddr->ifa == NULL) {
+			/* address has been removed */
+			continue;
+		}
+		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+			/* address is being deleted */
+			continue;
+		}
+		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
+						   dest_is_priv, fam);
+		if (sifa == NULL)
+			continue;
+		atomic_add_int(&sifa->refcount, 1);
+		return (sifa);
+	}
+	if (resettotop == 0) {
+		inp->next_addr_touse = NULL;
+		goto once_again_too;
+	}
+
+	/*
+	 * No bound address can be a source for the destination; we are in
+	 * trouble.
+	 */
+	return (NULL);
+}
+
+
+
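+/*
+ * Source selection for a subset-bound endpoint with an association: same
+ * idea as the inp variant, but addresses on the association's restricted
+ * list are skipped and the rotation resumes from asoc.last_used_address.
+ * Returns the chosen ifa with its refcount bumped, or NULL.
+ */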
+static struct sctp_ifa *
+sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
+			       struct sctp_tcb *stcb,
+			       sctp_route_t *ro,
+			       uint32_t vrf_id,
+			       uint8_t dest_is_priv,
+			       uint8_t dest_is_loop,
+			       int non_asoc_addr_ok,
+			       sa_family_t fam)
+{
+	struct sctp_laddr *laddr, *starting_point;
+	void *ifn;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa, *sifa;
+	uint8_t start_at_beginning = 0;
+	struct sctp_vrf *vrf;
+	uint32_t ifn_index;
+
+	/*
+	 * First question: is the ifn we will emit on in our list? If so, we
+	 * want that one.
+	 */
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL)
+		return (NULL);
+
+	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+	sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+
+	/*
+	 * First question: is the ifn we will emit on in our list? If so,
+	 * we want that one. First we look for a preferred address. Second,
+	 * we go for an acceptable one.
+	 */
+	if (sctp_ifn) {
+		/* first try for a preferred address on the ep */
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
+				continue;
+			}
+#endif
+#ifdef INET6
+			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+				continue;
+			}
+#endif
+#endif
+			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
+				continue;
+			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
+				sifa = sctp_is_ifa_addr_preferred(sctp_ifa, dest_is_loop, dest_is_priv, fam);
+				if (sifa == NULL)
+					continue;
+				if (((non_asoc_addr_ok == 0) &&
+				     (sctp_is_addr_restricted(stcb, sifa))) ||
+				    (non_asoc_addr_ok &&
+				     (sctp_is_addr_restricted(stcb, sifa)) &&
+				     (!sctp_is_addr_pending(stcb, sifa)))) {
+					/* on the no-no list */
+					continue;
+				}
+				atomic_add_int(&sifa->refcount, 1);
+				return (sifa);
+			}
+		}
+		/* next try for an acceptable address on the ep */
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
+				continue;
+			}
+#endif
+#ifdef INET6
+			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+				continue;
+			}
+#endif
+#endif
+			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
+				continue;
+			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
+				sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
+				if (sifa == NULL)
+					continue;
+				if (((non_asoc_addr_ok == 0) &&
+				     (sctp_is_addr_restricted(stcb, sifa))) ||
+				    (non_asoc_addr_ok &&
+				     (sctp_is_addr_restricted(stcb, sifa)) &&
+				     (!sctp_is_addr_pending(stcb, sifa)))) {
+					/* on the no-no list */
+					continue;
+				}
+				atomic_add_int(&sifa->refcount, 1);
+				return (sifa);
+			}
+		}
+
+	}
+	/*
+	 * If we can't find one like that, then we must look at all
+	 * bound addresses, picking first a preferred one and then
+	 * an acceptable one.
+	 */
+	starting_point = stcb->asoc.last_used_address;
+ sctp_from_the_top:
+	if (stcb->asoc.last_used_address == NULL) {
+		start_at_beginning = 1;
+		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+	}
+	/* search beginning with the last used address */
+	for (laddr = stcb->asoc.last_used_address; laddr;
+	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+		if (laddr->ifa == NULL) {
+			/* address has been removed */
+			continue;
+		}
+		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+			/* address is being deleted */
+			continue;
+		}
+		sifa = sctp_is_ifa_addr_preferred(laddr->ifa, dest_is_loop, dest_is_priv, fam);
+		if (sifa == NULL)
+			continue;
+		if (((non_asoc_addr_ok == 0) &&
+		     (sctp_is_addr_restricted(stcb, sifa))) ||
+		    (non_asoc_addr_ok &&
+		     (sctp_is_addr_restricted(stcb, sifa)) &&
+		     (!sctp_is_addr_pending(stcb, sifa)))) {
+			/* on the no-no list */
+			continue;
+		}
+		stcb->asoc.last_used_address = laddr;
+		atomic_add_int(&sifa->refcount, 1);
+		return (sifa);
+	}
+	if (start_at_beginning == 0) {
+		stcb->asoc.last_used_address = NULL;
+		goto sctp_from_the_top;
+	}
+	/* now try for any higher scope than the destination */
+	stcb->asoc.last_used_address = starting_point;
+	start_at_beginning = 0;
+ sctp_from_the_top2:
+	if (stcb->asoc.last_used_address == NULL) {
+		start_at_beginning = 1;
+		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
+	}
+	/* search beginning with the last used address */
+	for (laddr = stcb->asoc.last_used_address; laddr;
+	     laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
+		if (laddr->ifa == NULL) {
+			/* address has been removed */
+			continue;
+		}
+		if (laddr->action == SCTP_DEL_IP_ADDRESS) {
+			/* address is being deleted */
+			continue;
+		}
+		sifa = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop,
+						   dest_is_priv, fam);
+		if (sifa == NULL)
+			continue;
+		if (((non_asoc_addr_ok == 0) &&
+		     (sctp_is_addr_restricted(stcb, sifa))) ||
+		    (non_asoc_addr_ok &&
+		     (sctp_is_addr_restricted(stcb, sifa)) &&
+		     (!sctp_is_addr_pending(stcb, sifa)))) {
+			/* on the no-no list */
+			continue;
+		}
+		stcb->asoc.last_used_address = laddr;
+		atomic_add_int(&sifa->refcount, 1);
+		return (sifa);
+	}
+	if (start_at_beginning == 0) {
+		stcb->asoc.last_used_address = NULL;
+		goto sctp_from_the_top2;
+	}
+	return (NULL);
+}
+
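+/*
+ * Walk ifn's address list and return the addr_wanted'th (0-based) preferred
+ * source address for the given destination scope and family, skipping
+ * jailed, defer-use, restricted, and out-of-scope entries. Returns NULL if
+ * not enough addresses qualify.
+ */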
+static struct sctp_ifa *
+sctp_select_nth_preferred_addr_from_ifn_boundall(struct sctp_ifn *ifn,
+#if defined(__FreeBSD__)
+                                                 struct sctp_inpcb *inp,
+#else
+                                                 struct sctp_inpcb *inp SCTP_UNUSED,
+#endif
+						 struct sctp_tcb *stcb,
+						 int non_asoc_addr_ok,
+						 uint8_t dest_is_loop,
+						 uint8_t dest_is_priv,
+						 int addr_wanted,
+						 sa_family_t fam,
+						 sctp_route_t *ro
+						 )
+{
+	struct sctp_ifa *ifa, *sifa;
+	int num_eligible_addr = 0;
+#ifdef INET6
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+	struct sockaddr_in6 sin6, lsa6;
+
+	if (fam == AF_INET6) {
+		memcpy(&sin6, &ro->ro_dst, sizeof(struct sockaddr_in6));
+#ifdef SCTP_KAME
+		(void)sa6_recoverscope(&sin6);
+#else
+		(void)in6_recoverscope(&sin6, &sin6.sin6_addr, NULL);
+#endif  /* SCTP_KAME */
+	}
+#endif  /* SCTP_EMBEDDED_V6_SCOPE */
+#endif	/* INET6 */
+	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+		if ((ifa->address.sa.sa_family == AF_INET) &&
+		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+		                      &ifa->address.sin.sin_addr) != 0)) {
+			continue;
+		}
+#endif
+#ifdef INET6
+		if ((ifa->address.sa.sa_family == AF_INET6) &&
+		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+		                      &ifa->address.sin6.sin6_addr) != 0)) {
+			continue;
+		}
+#endif
+#endif
+		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+		    (non_asoc_addr_ok == 0))
+			continue;
+		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
+						  dest_is_priv, fam);
+		if (sifa == NULL)
+			continue;
+#ifdef INET6
+		if (fam == AF_INET6 &&
+		    dest_is_loop &&
+		    sifa->src_is_loop && sifa->src_is_priv) {
+			/* don't allow fe80::1 to be a src on loop ::1; we don't list it
+			 * to the peer, so we would get an abort.
+			 */
+			continue;
+		}
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+		if (fam == AF_INET6 &&
+		    IN6_IS_ADDR_LINKLOCAL(&sifa->address.sin6.sin6_addr) &&
+		    IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
+			/* link-local <-> link-local must belong to the same scope. */
+			memcpy(&lsa6, &sifa->address.sin6, sizeof(struct sockaddr_in6));
+#ifdef SCTP_KAME
+			(void)sa6_recoverscope(&lsa6);
+#else
+			(void)in6_recoverscope(&lsa6, &lsa6.sin6_addr, NULL);
+#endif  /* SCTP_KAME */
+			if (sin6.sin6_scope_id != lsa6.sin6_scope_id) {
+				continue;
+			}
+		}
+#endif  /* SCTP_EMBEDDED_V6_SCOPE */
+#endif	/* INET6 */
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+		/* Check if the IPv6 address matches the next-hop.
+		   In the mobile case, the old IPv6 address may not have been
+		   deleted from the interface, so the interface holds both the
+		   previous and the new addresses. We should use the one
+		   corresponding to the next-hop. (by micchie)
+		 */
+#ifdef INET6
+		if (stcb && fam == AF_INET6 &&
+		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
+			if (sctp_v6src_match_nexthop(&sifa->address.sin6, ro)
+			    == 0) {
+				continue;
+			}
+		}
+#endif
+#ifdef INET
+		/* Avoid topologically incorrect IPv4 address */
+		if (stcb && fam == AF_INET &&
+		    sctp_is_mobility_feature_on(stcb->sctp_ep, SCTP_MOBILITY_BASE)) {
+			if (sctp_v4src_match_nexthop(sifa, ro) == 0) {
+				continue;
+			}
+		}
+#endif
+#endif
+		if (stcb) {
+			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
+				continue;
+			}
+			if (((non_asoc_addr_ok == 0) &&
+			     (sctp_is_addr_restricted(stcb, sifa))) ||
+			    (non_asoc_addr_ok &&
+			     (sctp_is_addr_restricted(stcb, sifa)) &&
+			     (!sctp_is_addr_pending(stcb, sifa)))) {
+				/*
+				 * It is restricted for some reason..
+				 * probably not yet added.
+				 */
+				continue;
+			}
+		}
+		if (num_eligible_addr >= addr_wanted) {
+			return (sifa);
+		}
+		num_eligible_addr++;
+	}
+	return (NULL);
+}
+
+
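+/*
+ * Count how many preferred source addresses ifn offers for the given
+ * destination scope and family, applying the same jail, defer-use, scope,
+ * and restricted-address filters as the selection above.
+ */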
+static int
+sctp_count_num_preferred_boundall(struct sctp_ifn *ifn,
+#if defined(__FreeBSD__)
+                                  struct sctp_inpcb *inp,
+#else
+                                  struct sctp_inpcb *inp SCTP_UNUSED,
+#endif
+				  struct sctp_tcb *stcb,
+				  int non_asoc_addr_ok,
+				  uint8_t dest_is_loop,
+				  uint8_t dest_is_priv,
+				  sa_family_t fam)
+{
+	struct sctp_ifa *ifa, *sifa;
+	int num_eligible_addr = 0;
+
+	LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+		if ((ifa->address.sa.sa_family == AF_INET) &&
+		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+		                      &ifa->address.sin.sin_addr) != 0)) {
+			continue;
+		}
+#endif
+#ifdef INET6
+		if ((ifa->address.sa.sa_family == AF_INET6) &&
+		    (stcb != NULL) &&
+		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+		                      &ifa->address.sin6.sin6_addr) != 0)) {
+			continue;
+		}
+#endif
+#endif
+		if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+		    (non_asoc_addr_ok == 0)) {
+			continue;
+		}
+		sifa = sctp_is_ifa_addr_preferred(ifa, dest_is_loop,
+						  dest_is_priv, fam);
+		if (sifa == NULL) {
+			continue;
+		}
+		if (stcb) {
+			if (sctp_is_address_in_scope(ifa, &stcb->asoc.scope, 0) == 0) {
+				continue;
+			}
+			if (((non_asoc_addr_ok == 0) &&
+			     (sctp_is_addr_restricted(stcb, sifa))) ||
+			    (non_asoc_addr_ok &&
+			     (sctp_is_addr_restricted(stcb, sifa)) &&
+			     (!sctp_is_addr_pending(stcb, sifa)))) {
+				/*
+				 * It is restricted for some reason..
+				 * probably not yet added.
+				 */
+				continue;
+			}
+		}
+		num_eligible_addr++;
+	}
+	return (num_eligible_addr);
+}
+
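+/*
+ * Source selection for a bound-all endpoint. Plan A: a preferred address on
+ * the interface the route emits on; plan_b: a preferred address on any
+ * other interface; plan_c: an acceptable address on the emit interface;
+ * plan_d: an acceptable address anywhere. If that still fails, IPv4
+ * private addresses are temporarily allowed and the search is retried.
+ */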
+static struct sctp_ifa *
+sctp_choose_boundall(struct sctp_inpcb *inp,
+                     struct sctp_tcb *stcb,
+		     struct sctp_nets *net,
+		     sctp_route_t *ro,
+		     uint32_t vrf_id,
+		     uint8_t dest_is_priv,
+		     uint8_t dest_is_loop,
+		     int non_asoc_addr_ok,
+		     sa_family_t fam)
+{
+	int cur_addr_num = 0, num_preferred = 0;
+	void *ifn;
+	struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
+	struct sctp_ifa *sctp_ifa, *sifa;
+	uint32_t ifn_index;
+	struct sctp_vrf *vrf;
+#ifdef INET
+	int retried = 0;
+#endif
+
+	/*-
+	 * For boundall we can use any address in the association.
+	 * If non_asoc_addr_ok is set we can use any address (at least in
+	 * theory). So we look for preferred addresses first. If we find one,
+	 * we use it. Otherwise we next try to get an address on the
+	 * interface, which we should be able to do (unless non_asoc_addr_ok
+	 * is false and we are routed out that way). In these cases where we
+	 * can't use the address of the interface we go through all the
+	 * ifn's looking for an address we can use and fill that in. Punting
+	 * means we send back address 0, which will probably cause problems
+	 * actually since then IP will fill in the address of the route ifn,
+	 * which means we probably already rejected it.. i.e. here comes an
+	 * abort :-<.
+	 */
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL)
+		return (NULL);
+
+	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
+	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn from route:%p ifn_index:%d\n", ifn, ifn_index);
+	emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(ifn, ifn_index);
+	if (sctp_ifn == NULL) {
+		/* ?? We don't have this guy ?? */
+		SCTPDBG(SCTP_DEBUG_OUTPUT2,"No ifn emit interface?\n");
+		goto bound_all_plan_b;
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT2,"ifn_index:%d name:%s is emit interface\n",
+		ifn_index, sctp_ifn->ifn_name);
+
+	if (net) {
+		cur_addr_num = net->indx_of_eligible_next_to_use;
+	}
+	num_preferred = sctp_count_num_preferred_boundall(sctp_ifn,
+							  inp, stcb,
+							  non_asoc_addr_ok,
+							  dest_is_loop,
+							  dest_is_priv, fam);
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Found %d preferred source addresses for intf:%s\n",
+		num_preferred, sctp_ifn->ifn_name);
+	if (num_preferred == 0) {
+		/*
+		 * no eligible addresses, we must use some other interface
+		 * address if we can find one.
+		 */
+		goto bound_all_plan_b;
+	}
+	/*
+	 * OK, we have num_eligible_addr set with how many we can use; this
+	 * may vary from call to call due to addresses being deprecated,
+	 * etc.
+	 */
+	if (cur_addr_num >= num_preferred) {
+		cur_addr_num = 0;
+	}
+	/*
+	 * select the nth address from the list (where cur_addr_num is the
+	 * nth) and 0 is the first one, 1 is the second one etc...
+	 */
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "cur_addr_num:%d\n", cur_addr_num);
+
+	sctp_ifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
+                                                                    dest_is_priv, cur_addr_num, fam, ro);
+
+	/* if sctp_ifa is NULL, something changed; fall to plan_b. */
+	if (sctp_ifa) {
+		atomic_add_int(&sctp_ifa->refcount, 1);
+		if (net) {
+			/* save off where the next one we will want */
+			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
+		}
+		return (sctp_ifa);
+	}
+	/*
+	 * plan_b: Look at all interfaces and find a preferred address. If
+	 * none is preferred, fall through to plan_c.
+	 */
+ bound_all_plan_b:
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan B\n");
+	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT2, "Examine interface %s\n",
+			sctp_ifn->ifn_name);
+		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+			/* wrong base scope */
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "skip\n");
+			continue;
+		}
+		if ((sctp_ifn == looked_at) && looked_at) {
+			/* already looked at this guy */
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "already seen\n");
+			continue;
+		}
+		num_preferred = sctp_count_num_preferred_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok,
+                                                                  dest_is_loop, dest_is_priv, fam);
+		SCTPDBG(SCTP_DEBUG_OUTPUT2,
+			"Found ifn:%p %d preferred source addresses\n",
+			ifn, num_preferred);
+		if (num_preferred == 0) {
+			/* None on this interface. */
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "No preferred -- skipping to next\n");
+			continue;
+		}
+		SCTPDBG(SCTP_DEBUG_OUTPUT2,
+			"num preferred:%d on interface:%p cur_addr_num:%d\n",
+			num_preferred, (void *)sctp_ifn, cur_addr_num);
+
+		/*
+		 * OK, we have num_eligible_addr set with how many we can
+		 * use; this may vary from call to call due to addresses
+		 * being deprecated, etc.
+		 */
+		if (cur_addr_num >= num_preferred) {
+			cur_addr_num = 0;
+		}
+		sifa = sctp_select_nth_preferred_addr_from_ifn_boundall(sctp_ifn, inp, stcb, non_asoc_addr_ok, dest_is_loop,
+                                                                        dest_is_priv, cur_addr_num, fam, ro);
+		if (sifa == NULL)
+			continue;
+		if (net) {
+			net->indx_of_eligible_next_to_use = cur_addr_num + 1;
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "we selected %d\n",
+				cur_addr_num);
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Source:");
+			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "Dest:");
+			SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &net->ro._l_addr.sa);
+		}
+		atomic_add_int(&sifa->refcount, 1);
+		return (sifa);
+	}
+#ifdef INET
+again_with_private_addresses_allowed:
+#endif
+	/* plan_c: do we have an acceptable address on the emit interface */
+	sifa = NULL;
+	SCTPDBG(SCTP_DEBUG_OUTPUT2,"Trying Plan C: find acceptable on interface\n");
+	if (emit_ifn == NULL) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jump to Plan D - no emit_ifn\n");
+		goto plan_d;
+	}
+	LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT2, "ifa:%p\n", (void *)sctp_ifa);
+#if defined(__FreeBSD__)
+#ifdef INET
+		if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+		    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+		                      &sctp_ifa->address.sin.sin_addr) != 0)) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
+			continue;
+		}
+#endif
+#ifdef INET6
+		if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+		    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+		                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Jailed\n");
+			continue;
+		}
+#endif
+#endif
+		if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+		    (non_asoc_addr_ok == 0)) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT2,"Defer\n");
+			continue;
+		}
+		sifa = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop,
+						   dest_is_priv, fam);
+		if (sifa == NULL) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "IFA not acceptable\n");
+			continue;
+		}
+		if (stcb) {
+			if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT2, "NOT in scope\n");
+				sifa = NULL;
+				continue;
+			}
+			if (((non_asoc_addr_ok == 0) &&
+			     (sctp_is_addr_restricted(stcb, sifa))) ||
+			    (non_asoc_addr_ok &&
+			     (sctp_is_addr_restricted(stcb, sifa)) &&
+			     (!sctp_is_addr_pending(stcb, sifa)))) {
+				/*
+				 * It is restricted for some
+				 * reason.. probably not yet added.
+				 */
+				SCTPDBG(SCTP_DEBUG_OUTPUT2, "Its restricted\n");
+				sifa = NULL;
+				continue;
+			}
+		}
+		atomic_add_int(&sifa->refcount, 1);
+		goto out;
+	}
+ plan_d:
+	/*
+	 * plan_d: We are in trouble. No preferred address on the emit
+	 * interface. And not even a preferred address on all interfaces.
+	 * Go out and see if we can find an acceptable address somewhere
+	 * amongst all interfaces.
+	 */
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Trying Plan D looked_at is %p\n", (void *)looked_at);
+	LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+		if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+			/* wrong base scope */
+			continue;
+		}
+		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+#if defined(__FreeBSD__)
+#ifdef INET
+			if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+			    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin.sin_addr) != 0)) {
+				continue;
+			}
+#endif
+#ifdef INET6
+			if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+			    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+			                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+				continue;
+			}
+#endif
+#endif
+			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+			    (non_asoc_addr_ok == 0))
+				continue;
+			sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
+							   dest_is_loop,
+							   dest_is_priv, fam);
+			if (sifa == NULL)
+				continue;
+			if (stcb) {
+				if (sctp_is_address_in_scope(sifa, &stcb->asoc.scope, 0) == 0) {
+					sifa = NULL;
+					continue;
+				}
+				if (((non_asoc_addr_ok == 0) &&
+				     (sctp_is_addr_restricted(stcb, sifa))) ||
+				    (non_asoc_addr_ok &&
+				     (sctp_is_addr_restricted(stcb, sifa)) &&
+				     (!sctp_is_addr_pending(stcb, sifa)))) {
+					/*
+					 * It is restricted for some
+					 * reason.. probably not yet added.
+					 */
+					sifa = NULL;
+					continue;
+				}
+			}
+			goto out;
+		}
+	}
+#ifdef INET
+	if (stcb) {
+		if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) {
+			stcb->asoc.scope.ipv4_local_scope = 1;
+			retried = 1;
+			goto again_with_private_addresses_allowed;
+		} else if (retried == 1) {
+			stcb->asoc.scope.ipv4_local_scope = 0;
+		}
+	}
+#endif
+out:
+#ifdef INET
+	if (sifa) {
+		if (retried == 1) {
+			LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+				if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+					/* wrong base scope */
+					continue;
+				}
+				LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+					struct sctp_ifa *tmp_sifa;
+
+#if defined(__FreeBSD__)
+#ifdef INET
+					if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
+					    (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+					                      &sctp_ifa->address.sin.sin_addr) != 0)) {
+						continue;
+					}
+#endif
+#ifdef INET6
+					if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
+					    (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+					                      &sctp_ifa->address.sin6.sin6_addr) != 0)) {
+						continue;
+					}
+#endif
+#endif
+					if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) &&
+					    (non_asoc_addr_ok == 0))
+						continue;
+					tmp_sifa = sctp_is_ifa_addr_acceptable(sctp_ifa,
+					                                       dest_is_loop,
+					                                       dest_is_priv, fam);
+					if (tmp_sifa == NULL) {
+						continue;
+					}
+					if (tmp_sifa == sifa) {
+						continue;
+					}
+					if (stcb) {
+						if (sctp_is_address_in_scope(tmp_sifa,
+						                             &stcb->asoc.scope, 0) == 0) {
+							continue;
+						}
+						if (((non_asoc_addr_ok == 0) &&
+						     (sctp_is_addr_restricted(stcb, tmp_sifa))) ||
+						    (non_asoc_addr_ok &&
+						     (sctp_is_addr_restricted(stcb, tmp_sifa)) &&
+						     (!sctp_is_addr_pending(stcb, tmp_sifa)))) {
+							/*
+							 * It is restricted for some
+							 * reason.. probably not yet added.
+							 */
+							continue;
+						}
+					}
+					if ((tmp_sifa->address.sin.sin_family == AF_INET) &&
+					    (IN4_ISPRIVATE_ADDRESS(&(tmp_sifa->address.sin.sin_addr)))) {
+						sctp_add_local_addr_restricted(stcb, tmp_sifa);
+					}
+				}
+			}
+		}
+		atomic_add_int(&sifa->refcount, 1);
+	}
+#endif
+	return (sifa);
+}
+
+
+
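+/*
+ * Top-level source address selection: allocate/cache the route if needed,
+ * derive the destination's scope (loopback/private/global), and dispatch
+ * to the bound-all or bound-specific selector.
+ */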
+/* tcb may be NULL */
+struct sctp_ifa *
+sctp_source_address_selection(struct sctp_inpcb *inp,
+			      struct sctp_tcb *stcb,
+			      sctp_route_t *ro,
+			      struct sctp_nets *net,
+			      int non_asoc_addr_ok, uint32_t vrf_id)
+{
+	struct sctp_ifa *answer;
+	uint8_t dest_is_priv, dest_is_loop;
+	sa_family_t fam;
+#ifdef INET
+	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
+#endif
+
+	/**
+	 * Rules:
+	 * - Find the route if needed, cache if I can.
+	 * - Look at the interface address in the route. Is it in the bound list? If so we
+	 *   have the best source.
+	 * - If not we must rotate amongst the addresses.
+	 *
+	 * Caveats and issues
+	 *
+	 * Do we need to pay attention to scope? We can have a private address
+	 * or a global address we are sourcing from or sending to. So if we
+	 * draw it out:
+	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+	 * For V4
+	 * ------------------------------------------
+	 *      source     *      dest  *  result
+	 * -----------------------------------------
+	 * <a>  Private    *    Global  *  NAT
+	 * -----------------------------------------
+	 * <b>  Private    *    Private *  No problem
+	 * -----------------------------------------
+	 * <c>  Global     *    Private *  Huh, How will this work?
+	 * -----------------------------------------
+	 * <d>  Global     *    Global  *  No Problem
+	 *------------------------------------------
+	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+	 * For V6
+	 *------------------------------------------
+	 *      source     *      dest  *  result
+	 * -----------------------------------------
+	 * <a>  Linklocal  *    Global  *
+	 * -----------------------------------------
+	 * <b>  Linklocal  * Linklocal  *  No problem
+	 * -----------------------------------------
+	 * <c>  Global     * Linklocal  *  Huh, How will this work?
+	 * -----------------------------------------
+	 * <d>  Global     *    Global  *  No Problem
+	 *------------------------------------------
+	 * zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz
+	 *
+	 * And then we add to that what happens if there are multiple addresses
+	 * assigned to an interface. Remember the ifa on an ifn is a linked
+	 * list of addresses. So one interface can have more than one IP
+	 * address. What happens if we have both a private and a global
+	 * address? Do we then use the context of the destination to sort out
+	 * which one is best? And what about NATs? Sending P->G may get you a
+	 * NAT translation, so should you select the G that is on the
+	 * interface in preference?
+	 *
+	 * Decisions:
+	 *
+	 * - count the number of addresses on the interface.
+	 * - if it is one, no problem except case <c>.
+	 *   For <a> we will assume a NAT out there.
+	 * - if there is more than one, then we need to worry about scope P
+	 *   or G. We should prefer G -> G and P -> P if possible.
+	 *   Then, as a secondary fallback, use mixed types, with G -> P being
+	 *   a last-ditch one.
+	 * - The above all works for bound-all, but for bound-specific we need
+	 *   to use the same concept, only considering the bound addresses
+	 *   instead. If the bound set is NOT assigned to the interface then
+	 *   we must rotate amongst the bound addresses.
+	 */
+	if (ro->ro_rt == NULL) {
+		/*
+		 * Need a route to cache.
+		 */
+		SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
+	}
+	if (ro->ro_rt == NULL) {
+		return (NULL);
+	}
+#if defined(__Userspace_os_Windows)
+	/* On Windows, sa_family is a u_short (ADDRESS_FAMILY) */
+	fam = (sa_family_t)ro->ro_dst.sa_family;
+#else
+	fam = ro->ro_dst.sa_family;
+#endif
+	dest_is_priv = dest_is_loop = 0;
+	/* Setup our scopes for the destination */
+	switch (fam) {
+#ifdef INET
+	case AF_INET:
+		/* Scope based on outbound address */
+		if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
+			dest_is_loop = 1;
+			if (net != NULL) {
+				/* mark it as local */
+				net->addr_is_local = 1;
+			}
+		} else if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
+			dest_is_priv = 1;
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		/* Scope based on outbound address */
+#if defined(__Userspace_os_Windows)
+		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
+#else
+		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr) ||
+		    SCTP_ROUTE_IS_REAL_LOOP(ro)) {
+#endif
+			/*
+			 * If the address is a loopback address, i.e. "::1"
+			 * or "fe80::1%lo0", we are in loopback scope. Note
+			 * that we don't set dest_is_priv here; that is
+			 * reserved for link-local addresses.
+			 */
+			dest_is_loop = 1;
+			if (net != NULL) {
+				/* mark it as local */
+				net->addr_is_local = 1;
+			}
+		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
+			dest_is_priv = 1;
+		}
+		break;
+#endif
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "Select source addr for:");
+	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&ro->ro_dst);
+	SCTP_IPI_ADDR_RLOCK();
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		/*
+		 * Bound all case
+		 */
+		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
+					      dest_is_priv, dest_is_loop,
+					      non_asoc_addr_ok, fam);
+		SCTP_IPI_ADDR_RUNLOCK();
+		return (answer);
+	}
+	/*
+	 * Subset bound case
+	 */
+	if (stcb) {
+		answer = sctp_choose_boundspecific_stcb(inp, stcb, ro,
+							vrf_id,	dest_is_priv,
+							dest_is_loop,
+							non_asoc_addr_ok, fam);
+	} else {
+		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id,
+						       non_asoc_addr_ok,
+						       dest_is_priv,
+						       dest_is_loop, fam);
+	}
+	SCTP_IPI_ADDR_RUNLOCK();
+	return (answer);
+}
+
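+/*
+ * Walk the control mbuf's cmsg headers looking for c_type at level
+ * IPPROTO_SCTP and copy its payload into 'data' (cpsize bytes). As a
+ * special case, c_type == SCTP_SNDRCV synthesizes a struct sctp_sndrcvinfo
+ * from any SCTP_SNDINFO, SCTP_PRINFO and SCTP_AUTHINFO cmsgs present.
+ * Returns 1 if something was copied out, 0 otherwise.
+ */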
+static int
+sctp_find_cmsg(int c_type, void *data, struct mbuf *control, size_t cpsize)
+{
+#if defined(__Userspace_os_Windows)
+	WSACMSGHDR cmh;
+#else
+	struct cmsghdr cmh;
+#endif
+	int tlen, at, found;
+	struct sctp_sndinfo sndinfo;
+	struct sctp_prinfo prinfo;
+	struct sctp_authinfo authinfo;
+
+	tlen = SCTP_BUF_LEN(control);
+	at = 0;
+	found = 0;
+	/*
+	 * Independent of how many mbufs, find the c_type inside the control
+	 * structure and copy out the data.
+	 */
+	while (at < tlen) {
+		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
+			/* There is not enough room for one more. */
+			return (found);
+		}
+		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+			/* We don't have a complete CMSG header. */
+			return (found);
+		}
+		if (((int)cmh.cmsg_len + at) > tlen) {
+			/* We don't have the complete CMSG. */
+			return (found);
+		}
+		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
+		    ((c_type == cmh.cmsg_type) ||
+		     ((c_type == SCTP_SNDRCV) &&
+		      ((cmh.cmsg_type == SCTP_SNDINFO) ||
+		       (cmh.cmsg_type == SCTP_PRINFO) ||
+		       (cmh.cmsg_type == SCTP_AUTHINFO))))) {
+			if (c_type == cmh.cmsg_type) {
+				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < cpsize) {
+					return (found);
+				}
+				/* It is exactly what we want. Copy it out. */
+				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), (int)cpsize, (caddr_t)data);
+				return (1);
+			} else {
+				struct sctp_sndrcvinfo *sndrcvinfo;
+
+				sndrcvinfo = (struct sctp_sndrcvinfo *)data;
+				if (found == 0) {
+					if (cpsize < sizeof(struct sctp_sndrcvinfo)) {
+						return (found);
+					}
+					memset(sndrcvinfo, 0, sizeof(struct sctp_sndrcvinfo));
+				}
+				switch (cmh.cmsg_type) {
+				case SCTP_SNDINFO:
+					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_sndinfo)) {
+						return (found);
+					}
+					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_sndinfo), (caddr_t)&sndinfo);
+					sndrcvinfo->sinfo_stream = sndinfo.snd_sid;
+					sndrcvinfo->sinfo_flags = sndinfo.snd_flags;
+					sndrcvinfo->sinfo_ppid = sndinfo.snd_ppid;
+					sndrcvinfo->sinfo_context = sndinfo.snd_context;
+					sndrcvinfo->sinfo_assoc_id = sndinfo.snd_assoc_id;
+					break;
+				case SCTP_PRINFO:
+					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_prinfo)) {
+						return (found);
+					}
+					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_prinfo), (caddr_t)&prinfo);
+					if (prinfo.pr_policy != SCTP_PR_SCTP_NONE) {
+						sndrcvinfo->sinfo_timetolive = prinfo.pr_value;
+					} else {
+						sndrcvinfo->sinfo_timetolive = 0;
+					}
+					sndrcvinfo->sinfo_flags |= prinfo.pr_policy;
+					break;
+				case SCTP_AUTHINFO:
+					if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_authinfo)) {
+						return (found);
+					}
+					m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_authinfo), (caddr_t)&authinfo);
+					sndrcvinfo->sinfo_keynumber_valid = 1;
+					sndrcvinfo->sinfo_keynumber = authinfo.auth_keynumber;
+					break;
+				default:
+					return (found);
+				}
+				found = 1;
+			}
+		}
+		at += CMSG_ALIGN(cmh.cmsg_len);
+	}
+	return (found);
+}
+
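+/*
+ * Process the cmsgs that matter at association setup time: SCTP_INIT
+ * (override INIT parameters and grow the outgoing stream array if needed)
+ * and SCTP_DSTADDRV4/SCTP_DSTADDRV6 (add remote addresses). Returns 1 and
+ * sets *error on failure, 0 on success. For reference, a caller would
+ * typically have built the control data roughly along these lines
+ * (illustrative sketch only, not taken from this file):
+ *
+ *	struct sctp_initmsg init = { .sinit_num_ostreams = 5 };
+ *	char buf[CMSG_SPACE(sizeof(init))];
+ *	struct cmsghdr *cmsg = (struct cmsghdr *)buf;
+ *	cmsg->cmsg_level = IPPROTO_SCTP;
+ *	cmsg->cmsg_type = SCTP_INIT;
+ *	cmsg->cmsg_len = CMSG_LEN(sizeof(init));
+ *	memcpy(CMSG_DATA(cmsg), &init, sizeof(init));
+ */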
+static int
+sctp_process_cmsgs_for_init(struct sctp_tcb *stcb, struct mbuf *control, int *error)
+{
+#if defined(__Userspace_os_Windows)
+	WSACMSGHDR cmh;
+#else
+	struct cmsghdr cmh;
+#endif
+	int tlen, at;
+	struct sctp_initmsg initmsg;
+#ifdef INET
+	struct sockaddr_in sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 sin6;
+#endif
+
+	tlen = SCTP_BUF_LEN(control);
+	at = 0;
+	while (at < tlen) {
+		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
+			/* There is not enough room for one more. */
+			*error = EINVAL;
+			return (1);
+		}
+		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+			/* We don't have a complete CMSG header. */
+			*error = EINVAL;
+			return (1);
+		}
+		if (((int)cmh.cmsg_len + at) > tlen) {
+			/* We don't have the complete CMSG. */
+			*error = EINVAL;
+			return (1);
+		}
+		if (cmh.cmsg_level == IPPROTO_SCTP) {
+			switch (cmh.cmsg_type) {
+			case SCTP_INIT:
+				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct sctp_initmsg)) {
+					*error = EINVAL;
+					return (1);
+				}
+				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct sctp_initmsg), (caddr_t)&initmsg);
+				if (initmsg.sinit_max_attempts)
+					stcb->asoc.max_init_times = initmsg.sinit_max_attempts;
+				if (initmsg.sinit_num_ostreams)
+					stcb->asoc.pre_open_streams = initmsg.sinit_num_ostreams;
+				if (initmsg.sinit_max_instreams)
+					stcb->asoc.max_inbound_streams = initmsg.sinit_max_instreams;
+				if (initmsg.sinit_max_init_timeo)
+					stcb->asoc.initial_init_rto_max = initmsg.sinit_max_init_timeo;
+				if (stcb->asoc.streamoutcnt < stcb->asoc.pre_open_streams) {
+					struct sctp_stream_out *tmp_str;
+					unsigned int i;
+#if defined(SCTP_DETAILED_STR_STATS)
+					int j;
+#endif
+
+					/* Default is NOT correct */
+					SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, default:%d pre_open:%d\n",
+						stcb->asoc.streamoutcnt, stcb->asoc.pre_open_streams);
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_MALLOC(tmp_str,
+					            struct sctp_stream_out *,
+					            (stcb->asoc.pre_open_streams * sizeof(struct sctp_stream_out)),
+					            SCTP_M_STRMO);
+					SCTP_TCB_LOCK(stcb);
+					if (tmp_str != NULL) {
+						SCTP_FREE(stcb->asoc.strmout, SCTP_M_STRMO);
+						stcb->asoc.strmout = tmp_str;
+						stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt = stcb->asoc.pre_open_streams;
+					} else {
+						stcb->asoc.pre_open_streams = stcb->asoc.streamoutcnt;
+					}
+					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+						stcb->asoc.strmout[i].chunks_on_queues = 0;
+						stcb->asoc.strmout[i].next_mid_ordered = 0;
+						stcb->asoc.strmout[i].next_mid_unordered = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+						for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+							stcb->asoc.strmout[i].abandoned_sent[j] = 0;
+							stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
+						}
+#else
+						stcb->asoc.strmout[i].abandoned_sent[0] = 0;
+						stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
+#endif
+						stcb->asoc.strmout[i].sid = i;
+						stcb->asoc.strmout[i].last_msg_incomplete = 0;
+						stcb->asoc.strmout[i].state = SCTP_STREAM_OPENING;
+						stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
+					}
+				}
+				break;
+#ifdef INET
+			case SCTP_DSTADDRV4:
+				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
+					*error = EINVAL;
+					return (1);
+				}
+				memset(&sin, 0, sizeof(struct sockaddr_in));
+				sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+				sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+				sin.sin_port = stcb->rport;
+				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
+				if ((sin.sin_addr.s_addr == INADDR_ANY) ||
+				    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+				    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+					*error = EINVAL;
+					return (1);
+				}
+				if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
+				                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+					*error = ENOBUFS;
+					return (1);
+				}
+				break;
+#endif
+#ifdef INET6
+			case SCTP_DSTADDRV6:
+				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
+					*error = EINVAL;
+					return (1);
+				}
+				memset(&sin6, 0, sizeof(struct sockaddr_in6));
+				sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+				sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+				sin6.sin6_port = stcb->rport;
+				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
+				if (IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) ||
+				    IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
+					*error = EINVAL;
+					return (1);
+				}
+#ifdef INET
+				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
+					in6_sin6_2_sin(&sin, &sin6);
+					if ((sin.sin_addr.s_addr == INADDR_ANY) ||
+					    (sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+					    IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+						*error = EINVAL;
+						return (1);
+					}
+					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin, NULL, stcb->asoc.port,
+					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+						*error = ENOBUFS;
+						return (1);
+					}
+				} else
+#endif
+					if (sctp_add_remote_addr(stcb, (struct sockaddr *)&sin6, NULL, stcb->asoc.port,
+					                         SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
+						*error = ENOBUFS;
+						return (1);
+					}
+				break;
+#endif
+			default:
+				break;
+			}
+		}
+		at += CMSG_ALIGN(cmh.cmsg_len);
+	}
+	return (0);
+}
+
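+/*
+ * Scan the control mbuf for SCTP_DSTADDRV4/SCTP_DSTADDRV6 cmsgs and look up
+ * an existing association for each destination address found. Returns the
+ * first matching stcb (with *inp_p and *net_p filled in), or NULL if none
+ * of the addresses belongs to a known association.
+ */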
+static struct sctp_tcb *
+sctp_findassociation_cmsgs(struct sctp_inpcb **inp_p,
+                           uint16_t port,
+                           struct mbuf *control,
+                           struct sctp_nets **net_p,
+                           int *error)
+{
+#if defined(__Userspace_os_Windows)
+	WSACMSGHDR cmh;
+#else
+	struct cmsghdr cmh;
+#endif
+	int tlen, at;
+	struct sctp_tcb *stcb;
+	struct sockaddr *addr;
+#ifdef INET
+	struct sockaddr_in sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 sin6;
+#endif
+
+	tlen = SCTP_BUF_LEN(control);
+	at = 0;
+	while (at < tlen) {
+		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
+			/* There is not enough room for one more. */
+			*error = EINVAL;
+			return (NULL);
+		}
+		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
+		if (cmh.cmsg_len < CMSG_ALIGN(sizeof(cmh))) {
+			/* We don't have a complete CMSG header. */
+			*error = EINVAL;
+			return (NULL);
+		}
+		if (((int)cmh.cmsg_len + at) > tlen) {
+			/* We don't have the complete CMSG. */
+			*error = EINVAL;
+			return (NULL);
+		}
+		if (cmh.cmsg_level == IPPROTO_SCTP) {
+			switch (cmh.cmsg_type) {
+#ifdef INET
+			case SCTP_DSTADDRV4:
+				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in_addr)) {
+					*error = EINVAL;
+					return (NULL);
+				}
+				memset(&sin, 0, sizeof(struct sockaddr_in));
+				sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+				sin.sin_len = sizeof(struct sockaddr_in);
+#endif
+				sin.sin_port = port;
+				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in_addr), (caddr_t)&sin.sin_addr);
+				addr = (struct sockaddr *)&sin;
+				break;
+#endif
+#ifdef INET6
+			case SCTP_DSTADDRV6:
+				if ((size_t)(cmh.cmsg_len - CMSG_ALIGN(sizeof(cmh))) < sizeof(struct in6_addr)) {
+					*error = EINVAL;
+					return (NULL);
+				}
+				memset(&sin6, 0, sizeof(struct sockaddr_in6));
+				sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+				sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+				sin6.sin6_port = port;
+				m_copydata(control, at + CMSG_ALIGN(sizeof(cmh)), sizeof(struct in6_addr), (caddr_t)&sin6.sin6_addr);
+#ifdef INET
+				if (IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
+					in6_sin6_2_sin(&sin, &sin6);
+					addr = (struct sockaddr *)&sin;
+				} else
+#endif
+					addr = (struct sockaddr *)&sin6;
+				break;
+#endif
+			default:
+				addr = NULL;
+				break;
+			}
+			if (addr) {
+				stcb = sctp_findassociation_ep_addr(inp_p, addr, net_p, NULL, NULL);
+				if (stcb != NULL) {
+					return (stcb);
+				}
+			}
+		}
+		at += CMSG_ALIGN(cmh.cmsg_len);
+	}
+	return (NULL);
+}
+
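+/*
+ * Build the state cookie as an mbuf chain laid out as:
+ *
+ *	[paramhdr | struct sctp_state_cookie] -> copy of INIT ->
+ *	copy of INIT-ACK -> [signature (SCTP_SIGNATURE_SIZE bytes)]
+ *
+ * The signature area is zeroed here and *signature points at it so the
+ * caller can fill in the HMAC afterwards; ph->param_length covers the
+ * whole cookie including the signature.
+ */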
+static struct mbuf *
+sctp_add_cookie(struct mbuf *init, int init_offset,
+    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in, uint8_t **signature)
+{
+	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
+	struct sctp_state_cookie *stc;
+	struct sctp_paramhdr *ph;
+	uint8_t *foo;
+	int sig_offset;
+	uint16_t cookie_sz;
+
+	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
+				      sizeof(struct sctp_paramhdr)), 0,
+				     M_NOWAIT, 1, MT_DATA);
+	if (mret == NULL) {
+		return (NULL);
+	}
+	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_NOWAIT);
+	if (copy_init == NULL) {
+		sctp_m_freem(mret);
+		return (NULL);
+	}
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mbc(copy_init, SCTP_MBUF_ICOPY);
+	}
+#endif
+	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
+	    M_NOWAIT);
+	if (copy_initack == NULL) {
+		sctp_m_freem(mret);
+		sctp_m_freem(copy_init);
+		return (NULL);
+	}
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mbc(copy_initack, SCTP_MBUF_ICOPY);
+	}
+#endif
+	/* the easy part: we just drop it on the end */
+	ph = mtod(mret, struct sctp_paramhdr *);
+	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
+	    sizeof(struct sctp_paramhdr);
+	stc = (struct sctp_state_cookie *)((caddr_t)ph +
+	    sizeof(struct sctp_paramhdr));
+	ph->param_type = htons(SCTP_STATE_COOKIE);
+	ph->param_length = 0;	/* fill in at the end */
+	/* Fill in the stc cookie data */
+	memcpy(stc, stc_in, sizeof(struct sctp_state_cookie));
+
+	/* tack the INIT and then the INIT-ACK onto the chain */
+	cookie_sz = 0;
+	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+		cookie_sz += SCTP_BUF_LEN(m_at);
+		if (SCTP_BUF_NEXT(m_at) == NULL) {
+			SCTP_BUF_NEXT(m_at) = copy_init;
+			break;
+		}
+	}
+	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+		cookie_sz += SCTP_BUF_LEN(m_at);
+		if (SCTP_BUF_NEXT(m_at) == NULL) {
+			SCTP_BUF_NEXT(m_at) = copy_initack;
+			break;
+		}
+	}
+	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+		cookie_sz += SCTP_BUF_LEN(m_at);
+		if (SCTP_BUF_NEXT(m_at) == NULL) {
+			break;
+		}
+	}
+	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_NOWAIT, 1, MT_DATA);
+	if (sig == NULL) {
+		/* no space, so free the entire chain */
+		sctp_m_freem(mret);
+		return (NULL);
+	}
+	SCTP_BUF_LEN(sig) = 0;
+	SCTP_BUF_NEXT(m_at) = sig;
+	sig_offset = 0;
+	foo = (uint8_t *) (mtod(sig, caddr_t) + sig_offset);
+	memset(foo, 0, SCTP_SIGNATURE_SIZE);
+	*signature = foo;
+	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
+	cookie_sz += SCTP_SIGNATURE_SIZE;
+	ph->param_length = htons(cookie_sz);
+	return (mret);
+}
+
+
+static uint8_t
+sctp_get_ect(struct sctp_tcb *stcb)
+{
+	if ((stcb != NULL) && (stcb->asoc.ecn_supported == 1)) {
+		return (SCTP_ECT0_BIT);
+	} else {
+		return (0);
+	}
+}
+
+#if defined(INET) || defined(INET6)
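+/*
+ * Called when no route or valid source address could be found for 'net'.
+ * Marks the path unreachable (notifying the ULP) and, if it was the
+ * primary destination, switches stcb->asoc.alternate to another net and
+ * forces a fresh source address selection.
+ */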
+static void
+sctp_handle_no_route(struct sctp_tcb *stcb,
+                     struct sctp_nets *net,
+                     int so_locked)
+{
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "dropped packet - no valid source addr\n");
+
+	if (net) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination was ");
+		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT1, &net->ro._l_addr.sa);
+		if (net->dest_state & SCTP_ADDR_CONFIRMED) {
+			if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "no route takes interface %p down\n", (void *)net);
+				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+			                        stcb, 0,
+			                        (void *)net,
+			                        so_locked);
+				net->dest_state &= ~SCTP_ADDR_REACHABLE;
+				net->dest_state &= ~SCTP_ADDR_PF;
+			}
+		}
+		if (stcb) {
+			if (net == stcb->asoc.primary_destination) {
+				/* need a new primary */
+				struct sctp_nets *alt;
+
+				alt = sctp_find_alternate_net(stcb, net, 0);
+				if (alt != net) {
+					if (stcb->asoc.alternate) {
+						sctp_free_remote_addr(stcb->asoc.alternate);
+					}
+					stcb->asoc.alternate = alt;
+					atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
+					if (net->ro._s_addr) {
+						sctp_free_ifa(net->ro._s_addr);
+						net->ro._s_addr = NULL;
+					}
+					net->src_addr_selected = 0;
+				}
+			}
+		}
+	}
+}
+#endif
+
+static int
+sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,	/* may be NULL */
+    struct sctp_nets *net,
+    struct sockaddr *to,
+    struct mbuf *m,
+    uint32_t auth_offset,
+    struct sctp_auth_chunk *auth,
+    uint16_t auth_keyid,
+    int nofragment_flag,
+    int ecn_ok,
+    int out_of_asoc_ok,
+    uint16_t src_port,
+    uint16_t dest_port,
+    uint32_t v_tag,
+    uint16_t port,
+    union sctp_sockstore *over_addr,
+#if defined(__FreeBSD__)
+    uint8_t mflowtype, uint32_t mflowid,
+#endif
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    int so_locked SCTP_UNUSED
+#else
+    int so_locked
+#endif
+    )
+/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
+{
+	/**
+	 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet header
+	 * WITH an SCTPHDR but no IP header, endpoint inp and sa structure:
+	 * - fill in the HMAC digest of any AUTH chunk in the packet.
+	 * - calculate and fill in the SCTP checksum.
+	 * - prepend an IP address header.
+	 * - if boundall use INADDR_ANY.
+	 * - if boundspecific do source address selection.
+	 * - set the fragmentation option for IPv4.
+	 * - On return from IP output, check/adjust mtu size of output
+	 *   interface and smallest_mtu size as well.
+	 */
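+	/*
+	 * Note: a non-zero 'port' requests UDP encapsulation of the SCTP
+	 * packet (RFC 6951 style): 'port' becomes the destination UDP port
+	 * and the sysctl sctp_udp_tunneling_port supplies the source port.
+	 */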
+	/* Will need ifdefs around this */
+#ifdef __Panda__
+	pakhandle_type o_pak;
+#endif
+	struct mbuf *newm;
+	struct sctphdr *sctphdr;
+	int packet_length;
+	int ret;
+#if defined(INET) || defined(INET6)
+	uint32_t vrf_id;
+#endif
+#if defined(INET) || defined(INET6)
+#if !defined(__Panda__)
+	struct mbuf *o_pak;
+#endif
+	sctp_route_t *ro = NULL;
+	struct udphdr *udp = NULL;
+#endif
+	uint8_t tos_value;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so = NULL;
+#endif
+
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(inp));
+		SCTP_TCB_LOCK_ASSERT(stcb);
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(inp));
+	}
+#endif
+	if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+		sctp_m_freem(m);
+		return (EFAULT);
+	}
+#if defined(INET) || defined(INET6)
+	if (stcb) {
+		vrf_id = stcb->asoc.vrf_id;
+	} else {
+		vrf_id = inp->def_vrf_id;
+	}
+#endif
+	/* fill in the HMAC digest for any AUTH chunk in the packet */
+	if ((auth != NULL) && (stcb != NULL)) {
+		sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb, auth_keyid);
+	}
+
+	if (net) {
+		tos_value = net->dscp;
+	} else if (stcb) {
+		tos_value = stcb->asoc.default_dscp;
+	} else {
+		tos_value = inp->sctp_ep.default_dscp;
+	}
+
+	switch (to->sa_family) {
+#ifdef INET
+	case AF_INET:
+	{
+		struct ip *ip = NULL;
+		sctp_route_t iproute;
+		int len;
+
+		len = SCTP_MIN_V4_OVERHEAD;
+		if (port) {
+			len += sizeof(struct udphdr);
+		}
+		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
+		if (newm == NULL) {
+			sctp_m_freem(m);
+			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+			return (ENOMEM);
+		}
+		SCTP_ALIGN_TO_END(newm, len);
+		SCTP_BUF_LEN(newm) = len;
+		SCTP_BUF_NEXT(newm) = m;
+		m = newm;
+#if defined(__FreeBSD__)
+		if (net != NULL) {
+			m->m_pkthdr.flowid = net->flowid;
+			M_HASHTYPE_SET(m, net->flowtype);
+		} else {
+			m->m_pkthdr.flowid = mflowid;
+			M_HASHTYPE_SET(m, mflowtype);
+		}
+#endif
+		packet_length = sctp_calculate_len(m);
+		ip = mtod(m, struct ip *);
+		ip->ip_v = IPVERSION;
+		ip->ip_hl = (sizeof(struct ip) >> 2);
+		if (tos_value == 0) {
+			/*
+			 * In particular, this means that it is not set at the
+			 * SCTP layer, so use the value from the IP layer.
+			 */
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
+			tos_value = inp->ip_inp.inp.inp_ip_tos;
+#else
+			tos_value = inp->inp_ip_tos;
+#endif
+		}
+		tos_value &= 0xfc;
+		if (ecn_ok) {
+			tos_value |= sctp_get_ect(stcb);
+		}
+		if ((nofragment_flag) && (port == 0)) {
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 1000000
+			ip->ip_off = htons(IP_DF);
+#else
+			ip->ip_off = IP_DF;
+#endif
+#elif defined(WITH_CONVERT_IP_OFF) || defined(__APPLE__) || defined(__Userspace__)
+			ip->ip_off = IP_DF;
+#else
+			ip->ip_off = htons(IP_DF);
+#endif
+		} else {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
+			ip->ip_off = htons(0);
+#else
+			ip->ip_off = 0;
+#endif
+		}
+#if defined(__FreeBSD__)
+		/* FreeBSD has a function for ip_ids */
+		ip_fillid(ip);
+#elif defined(__APPLE__)
+#if RANDOM_IP_ID
+		ip->ip_id = ip_randomid();
+#else
+		ip->ip_id = htons(ip_id++);
+#endif
+#elif defined(__Userspace__)
+		ip->ip_id = htons(SCTP_IP_ID(inp)++);
+#else
+		ip->ip_id = SCTP_IP_ID(inp)++;
+#endif
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
+		ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
+#else
+		ip->ip_ttl = inp->inp_ip_ttl;
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
+		ip->ip_len = htons(packet_length);
+#else
+		ip->ip_len = packet_length;
+#endif
+		ip->ip_tos = tos_value;
+		if (port) {
+			ip->ip_p = IPPROTO_UDP;
+		} else {
+			ip->ip_p = IPPROTO_SCTP;
+		}
+		ip->ip_sum = 0;
+		if (net == NULL) {
+			ro = &iproute;
+			memset(&iproute, 0, sizeof(iproute));
+#ifdef HAVE_SA_LEN
+			memcpy(&ro->ro_dst, to, to->sa_len);
+#else
+			memcpy(&ro->ro_dst, to, sizeof(struct sockaddr_in));
+#endif
+		} else {
+			ro = (sctp_route_t *)&net->ro;
+		}
+		/* Now the address selection part */
+		ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
+
+		/* call the routine to select the src address */
+		if (net && out_of_asoc_ok == 0) {
+			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
+				sctp_free_ifa(net->ro._s_addr);
+				net->ro._s_addr = NULL;
+				net->src_addr_selected = 0;
+				if (ro->ro_rt) {
+					RTFREE(ro->ro_rt);
+					ro->ro_rt = NULL;
+				}
+			}
+			if (net->src_addr_selected == 0) {
+				/* Cache the source address */
+				net->ro._s_addr = sctp_source_address_selection(inp,stcb,
+										ro, net, 0,
+										vrf_id);
+				net->src_addr_selected = 1;
+			}
+			if (net->ro._s_addr == NULL) {
+				/* No route to host */
+				net->src_addr_selected = 0;
+				sctp_handle_no_route(stcb, net, so_locked);
+				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+				sctp_m_freem(m);
+				return (EHOSTUNREACH);
+			}
+			ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
+		} else {
+			if (over_addr == NULL) {
+				struct sctp_ifa *_lsrc;
+
+				_lsrc = sctp_source_address_selection(inp, stcb, ro,
+				                                      net,
+				                                      out_of_asoc_ok,
+				                                      vrf_id);
+				if (_lsrc == NULL) {
+					sctp_handle_no_route(stcb, net, so_locked);
+					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+					sctp_m_freem(m);
+					return (EHOSTUNREACH);
+				}
+				ip->ip_src = _lsrc->address.sin.sin_addr;
+				sctp_free_ifa(_lsrc);
+			} else {
+				ip->ip_src = over_addr->sin.sin_addr;
+				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
+			}
+		}
+		if (port) {
+			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+				sctp_handle_no_route(stcb, net, so_locked);
+				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+				sctp_m_freem(m);
+				return (EHOSTUNREACH);
+			}
+			udp = (struct udphdr *)((caddr_t)ip + sizeof(struct ip));
+			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+			udp->uh_dport = port;
+			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip)));
+#if !defined(__Windows__) && !defined(__Userspace__)
+#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
+			if (V_udp_cksum) {
+				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+			} else {
+				udp->uh_sum = 0;
+			}
+#else
+			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+#endif
+#else
+			udp->uh_sum = 0;
+#endif
+			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+		} else {
+			sctphdr = (struct sctphdr *)((caddr_t)ip + sizeof(struct ip));
+		}
+
+		sctphdr->src_port = src_port;
+		sctphdr->dest_port = dest_port;
+		sctphdr->v_tag = v_tag;
+		sctphdr->checksum = 0;
+
+		/*
+		 * If source address selection fails and we find no route
+		 * then the ip_output should fail as well with a
+		 * NO_ROUTE_TO_HOST type error. We probably should catch
+		 * that somewhere and abort the association right away
+		 * (assuming this is an INIT being sent).
+		 */
+		if (ro->ro_rt == NULL) {
+			/*
+			 * src addr selection failed to find a route (or
+			 * valid source addr), so we can't get there from
+			 * here (yet)!
+			 */
+			sctp_handle_no_route(stcb, net, so_locked);
+			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+			sctp_m_freem(m);
+			return (EHOSTUNREACH);
+		}
+		if (ro != &iproute) {
+			memcpy(&iproute, ro, sizeof(*ro));
+		}
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv4 output routine from low level src addr:%x\n",
+			(uint32_t) (ntohl(ip->ip_src.s_addr)));
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Destination is %x\n",
+			(uint32_t)(ntohl(ip->ip_dst.s_addr)));
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "RTP route is %p through\n",
+			(void *)ro->ro_rt);
+
+		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+			/* failed to prepend data, give up */
+			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+			sctp_m_freem(m);
+			return (ENOMEM);
+		}
+		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
+		if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip) + sizeof(struct udphdr));
+			SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+#if !defined(__Windows__) && !defined(__Userspace__)
+#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
+			if (V_udp_cksum) {
+				SCTP_ENABLE_UDP_CSUM(o_pak);
+			}
+#else
+			SCTP_ENABLE_UDP_CSUM(o_pak);
+#endif
+#endif
+		} else {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			m->m_pkthdr.csum_flags = CSUM_SCTP;
+			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+			SCTP_STAT_INCR(sctps_sendhwcrc);
+#else
+			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
+				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip));
+				SCTP_STAT_INCR(sctps_sendswcrc);
+			} else {
+				SCTP_STAT_INCR(sctps_sendnocrc);
+			}
+#endif
+#endif
+		}
+#ifdef SCTP_PACKET_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+			sctp_packet_log(o_pak);
+#endif
+		/* send it out.  table id is taken from stcb */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+			so = SCTP_INP_SO(inp);
+			SCTP_SOCKET_UNLOCK(so, 0);
+		}
+#endif
+		SCTP_IP_OUTPUT(ret, o_pak, ro, stcb, vrf_id);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 0);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		}
+#endif
+		SCTP_STAT_INCR(sctps_sendpackets);
+		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+		if (ret)
+			SCTP_STAT_INCR(sctps_senderrors);
+
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "IP output returns %d\n", ret);
+		if (net == NULL) {
+			/* free temporary routes */
+#if defined(__FreeBSD__) && __FreeBSD_version > 901000
+			RO_RTFREE(ro);
+#else
+			if (ro->ro_rt) {
+				RTFREE(ro->ro_rt);
+				ro->ro_rt = NULL;
+			}
+#endif
+		} else {
+			/* PMTU check versus smallest asoc MTU goes here */
+			if ((ro->ro_rt != NULL) &&
+			    (net->ro._s_addr)) {
+				uint32_t mtu;
+				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+				if (net->port) {
+					mtu -= sizeof(struct udphdr);
+				}
+				if (mtu && (stcb->asoc.smallest_mtu > mtu)) {
+					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+					net->mtu = mtu;
+				}
+			} else if (ro->ro_rt == NULL) {
+				/* route was freed */
+				if (net->ro._s_addr &&
+				    net->src_addr_selected) {
+					sctp_free_ifa(net->ro._s_addr);
+					net->ro._s_addr = NULL;
+				}
+				net->src_addr_selected = 0;
+			}
+		}
+		return (ret);
+	}
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+		uint32_t flowlabel, flowinfo;
+		struct ip6_hdr *ip6h;
+		struct route_in6 ip6route;
+#if !(defined(__Panda__) || defined(__Userspace__))
+		struct ifnet *ifp;
+#endif
+		struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
+		int prev_scope = 0;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+		struct sockaddr_in6 lsa6_storage;
+		int error;
+#endif
+		u_short prev_port = 0;
+		int len;
+
+		if (net) {
+			flowlabel = net->flowlabel;
+		} else if (stcb) {
+			flowlabel = stcb->asoc.default_flowlabel;
+		} else {
+			flowlabel = inp->sctp_ep.default_flowlabel;
+		}
+		if (flowlabel == 0) {
+			/*
+			 * In particular, this means that it is not set at the
+			 * SCTP layer, so use the value from the IP layer.
+			 */
+#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+			flowlabel = ntohl(inp->ip_inp.inp.inp_flow);
+#else
+			flowlabel = ntohl(((struct in6pcb *)inp)->in6p_flowinfo);
+#endif
+		}
+		flowlabel &= 0x000fffff;
+		len = SCTP_MIN_OVERHEAD;
+		if (port) {
+			len += sizeof(struct udphdr);
+		}
+		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
+		if (newm == NULL) {
+			sctp_m_freem(m);
+			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+			return (ENOMEM);
+		}
+		SCTP_ALIGN_TO_END(newm, len);
+		SCTP_BUF_LEN(newm) = len;
+		SCTP_BUF_NEXT(newm) = m;
+		m = newm;
+#if defined(__FreeBSD__)
+		if (net != NULL) {
+			m->m_pkthdr.flowid = net->flowid;
+			M_HASHTYPE_SET(m, net->flowtype);
+		} else {
+			m->m_pkthdr.flowid = mflowid;
+			M_HASHTYPE_SET(m, mflowtype);
+		}
+#endif
+		packet_length = sctp_calculate_len(m);
+
+		ip6h = mtod(m, struct ip6_hdr *);
+		/* protect *sin6 from overwrite */
+		sin6 = (struct sockaddr_in6 *)to;
+		tmp = *sin6;
+		sin6 = &tmp;
+
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+		/* KAME hack: embed scopeid */
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
+#else
+		if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
+#endif
+#elif defined(SCTP_KAME)
+		if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
+#else
+		if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
+#endif
+		{
+			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			/* free the chain we own on this error path, like the other returns do */
+			sctp_m_freem(m);
+			return (EINVAL);
+		}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+		if (net == NULL) {
+			memset(&ip6route, 0, sizeof(ip6route));
+			ro = (sctp_route_t *)&ip6route;
+#ifdef HAVE_SIN6_LEN
+			memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
+#else
+			memcpy(&ro->ro_dst, sin6, sizeof(struct sockaddr_in6));
+#endif
+		} else {
+			ro = (sctp_route_t *)&net->ro;
+		}
+		/*
+		 * We assume here that inp_flow is in host byte order within
+		 * the TCB!
+		 */
+		if (tos_value == 0) {
+			/*
+			 * In particular, this means that it is not set at the
+			 * SCTP layer, so use the value from the IP layer.
+			 */
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
+#if defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+			tos_value = (ntohl(inp->ip_inp.inp.inp_flow) >> 20) & 0xff;
+#else
+			tos_value = (ntohl(((struct in6pcb *)inp)->in6p_flowinfo) >> 20) & 0xff;
+#endif
+#endif
+		}
+		tos_value &= 0xfc;
+		if (ecn_ok) {
+			tos_value |= sctp_get_ect(stcb);
+		}
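+		/*
+		 * Compose the IPv6 flow word: version 6 in the top nibble,
+		 * the traffic class (DSCP plus ECT bits) in the next 8 bits,
+		 * and the 20-bit flow label at the bottom.
+		 */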
+		flowinfo = 0x06;
+		flowinfo <<= 8;
+		flowinfo |= tos_value;
+		flowinfo <<= 20;
+		flowinfo |= flowlabel;
+		ip6h->ip6_flow = htonl(flowinfo);
+		if (port) {
+			ip6h->ip6_nxt = IPPROTO_UDP;
+		} else {
+			ip6h->ip6_nxt = IPPROTO_SCTP;
+		}
+		ip6h->ip6_plen = (uint16_t)(packet_length - sizeof(struct ip6_hdr));
+		ip6h->ip6_dst = sin6->sin6_addr;
+
+		/*
+		 * Do SRC address selection here: we can reuse the KAME
+		 * src-addr-sel only to a limited degree, since we can try
+		 * its selection but the result may not be bound.
+		 */
+		bzero(&lsa6_tmp, sizeof(lsa6_tmp));
+		lsa6_tmp.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
+#endif
+		lsa6 = &lsa6_tmp;
+		if (net && out_of_asoc_ok == 0) {
+			if (net->ro._s_addr && (net->ro._s_addr->localifa_flags & (SCTP_BEING_DELETED|SCTP_ADDR_IFA_UNUSEABLE))) {
+				sctp_free_ifa(net->ro._s_addr);
+				net->ro._s_addr = NULL;
+				net->src_addr_selected = 0;
+				if (ro->ro_rt) {
+					RTFREE(ro->ro_rt);
+					ro->ro_rt = NULL;
+				}
+			}
+			if (net->src_addr_selected == 0) {
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+				/* KAME hack: embed scopeid */
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
+#else
+				if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
+#endif
+#elif defined(SCTP_KAME)
+				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
+#else
+				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
+#endif
+				{
+					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+					sctp_m_freem(m);
+					return (EINVAL);
+				}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+				/* Cache the source address */
+				net->ro._s_addr = sctp_source_address_selection(inp,
+										stcb,
+										ro,
+										net,
+										0,
+										vrf_id);
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#ifdef SCTP_KAME
+				(void)sa6_recoverscope(sin6);
+#else
+				(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif	/* SCTP_KAME */
+#endif	/* SCTP_EMBEDDED_V6_SCOPE */
+				net->src_addr_selected = 1;
+			}
+			if (net->ro._s_addr == NULL) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT3, "V6:No route to host\n");
+				net->src_addr_selected = 0;
+				sctp_handle_no_route(stcb, net, so_locked);
+				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+				sctp_m_freem(m);
+				return (EHOSTUNREACH);
+			}
+			lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
+		} else {
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+			sin6 = (struct sockaddr_in6 *)&ro->ro_dst;
+			/* KAME hack: embed scopeid */
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
+#else
+			if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL) != 0)
+#endif
+#elif defined(SCTP_KAME)
+			if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0)
+#else
+			if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
+#endif
+			{
+				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+				sctp_m_freem(m);
+				return (EINVAL);
+			}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+			if (over_addr == NULL) {
+				struct sctp_ifa *_lsrc;
+
+				_lsrc = sctp_source_address_selection(inp, stcb, ro,
+				                                      net,
+				                                      out_of_asoc_ok,
+				                                      vrf_id);
+				if (_lsrc == NULL) {
+					sctp_handle_no_route(stcb, net, so_locked);
+					SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+					sctp_m_freem(m);
+					return (EHOSTUNREACH);
+				}
+				lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
+				sctp_free_ifa(_lsrc);
+			} else {
+				lsa6->sin6_addr = over_addr->sin6.sin6_addr;
+				SCTP_RTALLOC(ro, vrf_id, inp->fibnum);
+			}
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#ifdef SCTP_KAME
+			(void)sa6_recoverscope(sin6);
+#else
+			(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif	/* SCTP_KAME */
+#endif	/* SCTP_EMBEDDED_V6_SCOPE */
+		}
+		lsa6->sin6_port = inp->sctp_lport;
+
+		if (ro->ro_rt == NULL) {
+			/*
+			 * src addr selection failed to find a route (or
+			 * valid source addr), so we can't get there from
+			 * here!
+			 */
+			sctp_handle_no_route(stcb, net, so_locked);
+			SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+			sctp_m_freem(m);
+			return (EHOSTUNREACH);
+		}
+#ifndef SCOPEDROUTING
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+		/*
+		 * XXX: sa6 may not have a valid sin6_scope_id in the
+		 * non-SCOPEDROUTING case.
+		 */
+		bzero(&lsa6_storage, sizeof(lsa6_storage));
+		lsa6_storage.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		lsa6_storage.sin6_len = sizeof(lsa6_storage);
+#endif
+#ifdef SCTP_KAME
+		lsa6_storage.sin6_addr = lsa6->sin6_addr;
+		if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
+#else
+		if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
+		    NULL)) != 0) {
+#endif				/* SCTP_KAME */
+			SCTPDBG(SCTP_DEBUG_OUTPUT3, "recover scope fails error %d\n", error);
+			sctp_m_freem(m);
+			return (error);
+		}
+		/* XXX */
+		lsa6_storage.sin6_addr = lsa6->sin6_addr;
+		lsa6_storage.sin6_port = inp->sctp_lport;
+		lsa6 = &lsa6_storage;
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif /* SCOPEDROUTING */
+		ip6h->ip6_src = lsa6->sin6_addr;
+
+		if (port) {
+			if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+				sctp_handle_no_route(stcb, net, so_locked);
+				SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EHOSTUNREACH);
+				sctp_m_freem(m);
+				return (EHOSTUNREACH);
+			}
+			udp = (struct udphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
+			udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+			udp->uh_dport = port;
+			udp->uh_ulen = htons((uint16_t)(packet_length - sizeof(struct ip6_hdr)));
+			udp->uh_sum = 0;
+			sctphdr = (struct sctphdr *)((caddr_t)udp + sizeof(struct udphdr));
+		} else {
+			sctphdr = (struct sctphdr *)((caddr_t)ip6h + sizeof(struct ip6_hdr));
+		}
+
+		sctphdr->src_port = src_port;
+		sctphdr->dest_port = dest_port;
+		sctphdr->v_tag = v_tag;
+		sctphdr->checksum = 0;
+
+		/*
+		 * We set the hop limit now since there is a good chance
+		 * that our ro pointer is now filled
+		 */
+		ip6h->ip6_hlim = SCTP_GET_HLIM(inp, ro);
+#if !(defined(__Panda__) || defined(__Userspace__))
+		ifp = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
+#endif
+
+#ifdef SCTP_DEBUG
+		/* Copy back so the debug output below shows what is actually in the header */
+		sin6->sin6_addr = ip6h->ip6_dst;
+		lsa6->sin6_addr = ip6h->ip6_src;
+#endif
+
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Calling ipv6 output routine from low level\n");
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "src: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)lsa6);
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "dst: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT3, (struct sockaddr *)sin6);
+		if (net) {
+			sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+			/* preserve the port and scope for link local send */
+			prev_scope = sin6->sin6_scope_id;
+			prev_port = sin6->sin6_port;
+		}
+
+		if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+			/* failed to prepend data, give up */
+			sctp_m_freem(m);
+			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+			return (ENOMEM);
+		}
+		SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
+		if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+			SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+#if defined(__Windows__)
+			udp->uh_sum = 0;
+#elif !defined(__Userspace__)
+			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), packet_length - sizeof(struct ip6_hdr))) == 0) {
+				udp->uh_sum = 0xffff;
+			}
+#endif
+		} else {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+#if __FreeBSD_version < 900000
+			sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
+			SCTP_STAT_INCR(sctps_sendswcrc);
+#else
+#if __FreeBSD_version > 901000
+			m->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
+#else
+			m->m_pkthdr.csum_flags = CSUM_SCTP;
+#endif
+			m->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+			SCTP_STAT_INCR(sctps_sendhwcrc);
+#endif
+#else
+			if (!(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) &&
+			      (stcb) && (stcb->asoc.scope.loopback_scope))) {
+				sctphdr->checksum = sctp_calculate_cksum(m, sizeof(struct ip6_hdr));
+				SCTP_STAT_INCR(sctps_sendswcrc);
+			} else {
+				SCTP_STAT_INCR(sctps_sendnocrc);
+			}
+#endif
+#endif
+		}
+		/* send it out. table id is taken from stcb */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+			so = SCTP_INP_SO(inp);
+			SCTP_SOCKET_UNLOCK(so, 0);
+		}
+#endif
+#ifdef SCTP_PACKET_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING)
+			sctp_packet_log(o_pak);
+#endif
+#if !(defined(__Panda__) || defined(__Userspace__))
+		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, &ifp, stcb, vrf_id);
+#else
+		SCTP_IP6_OUTPUT(ret, o_pak, (struct route_in6 *)ro, NULL, stcb, vrf_id);
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		if ((SCTP_BASE_SYSCTL(sctp_output_unlocked)) && (so_locked)) {
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 0);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		}
+#endif
+		if (net) {
+			/* for link local this must be done */
+			sin6->sin6_scope_id = prev_scope;
+			sin6->sin6_port = prev_port;
+		}
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "return from send is %d\n", ret);
+		SCTP_STAT_INCR(sctps_sendpackets);
+		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+		if (ret) {
+			SCTP_STAT_INCR(sctps_senderrors);
+		}
+		if (net == NULL) {
+			/* Now, if we had a temporary route, free it */
+#if defined(__FreeBSD__) && __FreeBSD_version > 901000
+			RO_RTFREE(ro);
+#else
+			if (ro->ro_rt) {
+				RTFREE(ro->ro_rt);
+				ro->ro_rt = NULL;
+			}
+#endif
+		} else {
+			/* PMTU check versus smallest asoc MTU goes here */
+			if (ro->ro_rt == NULL) {
+				/* Route was freed */
+				if (net->ro._s_addr &&
+				    net->src_addr_selected) {
+					sctp_free_ifa(net->ro._s_addr);
+					net->ro._s_addr = NULL;
+				}
+				net->src_addr_selected = 0;
+			}
+			if ((ro->ro_rt != NULL) &&
+			    (net->ro._s_addr)) {
+				uint32_t mtu;
+				mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, ro->ro_rt);
+				if (mtu &&
+				    (stcb->asoc.smallest_mtu > mtu)) {
+					sctp_mtu_size_reset(inp, &stcb->asoc, mtu);
+					net->mtu = mtu;
+					if (net->port) {
+						net->mtu -= sizeof(struct udphdr);
+					}
+				}
+			}
+#if !defined(__Panda__) && !defined(__Userspace__)
+			else if (ifp) {
+#if defined(__Windows__)
+#define ND_IFINFO(ifp)	(ifp)
+#define linkmtu		if_mtu
+#endif
+				if (ND_IFINFO(ifp)->linkmtu &&
+				    (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
+					sctp_mtu_size_reset(inp,
+					    &stcb->asoc,
+					    ND_IFINFO(ifp)->linkmtu);
+				}
+			}
+#endif
+		}
+		return (ret);
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
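+		/*
+		 * Userspace "connected socket" transport: build the plain
+		 * SCTP packet (common header plus chunks, no IP header),
+		 * checksum it unless CRC32c is offloaded, and hand a flat
+		 * copy to the application-supplied conn_output() callback.
+		 */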
+		char *buffer;
+		struct sockaddr_conn *sconn;
+		int len;
+
+		sconn = (struct sockaddr_conn *)to;
+		len = sizeof(struct sctphdr);
+		newm = sctp_get_mbuf_for_msg(len, 1, M_NOWAIT, 1, MT_DATA);
+		if (newm == NULL) {
+			sctp_m_freem(m);
+			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+			return (ENOMEM);
+		}
+		SCTP_ALIGN_TO_END(newm, len);
+		SCTP_BUF_LEN(newm) = len;
+		SCTP_BUF_NEXT(newm) = m;
+		m = newm;
+		packet_length = sctp_calculate_len(m);
+		sctphdr = mtod(m, struct sctphdr *);
+		sctphdr->src_port = src_port;
+		sctphdr->dest_port = dest_port;
+		sctphdr->v_tag = v_tag;
+		sctphdr->checksum = 0;
+#if defined(SCTP_WITH_NO_CSUM)
+		SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
+			sctphdr->checksum = sctp_calculate_cksum(m, 0);
+			SCTP_STAT_INCR(sctps_sendswcrc);
+		} else {
+			SCTP_STAT_INCR(sctps_sendhwcrc);
+		}
+#endif
+		if (tos_value == 0) {
+			tos_value = inp->ip_inp.inp.inp_ip_tos;
+		}
+		tos_value &= 0xfc;
+		if (ecn_ok) {
+			tos_value |= sctp_get_ect(stcb);
+		}
+		/* TODO: avoid allocating and freeing a buffer for each packet */
+		if ((buffer = malloc(packet_length)) != NULL) {
+			m_copydata(m, 0, packet_length, buffer);
+			ret = SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, packet_length, tos_value, nofragment_flag);
+			free(buffer);
+		} else {
+			ret = ENOMEM;
+		}
+		sctp_m_freem(m);
+		return (ret);
+	}
+#endif
+	default:
+		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
+		        ((struct sockaddr *)to)->sa_family);
+		sctp_m_freem(m);
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+		return (EFAULT);
+	}
+}
+
+
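+/*
+ * Build and send an INIT chunk to the primary destination (confirming that
+ * address), appending the optional parameters the association supports:
+ * adaptation layer indication, ECN, PR-SCTP, NAT friendliness, supported
+ * chunk extensions, AUTH (random/HMACs/chunk list), cookie preservative,
+ * supported address types, and finally the local addresses.
+ */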
+void
+sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	struct mbuf *m, *m_last;
+	struct sctp_nets *net;
+	struct sctp_init_chunk *init;
+	struct sctp_supported_addr_param *sup_addr;
+	struct sctp_adaptation_layer_indication *ali;
+	struct sctp_supported_chunk_types_param *pr_supported;
+	struct sctp_paramhdr *ph;
+	int cnt_inits_to = 0;
+	int error;
+	uint16_t num_ext, chunk_len, padding_len, parameter_len;
+
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(inp));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(inp));
+	}
+#endif
+	/* INITs always go to the primary (and usually the ONLY) address */
+	net = stcb->asoc.primary_destination;
+	if (net == NULL) {
+		net = TAILQ_FIRST(&stcb->asoc.nets);
+		if (net == NULL) {
+			/* TSNH */
+			return;
+		}
+		/* we confirm any address we send an INIT to */
+		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+		(void)sctp_set_primary_addr(stcb, NULL, net);
+	} else {
+		/* we confirm any address we send an INIT to */
+		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT\n");
+#ifdef INET6
+	if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+		/*
+		 * Special hook: if we are sending to a link-local address,
+		 * it will not show up in our private address count.
+		 */
+		if (IN6_IS_ADDR_LINKLOCAL(&net->ro._l_addr.sin6.sin6_addr))
+			cnt_inits_to = 1;
+	}
+#endif
+	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+		/* This case should not happen */
+		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - failed timer?\n");
+		return;
+	}
+	/* start the INIT timer */
+	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
+
+	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_NOWAIT, 1, MT_DATA);
+	if (m == NULL) {
+		/* No memory, INIT timer will re-attempt. */
+		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - mbuf?\n");
+		return;
+	}
+	chunk_len = (uint16_t)sizeof(struct sctp_init_chunk);
+	padding_len = 0;
+	/* Now let's put the chunk header in place */
+	init = mtod(m, struct sctp_init_chunk *);
+	/* now the chunk header */
+	init->ch.chunk_type = SCTP_INITIATION;
+	init->ch.chunk_flags = 0;
+	/* fill in later from mbuf we build */
+	init->ch.chunk_length = 0;
+	/* place in my tag */
+	init->init.initiate_tag = htonl(stcb->asoc.my_vtag);
+	/* set up some of the credits. */
+	init->init.a_rwnd = htonl(max(inp->sctp_socket?SCTP_SB_LIMIT_RCV(inp->sctp_socket):0,
+	                              SCTP_MINIMAL_RWND));
+	init->init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
+	init->init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
+	init->init.initial_tsn = htonl(stcb->asoc.init_seq_number);
+
+	/* Adaptation layer indication parameter */
+	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
+		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
+		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
+		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+		ali->ph.param_length = htons(parameter_len);
+		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
+		chunk_len += parameter_len;
+	}
+
+	/* ECN parameter */
+	if (stcb->asoc.ecn_supported == 1) {
+		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+		ph->param_type = htons(SCTP_ECN_CAPABLE);
+		ph->param_length = htons(parameter_len);
+		chunk_len += parameter_len;
+	}
+
+	/* PR-SCTP supported parameter */
+	if (stcb->asoc.prsctp_supported == 1) {
+		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
+		ph->param_length = htons(parameter_len);
+		chunk_len += parameter_len;
+	}
+
+	/* Add NAT friendly parameter. */
+	if (SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly)) {
+		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+		ph->param_length = htons(parameter_len);
+		chunk_len += parameter_len;
+	}
+
+	/* And now tell the peer which extensions we support */
+	num_ext = 0;
+	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
+	if (stcb->asoc.prsctp_supported == 1) {
+		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+		if (stcb->asoc.idata_supported) {
+			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
+		}
+	}
+	if (stcb->asoc.auth_supported == 1) {
+		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+	}
+	if (stcb->asoc.asconf_supported == 1) {
+		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+	}
+	if (stcb->asoc.reconfig_supported == 1) {
+		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+	}
+	if (stcb->asoc.idata_supported) {
+		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
+	}
+	if (stcb->asoc.nrsack_supported == 1) {
+		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
+	}
+	if (stcb->asoc.pktdrop_supported == 1) {
+		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+	}
+	if (num_ext > 0) {
+		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
+		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+		pr_supported->ph.param_length = htons(parameter_len);
+		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+		chunk_len += parameter_len;
+	}
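+	/*
+	 * Parameters must start on 32-bit boundaries. The pattern below is
+	 * to keep the pad bytes owed by the previous parameter in
+	 * padding_len and only write the zeros immediately before appending
+	 * the next parameter (or at the very end).
+	 */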
+	/* add authentication parameters */
+	if (stcb->asoc.auth_supported) {
+		/* attach RANDOM parameter, if available */
+		if (stcb->asoc.authinfo.random != NULL) {
+			struct sctp_auth_random *randp;
+
+			if (padding_len > 0) {
+				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+				chunk_len += padding_len;
+				padding_len = 0;
+			}
+			randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
+			parameter_len = (uint16_t)sizeof(struct sctp_auth_random) + stcb->asoc.authinfo.random_len;
+			/* random key already contains the header */
+			memcpy(randp, stcb->asoc.authinfo.random->key, parameter_len);
+			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+			chunk_len += parameter_len;
+		}
+		/* add HMAC_ALGO parameter */
+		if (stcb->asoc.local_hmacs != NULL) {
+			struct sctp_auth_hmac_algo *hmacs;
+
+			if (padding_len > 0) {
+				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+				chunk_len += padding_len;
+				padding_len = 0;
+			}
+			hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
+			parameter_len = (uint16_t)(sizeof(struct sctp_auth_hmac_algo) +
+			                           stcb->asoc.local_hmacs->num_algo * sizeof(uint16_t));
+			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+			hmacs->ph.param_length = htons(parameter_len);
+			sctp_serialize_hmaclist(stcb->asoc.local_hmacs, (uint8_t *)hmacs->hmac_ids);
+			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+			chunk_len += parameter_len;
+		}
+		/* add CHUNKS parameter */
+		if (stcb->asoc.local_auth_chunks != NULL) {
+			struct sctp_auth_chunk_list *chunks;
+
+			if (padding_len > 0) {
+				memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+				chunk_len += padding_len;
+				padding_len = 0;
+			}
+			chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
+			parameter_len = (uint16_t)(sizeof(struct sctp_auth_chunk_list) +
+			                           sctp_auth_get_chklist_size(stcb->asoc.local_auth_chunks));
+			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+			chunks->ph.param_length = htons(parameter_len);
+			sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks, chunks->chunk_types);
+			padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+			chunk_len += parameter_len;
+		}
+	}
+
+	/* now any cookie time extensions */
+	if (stcb->asoc.cookie_preserve_req) {
+		struct sctp_cookie_perserve_param *cookie_preserve;
+
+		if (padding_len > 0) {
+			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+			chunk_len += padding_len;
+			padding_len = 0;
+		}
+		parameter_len = (uint16_t)sizeof(struct sctp_cookie_perserve_param);
+		cookie_preserve = (struct sctp_cookie_perserve_param *)(mtod(m, caddr_t) + chunk_len);
+		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
+		cookie_preserve->ph.param_length = htons(parameter_len);
+		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
+		stcb->asoc.cookie_preserve_req = 0;
+		chunk_len += parameter_len;
+	}
+
+	if (stcb->asoc.scope.ipv4_addr_legal || stcb->asoc.scope.ipv6_addr_legal) {
+		uint8_t i;
+
+		if (padding_len > 0) {
+			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+			chunk_len += padding_len;
+			padding_len = 0;
+		}
+		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+		if (stcb->asoc.scope.ipv4_addr_legal) {
+			parameter_len += (uint16_t)sizeof(uint16_t);
+		}
+		if (stcb->asoc.scope.ipv6_addr_legal) {
+			parameter_len += (uint16_t)sizeof(uint16_t);
+		}
+		sup_addr = (struct sctp_supported_addr_param *)(mtod(m, caddr_t) + chunk_len);
+		sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
+		sup_addr->ph.param_length = htons(parameter_len);
+		i = 0;
+		if (stcb->asoc.scope.ipv4_addr_legal) {
+			sup_addr->addr_type[i++] = htons(SCTP_IPV4_ADDRESS);
+		}
+		if (stcb->asoc.scope.ipv6_addr_legal) {
+			sup_addr->addr_type[i++] = htons(SCTP_IPV6_ADDRESS);
+		}
+		padding_len = 4 - 2 * i;
+		chunk_len += parameter_len;
+	}
+
+	SCTP_BUF_LEN(m) = chunk_len;
+	/* now the addresses */
+	/* To optimize this we could put the scoping stuff
+	 * into a structure and remove the individual uint8's from
+	 * the assoc structure. Then we could just pass in the
+	 * address within the stcb. But for now this is a quick
+	 * hack to get the address stuff teased apart.
+	 */
+	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &stcb->asoc.scope,
+	                                    m, cnt_inits_to,
+	                                    &padding_len, &chunk_len);
+
+	init->ch.chunk_length = htons(chunk_len);
+	if (padding_len > 0) {
+		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
+			sctp_m_freem(m);
+			return;
+		}
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT4, "Sending INIT - calls lowlevel_output\n");
+	if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+	                                        (struct sockaddr *)&net->ro._l_addr,
+	                                        m, 0, NULL, 0, 0, 0, 0,
+	                                        inp->sctp_lport, stcb->rport, htonl(0),
+	                                        net->port, NULL,
+#if defined(__FreeBSD__)
+	                                        0, 0,
+#endif
+	                                        so_locked))) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
+		if (error == ENOBUFS) {
+			stcb->asoc.ifp_had_enobuf = 1;
+			SCTP_STAT_INCR(sctps_lowlevelerr);
+		}
+	} else {
+		stcb->asoc.ifp_had_enobuf = 0;
+	}
+	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+	(void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
+}
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
+	int param_offset, int *abort_processing, struct sctp_chunkhdr *cp, int *nat_friendly)
+{
+	/*
+	 * Given an mbuf containing an INIT or INIT-ACK, with param_offset
+	 * pointing at the beginning of the parameters (i.e. iphlen +
+	 * sizeof(struct sctp_init_msg)), parse through the parameters to
+	 * the end of the mbuf and verify that all parameters are known.
+	 *
+	 * For unknown parameters build and return an mbuf with
+	 * UNRECOGNIZED_PARAMETER error causes. If the parameter's flags
+	 * indicate that processing of this chunk must stop, stop and set
+	 * *abort_processing to 1.
+	 *
+	 * By having param_offset be pre-set to where parameters begin it is
+	 * hoped that this routine may be reused in the future by new
+	 * features.
+	 */
+	struct sctp_paramhdr *phdr, params;
+
+	struct mbuf *mat, *op_err;
+	char tempbuf[SCTP_PARAM_BUFFER_SIZE];
+	int at, limit, pad_needed;
+	uint16_t ptype, plen, padded_size;
+	int err_at;
+
+	*abort_processing = 0;
+	mat = in_initpkt;
+	err_at = 0;
+	limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
+	at = param_offset;
+	op_err = NULL;
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Check for unrecognized param's\n");
+	phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
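+	/*
+	 * Each parameter is a TLV: a 16-bit type and a 16-bit length that
+	 * covers type, length and value (but not padding), followed by the
+	 * value and 0-3 bytes of padding up to the next 32-bit boundary
+	 * (RFC 4960, Section 3.2.1).  SCTP_SIZE32() yields the padded size
+	 * used to step from one parameter to the next.
+	 */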
+	while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
+		ptype = ntohs(phdr->param_type);
+		plen = ntohs(phdr->param_length);
+		if ((plen > limit) || (plen < sizeof(struct sctp_paramhdr))) {
+			/* whacked (malformed) parameter */
+			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error %d\n", plen);
+			goto invalid_size;
+		}
+		limit -= SCTP_SIZE32(plen);
+		/*-
+		 * All parameters for all chunks that we know/understand are
+		 * listed here. We process them in other places and take the
+		 * appropriate stop actions per the upper bits. However, this
+		 * is the generic routine that processors can call to get back
+		 * an operational error to either incorporate (init-ack) or send.
+		 */
+		padded_size = SCTP_SIZE32(plen);
+		switch (ptype) {
+			/* Params with variable size */
+		case SCTP_HEARTBEAT_INFO:
+		case SCTP_STATE_COOKIE:
+		case SCTP_UNRECOG_PARAM:
+		case SCTP_ERROR_CAUSE_IND:
+			/* ok skip fwd */
+			at += padded_size;
+			break;
+			/* Params with variable size within a range */
+		case SCTP_CHUNK_LIST:
+		case SCTP_SUPPORTED_CHUNK_EXT:
+			if (padded_size > (sizeof(struct sctp_supported_chunk_types_param) + (sizeof(uint8_t) * SCTP_MAX_SUPPORTED_EXT))) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error chklist %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_SUPPORTED_ADDRTYPE:
+			if (padded_size > SCTP_MAX_ADDR_PARAMS_SIZE) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error supaddrtype %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_RANDOM:
+			if (padded_size > (sizeof(struct sctp_auth_random) + SCTP_RANDOM_MAX_SIZE)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error random %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_SET_PRIM_ADDR:
+		case SCTP_DEL_IP_ADDRESS:
+		case SCTP_ADD_IP_ADDRESS:
+			if ((padded_size != sizeof(struct sctp_asconf_addrv4_param)) &&
+			    (padded_size != sizeof(struct sctp_asconf_addr_param))) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error setprim %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+			/* Params with a fixed size */
+		case SCTP_IPV4_ADDRESS:
+			if (padded_size != sizeof(struct sctp_ipv4addr_param)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv4 addr %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_IPV6_ADDRESS:
+			if (padded_size != sizeof(struct sctp_ipv6addr_param)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ipv6 addr %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_COOKIE_PRESERVE:
+			if (padded_size != sizeof(struct sctp_cookie_perserve_param)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error cookie-preserve %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_HAS_NAT_SUPPORT:
+			*nat_friendly = 1;
+			/* fall through */
+		case SCTP_PRSCTP_SUPPORTED:
+			if (padded_size != sizeof(struct sctp_paramhdr)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error prsctp/nat support %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_ECN_CAPABLE:
+			if (padded_size != sizeof(struct sctp_paramhdr)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error ecn %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_ULP_ADAPTATION:
+			if (padded_size != sizeof(struct sctp_adaptation_layer_indication)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error adaptation %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_SUCCESS_REPORT:
+			if (padded_size != sizeof(struct sctp_asconf_paramhdr)) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "Invalid size - error success %d\n", plen);
+				goto invalid_size;
+			}
+			at += padded_size;
+			break;
+		case SCTP_HOSTNAME_ADDRESS:
+		{
+			/* We can NOT handle HOST NAME addresses!! */
+			int l_len;
+
+			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Can't handle hostname addresses.. abort processing\n");
+			*abort_processing = 1;
+			if (op_err == NULL) {
+				/* Ok need to try to get an mbuf */
+#ifdef INET6
+				l_len = SCTP_MIN_OVERHEAD;
+#else
+				l_len = SCTP_MIN_V4_OVERHEAD;
+#endif
+				l_len += sizeof(struct sctp_chunkhdr);
+				l_len += plen;
+				l_len += sizeof(struct sctp_paramhdr);
+				op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
+				if (op_err) {
+					SCTP_BUF_LEN(op_err) = 0;
+					/*
+					 * pre-reserve space for ip and sctp
+					 * header  and chunk hdr
+					 */
+#ifdef INET6
+					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+					SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+					SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+				}
+			}
+			if (op_err) {
+				/* If we have space */
+				struct sctp_paramhdr s;
+
+				if (err_at % 4) {
+					uint32_t cpthis = 0;
+
+					pad_needed = 4 - (err_at % 4);
+					m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+					err_at += pad_needed;
+				}
+				s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
+				s.param_length = htons(sizeof(s) + plen);
+				m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+				err_at += sizeof(s);
+				phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
+				if (phdr == NULL) {
+					sctp_m_freem(op_err);
+					/*
+					 * we are out of memory but we still
+					 * need to have a look at what to do
+					 * (the system is in trouble
+					 * though).
+					 */
+					return (NULL);
+				}
+				m_copyback(op_err, err_at, plen, (caddr_t)phdr);
+			}
+			return (op_err);
+			break;
+		}
+		default:
+			/*
+			 * we do not recognize the parameter; figure out
+			 * what to do with it.
+			 */
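+			/*
+			 * Per RFC 4960, Section 3.2.1, the two high-order
+			 * bits of an unrecognized parameter type control the
+			 * handling: 0x8000 set means skip the parameter and
+			 * continue processing (clear means stop), and 0x4000
+			 * set means report it back in an "Unrecognized
+			 * Parameter" error cause.
+			 */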
+			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Hit default param %x\n", ptype);
+			if ((ptype & 0x4000) == 0x4000) {
+				/* Report bit is set?? */
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "report op err\n");
+				if (op_err == NULL) {
+					int l_len;
+					/* Ok need to try to get an mbuf */
+#ifdef INET6
+					l_len = SCTP_MIN_OVERHEAD;
+#else
+					l_len = SCTP_MIN_V4_OVERHEAD;
+#endif
+					l_len += sizeof(struct sctp_chunkhdr);
+					l_len += plen;
+					l_len += sizeof(struct sctp_paramhdr);
+					op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
+					if (op_err) {
+						SCTP_BUF_LEN(op_err) = 0;
+#ifdef INET6
+						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+						SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+						SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+					}
+				}
+				if (op_err) {
+					/* If we have space */
+					struct sctp_paramhdr s;
+
+					if (err_at % 4) {
+						uint32_t cpthis = 0;
+
+						pad_needed = 4 - (err_at % 4);
+						m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+						err_at += pad_needed;
+					}
+					s.param_type = htons(SCTP_UNRECOG_PARAM);
+					s.param_length = htons(sizeof(s) + plen);
+					m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+					err_at += sizeof(s);
+					if (plen > sizeof(tempbuf)) {
+						plen = sizeof(tempbuf);
+					}
+					phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, min(sizeof(tempbuf),plen));
+					if (phdr == NULL) {
+						sctp_m_freem(op_err);
+						/*
+						 * we are out of memory but
+						 * we still need to have a
+						 * look at what to do (the
+						 * system is in trouble
+						 * though).
+						 */
+						op_err = NULL;
+						goto more_processing;
+					}
+					m_copyback(op_err, err_at, plen, (caddr_t)phdr);
+					err_at += plen;
+				}
+			}
+		more_processing:
+			if ((ptype & 0x8000) == 0x0000) {
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "stop proc\n");
+				return (op_err);
+			} else {
+				/* skip this chunk and continue processing */
+				SCTPDBG(SCTP_DEBUG_OUTPUT1, "move on\n");
+				at += SCTP_SIZE32(plen);
+			}
+			break;
+
+		}
+		phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
+	}
+	return (op_err);
+ invalid_size:
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "abort flag set\n");
+	*abort_processing = 1;
+	if ((op_err == NULL) && phdr) {
+		int l_len;
+#ifdef INET6
+		l_len = SCTP_MIN_OVERHEAD;
+#else
+		l_len = SCTP_MIN_V4_OVERHEAD;
+#endif
+		l_len += sizeof(struct sctp_chunkhdr);
+		l_len += (2 * sizeof(struct sctp_paramhdr));
+		op_err = sctp_get_mbuf_for_msg(l_len, 0, M_NOWAIT, 1, MT_DATA);
+		if (op_err) {
+			SCTP_BUF_LEN(op_err) = 0;
+#ifdef INET6
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
+#else
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct ip));
+#endif
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
+			SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
+		}
+	}
+	if ((op_err) && phdr) {
+		struct sctp_paramhdr s;
+
+		if (err_at % 4) {
+			uint32_t cpthis = 0;
+
+			pad_needed = 4 - (err_at % 4);
+			m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
+			err_at += pad_needed;
+		}
+		s.param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
+		s.param_length = htons(sizeof(s) + sizeof(struct sctp_paramhdr));
+		m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
+		err_at += sizeof(s);
+		/* Only copy back the p-hdr that caused the issue */
+		m_copyback(op_err, err_at, sizeof(struct sctp_paramhdr), (caddr_t)phdr);
+	}
+	return (op_err);
+}
+
+static int
+sctp_are_there_new_addresses(struct sctp_association *asoc,
+    struct mbuf *in_initpkt, int offset, struct sockaddr *src)
+{
+	/*
+	 * Given an INIT packet, look through the packet to verify that there
+	 * are NO new addresses. As we go through the parameters, add reports
+	 * of any un-understood parameters that require an error.  Also we
+	 * must return (1) to drop the packet if we see an un-understood
+	 * parameter that tells us to drop the chunk.
+	 */
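+	/*
+	 * Only the packet's source address and any IPv4/IPv6 address
+	 * parameters are compared against the existing nets here;
+	 * unrecognized parameters themselves are handled separately by
+	 * sctp_arethere_unrecognized_parameters().
+	 */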
+	struct sockaddr *sa_touse;
+	struct sockaddr *sa;
+	struct sctp_paramhdr *phdr, params;
+	uint16_t ptype, plen;
+	uint8_t fnd;
+	struct sctp_nets *net;
+	int check_src;
+#ifdef INET
+	struct sockaddr_in sin4, *sa4;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 sin6, *sa6;
+#endif
+#if defined(__Userspace__)
+	struct sockaddr_conn *sac;
+#endif
+
+#ifdef INET
+	memset(&sin4, 0, sizeof(sin4));
+	sin4.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	sin4.sin_len = sizeof(sin4);
+#endif
+#endif
+#ifdef INET6
+	memset(&sin6, 0, sizeof(sin6));
+	sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+	sin6.sin6_len = sizeof(sin6);
+#endif
+#endif
+	/* First what about the src address of the pkt ? */
+	check_src = 0;
+	switch (src->sa_family) {
+#ifdef INET
+	case AF_INET:
+		if (asoc->scope.ipv4_addr_legal) {
+			check_src = 1;
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		if (asoc->scope.ipv6_addr_legal) {
+			check_src = 1;
+		}
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		if (asoc->scope.conn_addr_legal) {
+			check_src = 1;
+		}
+		break;
+#endif
+	default:
+		/* TSNH */
+		break;
+	}
+	if (check_src) {
+		fnd = 0;
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			sa = (struct sockaddr *)&net->ro._l_addr;
+			if (sa->sa_family == src->sa_family) {
+#ifdef INET
+				if (sa->sa_family == AF_INET) {
+					struct sockaddr_in *src4;
+
+					sa4 = (struct sockaddr_in *)sa;
+					src4 = (struct sockaddr_in *)src;
+					if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) {
+						fnd = 1;
+						break;
+					}
+				}
+#endif
+#ifdef INET6
+				if (sa->sa_family == AF_INET6) {
+					struct sockaddr_in6 *src6;
+
+					sa6 = (struct sockaddr_in6 *)sa;
+					src6 = (struct sockaddr_in6 *)src;
+					if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) {
+						fnd = 1;
+						break;
+					}
+				}
+#endif
+#if defined(__Userspace__)
+				if (sa->sa_family == AF_CONN) {
+					struct sockaddr_conn *srcc;
+
+					sac = (struct sockaddr_conn *)sa;
+					srcc = (struct sockaddr_conn *)src;
+					if (sac->sconn_addr == srcc->sconn_addr) {
+						fnd = 1;
+						break;
+					}
+				}
+#endif
+			}
+		}
+		if (fnd == 0) {
+			/* New address added! no need to look further. */
+			return (1);
+		}
+	}
+	/* Ok so far, let's munge through the rest of the packet */
+	offset += sizeof(struct sctp_init_chunk);
+	phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
+	while (phdr) {
+		sa_touse = NULL;
+		ptype = ntohs(phdr->param_type);
+		plen = ntohs(phdr->param_length);
+		switch (ptype) {
+#ifdef INET
+		case SCTP_IPV4_ADDRESS:
+		{
+			struct sctp_ipv4addr_param *p4, p4_buf;
+
+			phdr = sctp_get_next_param(in_initpkt, offset,
+			    (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
+			if (plen != sizeof(struct sctp_ipv4addr_param) ||
+			    phdr == NULL) {
+				return (1);
+			}
+			if (asoc->scope.ipv4_addr_legal) {
+				p4 = (struct sctp_ipv4addr_param *)phdr;
+				sin4.sin_addr.s_addr = p4->addr;
+				sa_touse = (struct sockaddr *)&sin4;
+			}
+			break;
+		}
+#endif
+#ifdef INET6
+		case SCTP_IPV6_ADDRESS:
+		{
+			struct sctp_ipv6addr_param *p6, p6_buf;
+
+			phdr = sctp_get_next_param(in_initpkt, offset,
+			    (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
+			if (plen != sizeof(struct sctp_ipv6addr_param) ||
+			    phdr == NULL) {
+				return (1);
+			}
+			if (asoc->scope.ipv6_addr_legal) {
+				p6 = (struct sctp_ipv6addr_param *)phdr;
+				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+				       sizeof(p6->addr));
+				sa_touse = (struct sockaddr *)&sin6;
+			}
+			break;
+		}
+#endif
+		default:
+			sa_touse = NULL;
+			break;
+		}
+		if (sa_touse) {
+			/* ok, sa_touse points to one to check */
+			fnd = 0;
+			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+				sa = (struct sockaddr *)&net->ro._l_addr;
+				if (sa->sa_family != sa_touse->sa_family) {
+					continue;
+				}
+#ifdef INET
+				if (sa->sa_family == AF_INET) {
+					sa4 = (struct sockaddr_in *)sa;
+					if (sa4->sin_addr.s_addr ==
+					    sin4.sin_addr.s_addr) {
+						fnd = 1;
+						break;
+					}
+				}
+#endif
+#ifdef INET6
+				if (sa->sa_family == AF_INET6) {
+					sa6 = (struct sockaddr_in6 *)sa;
+					if (SCTP6_ARE_ADDR_EQUAL(
+					    sa6, &sin6)) {
+						fnd = 1;
+						break;
+					}
+				}
+#endif
+			}
+			if (!fnd) {
+				/* New addr added! no need to look further */
+				return (1);
+			}
+		}
+		offset += SCTP_SIZE32(plen);
+		phdr = sctp_get_next_param(in_initpkt, offset, &params, sizeof(params));
+	}
+	return (0);
+}
+
+/*
+ * Given an mbuf chain that was sent into us containing an INIT, build an
+ * INIT-ACK with COOKIE and send it back. We assume that the in_initpkt has
+ * done a pullup to include the IPv6/IPv4 header, the SCTP header and the
+ * initial part of the INIT message (i.e. the struct sctp_init_msg).
+ */
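+/*
+ * Note that when no TCB exists yet (the normal listener case) nothing is
+ * allocated here: everything needed to set up the association later is
+ * packed into the state cookie built below and signed with an endpoint
+ * secret, so the association is only instantiated once a valid COOKIE-ECHO
+ * is returned (RFC 4960, Section 5.1.3).
+ */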
+void
+sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                       struct sctp_nets *src_net, struct mbuf *init_pkt,
+                       int iphlen, int offset,
+                       struct sockaddr *src, struct sockaddr *dst,
+                       struct sctphdr *sh, struct sctp_init_chunk *init_chk,
+#if defined(__FreeBSD__)
+		       uint8_t mflowtype, uint32_t mflowid,
+#endif
+                       uint32_t vrf_id, uint16_t port, int hold_inp_lock)
+{
+	struct sctp_association *asoc;
+	struct mbuf *m, *m_tmp, *m_last, *m_cookie, *op_err;
+	struct sctp_init_ack_chunk *initack;
+	struct sctp_adaptation_layer_indication *ali;
+	struct sctp_supported_chunk_types_param *pr_supported;
+	struct sctp_paramhdr *ph;
+	union sctp_sockstore *over_addr;
+	struct sctp_scoping scp;
+#ifdef INET
+	struct sockaddr_in *dst4 = (struct sockaddr_in *)dst;
+	struct sockaddr_in *src4 = (struct sockaddr_in *)src;
+	struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *dst6 = (struct sockaddr_in6 *)dst;
+	struct sockaddr_in6 *src6 = (struct sockaddr_in6 *)src;
+	struct sockaddr_in6 *sin6;
+#endif
+#if defined(__Userspace__)
+	struct sockaddr_conn *dstconn = (struct sockaddr_conn *)dst;
+	struct sockaddr_conn *srcconn = (struct sockaddr_conn *)src;
+	struct sockaddr_conn *sconn;
+#endif
+	struct sockaddr *to;
+	struct sctp_state_cookie stc;
+	struct sctp_nets *net = NULL;
+	uint8_t *signature = NULL;
+	int cnt_inits_to = 0;
+	uint16_t his_limit, i_want;
+	int abort_flag;
+	int nat_friendly = 0;
+	int error;
+	struct socket *so;
+	uint16_t num_ext, chunk_len, padding_len, parameter_len;
+
+	if (stcb) {
+		asoc = &stcb->asoc;
+	} else {
+		asoc = NULL;
+	}
+	if ((asoc != NULL) &&
+	    (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT)) {
+		if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) {
+			/*
+			 * new addresses, out of here in non-cookie-wait states
+			 *
+			 * Send an ABORT, without the new address error cause.
+			 * This looks no different than if no listener
+			 * was present.
+			 */
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             "Address added");
+			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__)
+			                mflowtype, mflowid, inp->fibnum,
+#endif
+			                vrf_id, port);
+			return;
+		}
+		if (src_net != NULL && (src_net->port != port)) {
+			/*
+			 * change of remote encapsulation port, out of here in
+			 * non-cookie-wait states
+			 *
+			 * Send an ABORT, without a specific error cause.
+			 * This looks no different than if no listener
+			 * was present.
+			 */
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             "Remote encapsulation port changed");
+			sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err,
+#if defined(__FreeBSD__)
+			                mflowtype, mflowid, inp->fibnum,
+#endif
+			                vrf_id, port);
+			return;
+		}
+	}
+	abort_flag = 0;
+	op_err = sctp_arethere_unrecognized_parameters(init_pkt,
+						       (offset + sizeof(struct sctp_init_chunk)),
+						       &abort_flag, (struct sctp_chunkhdr *)init_chk, &nat_friendly);
+	if (abort_flag) {
+	do_a_abort:
+		if (op_err == NULL) {
+			char msg[SCTP_DIAG_INFO_LEN];
+
+			snprintf(msg, sizeof(msg), "%s:%d at %s", __FILE__, __LINE__, __func__);
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             msg);
+		}
+		sctp_send_abort(init_pkt, iphlen, src, dst, sh,
+				init_chk->init.initiate_tag, op_err,
+#if defined(__FreeBSD__)
+		                mflowtype, mflowid, inp->fibnum,
+#endif
+		                vrf_id, port);
+		return;
+	}
+	m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (m == NULL) {
+		/* No memory, INIT timer will re-attempt. */
+		if (op_err)
+			sctp_m_freem(op_err);
+		return;
+	}
+	chunk_len = (uint16_t)sizeof(struct sctp_init_ack_chunk);
+	padding_len = 0;
+
+	/*
+	 * We might not overwrite the identification[] completely and on
+	 * some platforms time_entered will contain some padding.
+	 * Therefore zero out the cookie to avoid putting
+	 * uninitialized memory on the wire.
+	 */
+	memset(&stc, 0, sizeof(struct sctp_state_cookie));
+
+	/* the time I built cookie */
+	(void)SCTP_GETTIME_TIMEVAL(&stc.time_entered);
+
+	/* populate any tie tags */
+	if (asoc != NULL) {
+		/* unlock before tag selections */
+		stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
+		stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
+		stc.cookie_life = asoc->cookie_life;
+		net = asoc->primary_destination;
+	} else {
+		stc.tie_tag_my_vtag = 0;
+		stc.tie_tag_peer_vtag = 0;
+		/* life I will award this cookie */
+		stc.cookie_life = inp->sctp_ep.def_cookie_life;
+	}
+
+	/* copy in the ports for later check */
+	stc.myport = sh->dest_port;
+	stc.peerport = sh->src_port;
+
+	/*
+	 * If we wanted to honor cookie life extensions, we would add to
+	 * stc.cookie_life. For now we should NOT honor any extension
+	 */
+	stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+		stc.ipv6_addr_legal = 1;
+		if (SCTP_IPV6_V6ONLY(inp)) {
+			stc.ipv4_addr_legal = 0;
+		} else {
+			stc.ipv4_addr_legal = 1;
+		}
+#if defined(__Userspace__)
+		stc.conn_addr_legal = 0;
+#endif
+	} else {
+		stc.ipv6_addr_legal = 0;
+#if defined(__Userspace__)
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+			stc.conn_addr_legal = 1;
+			stc.ipv4_addr_legal = 0;
+		} else {
+			stc.conn_addr_legal = 0;
+			stc.ipv4_addr_legal = 1;
+		}
+#else
+		stc.ipv4_addr_legal = 1;
+#endif
+	}
+	stc.ipv4_scope = 0;
+	if (net == NULL) {
+		to = src;
+		switch (dst->sa_family) {
+#ifdef INET
+		case AF_INET:
+		{
+			/* lookup address */
+			stc.address[0] = src4->sin_addr.s_addr;
+			stc.address[1] = 0;
+			stc.address[2] = 0;
+			stc.address[3] = 0;
+			stc.addr_type = SCTP_IPV4_ADDRESS;
+			/* local from address */
+			stc.laddress[0] = dst4->sin_addr.s_addr;
+			stc.laddress[1] = 0;
+			stc.laddress[2] = 0;
+			stc.laddress[3] = 0;
+			stc.laddr_type = SCTP_IPV4_ADDRESS;
+			/* scope_id is only for v6 */
+			stc.scope_id = 0;
+			if ((IN4_ISPRIVATE_ADDRESS(&src4->sin_addr)) ||
+			    (IN4_ISPRIVATE_ADDRESS(&dst4->sin_addr))){
+				stc.ipv4_scope = 1;
+			}
+			/* Must use the address in this case */
+			if (sctp_is_address_on_local_host(src, vrf_id)) {
+				stc.loopback_scope = 1;
+				stc.ipv4_scope = 1;
+				stc.site_scope = 1;
+				stc.local_scope = 0;
+			}
+			break;
+		}
+#endif
+#ifdef INET6
+		case AF_INET6:
+		{
+			stc.addr_type = SCTP_IPV6_ADDRESS;
+			memcpy(&stc.address, &src6->sin6_addr, sizeof(struct in6_addr));
+#if defined(__FreeBSD__) && (((__FreeBSD_version < 900000) && (__FreeBSD_version >= 804000)) || (__FreeBSD_version > 900000))
+			stc.scope_id = ntohs(in6_getscope(&src6->sin6_addr));
+#else
+			stc.scope_id = 0;
+#endif
+			if (sctp_is_address_on_local_host(src, vrf_id)) {
+				stc.loopback_scope = 1;
+				stc.local_scope = 0;
+				stc.site_scope = 1;
+				stc.ipv4_scope = 1;
+			} else if (IN6_IS_ADDR_LINKLOCAL(&src6->sin6_addr) ||
+			           IN6_IS_ADDR_LINKLOCAL(&dst6->sin6_addr)) {
+				/*
+				 * If the new destination or source is
+				 * LINK_LOCAL we must have both site and
+				 * local scope in common. Don't set local
+				 * scope though, since we must depend on the
+				 * source to be added implicitly. We cannot
+				 * assume that just because we share one link
+				 * all links are common.
+				 */
+#if defined(__APPLE__)
+				/* Mac OS X currently doesn't have in6_getscope() */
+				stc.scope_id = src6->sin6_addr.s6_addr16[1];
+#endif
+				stc.local_scope = 0;
+				stc.site_scope = 1;
+				stc.ipv4_scope = 1;
+				/*
+				 * we start counting for the private address
+				 * stuff at 1, since the link-local address we
+				 * source from won't show up in our scoped
+				 * count.
+				 */
+				cnt_inits_to = 1;
+				/* pull out the scope_id from incoming pkt */
+			} else if (IN6_IS_ADDR_SITELOCAL(&src6->sin6_addr) ||
+			           IN6_IS_ADDR_SITELOCAL(&dst6->sin6_addr)) {
+				/*
+				 * If the new destination or source is
+				 * SITE_LOCAL then we must have site scope in
+				 * common.
+				 */
+				stc.site_scope = 1;
+			}
+			memcpy(&stc.laddress, &dst6->sin6_addr, sizeof(struct in6_addr));
+			stc.laddr_type = SCTP_IPV6_ADDRESS;
+			break;
+		}
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+		{
+			/* lookup address */
+			stc.address[0] = 0;
+			stc.address[1] = 0;
+			stc.address[2] = 0;
+			stc.address[3] = 0;
+			memcpy(&stc.address, &srcconn->sconn_addr, sizeof(void *));
+			stc.addr_type = SCTP_CONN_ADDRESS;
+			/* local from address */
+			stc.laddress[0] = 0;
+			stc.laddress[1] = 0;
+			stc.laddress[2] = 0;
+			stc.laddress[3] = 0;
+			memcpy(&stc.laddress, &dstconn->sconn_addr, sizeof(void *));
+			stc.laddr_type = SCTP_CONN_ADDRESS;
+			/* scope_id is only for v6 */
+			stc.scope_id = 0;
+			break;
+		}
+#endif
+		default:
+			/* TSNH */
+			goto do_a_abort;
+			break;
+		}
+	} else {
+		/* set the scope per the existing tcb */
+
+#ifdef INET6
+		struct sctp_nets *lnet;
+#endif
+
+		stc.loopback_scope = asoc->scope.loopback_scope;
+		stc.ipv4_scope = asoc->scope.ipv4_local_scope;
+		stc.site_scope = asoc->scope.site_scope;
+		stc.local_scope = asoc->scope.local_scope;
+#ifdef INET6
+		/* Why do we not consider IPv4 LL addresses? */
+		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
+			if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
+				if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
+					/*
+					 * if we have a LL address, start
+					 * counting at 1.
+					 */
+					cnt_inits_to = 1;
+				}
+			}
+		}
+#endif
+		/* use the net pointer */
+		to = (struct sockaddr *)&net->ro._l_addr;
+		switch (to->sa_family) {
+#ifdef INET
+		case AF_INET:
+			sin = (struct sockaddr_in *)to;
+			stc.address[0] = sin->sin_addr.s_addr;
+			stc.address[1] = 0;
+			stc.address[2] = 0;
+			stc.address[3] = 0;
+			stc.addr_type = SCTP_IPV4_ADDRESS;
+			if (net->src_addr_selected == 0) {
+				/*
+				 * strange case here, the INIT should have
+				 * done the selection.
+				 */
+				net->ro._s_addr = sctp_source_address_selection(inp,
+										stcb, (sctp_route_t *)&net->ro,
+										net, 0, vrf_id);
+				if (net->ro._s_addr == NULL)
+					return;
+
+				net->src_addr_selected = 1;
+
+			}
+			stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
+			stc.laddress[1] = 0;
+			stc.laddress[2] = 0;
+			stc.laddress[3] = 0;
+			stc.laddr_type = SCTP_IPV4_ADDRESS;
+			/* scope_id is only for v6 */
+			stc.scope_id = 0;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			sin6 = (struct sockaddr_in6 *)to;
+			memcpy(&stc.address, &sin6->sin6_addr,
+			       sizeof(struct in6_addr));
+			stc.addr_type = SCTP_IPV6_ADDRESS;
+			stc.scope_id = sin6->sin6_scope_id;
+			if (net->src_addr_selected == 0) {
+				/*
+				 * strange case here, the INIT should have
+				 * done the selection.
+				 */
+				net->ro._s_addr = sctp_source_address_selection(inp,
+										stcb, (sctp_route_t *)&net->ro,
+										net, 0, vrf_id);
+				if (net->ro._s_addr == NULL)
+					return;
+
+				net->src_addr_selected = 1;
+			}
+			memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
+			       sizeof(struct in6_addr));
+			stc.laddr_type = SCTP_IPV6_ADDRESS;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			sconn = (struct sockaddr_conn *)to;
+			stc.address[0] = 0;
+			stc.address[1] = 0;
+			stc.address[2] = 0;
+			stc.address[3] = 0;
+			memcpy(&stc.address, &sconn->sconn_addr, sizeof(void *));
+			stc.addr_type = SCTP_CONN_ADDRESS;
+			stc.laddress[0] = 0;
+			stc.laddress[1] = 0;
+			stc.laddress[2] = 0;
+			stc.laddress[3] = 0;
+			memcpy(&stc.laddress, &sconn->sconn_addr, sizeof(void *));
+			stc.laddr_type = SCTP_CONN_ADDRESS;
+			stc.scope_id = 0;
+			break;
+#endif
+		}
+	}
+	/* Now lets put the SCTP header in place */
+	initack = mtod(m, struct sctp_init_ack_chunk *);
+	/* Save it off for quick ref */
+	stc.peers_vtag = ntohl(init_chk->init.initiate_tag);
+	/* who are we */
+	memcpy(stc.identification, SCTP_VERSION_STRING,
+	       min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
+	memset(stc.reserved, 0, SCTP_RESERVE_SPACE);
+	/* now the chunk header */
+	initack->ch.chunk_type = SCTP_INITIATION_ACK;
+	initack->ch.chunk_flags = 0;
+	/* fill in later from mbuf we build */
+	initack->ch.chunk_length = 0;
+	/* place in my tag */
+	if ((asoc != NULL) &&
+	    ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+	     (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
+	     (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
+		/* re-use the v-tags and init-seq here */
+		initack->init.initiate_tag = htonl(asoc->my_vtag);
+		initack->init.initial_tsn = htonl(asoc->init_seq_number);
+	} else {
+		uint32_t vtag, itsn;
+		if (hold_inp_lock) {
+			SCTP_INP_INCR_REF(inp);
+			SCTP_INP_RUNLOCK(inp);
+		}
+		if (asoc) {
+			atomic_add_int(&asoc->refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+		new_tag:
+			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
+			if ((asoc->peer_supports_nat)  && (vtag == asoc->my_vtag)) {
+				/* Got a duplicate vtag on some guy behind a NAT;
+				 * make sure we don't use it.
+				 */
+				goto new_tag;
+			}
+			initack->init.initiate_tag = htonl(vtag);
+			/* get a TSN to use too */
+			itsn = sctp_select_initial_TSN(&inp->sctp_ep);
+			initack->init.initial_tsn = htonl(itsn);
+			SCTP_TCB_LOCK(stcb);
+			atomic_add_int(&asoc->refcnt, -1);
+		} else {
+			vtag = sctp_select_a_tag(inp, inp->sctp_lport, sh->src_port, 1);
+			initack->init.initiate_tag = htonl(vtag);
+			/* get a TSN to use too */
+			initack->init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
+		}
+		if (hold_inp_lock) {
+			SCTP_INP_RLOCK(inp);
+			SCTP_INP_DECR_REF(inp);
+		}
+	}
+	/* save away my tag too */
+	stc.my_vtag = initack->init.initiate_tag;
+
+	/* set up some of the credits. */
+	so = inp->sctp_socket;
+	if (so == NULL) {
+		/* memory problem */
+		sctp_m_freem(m);
+		return;
+	} else {
+		initack->init.a_rwnd = htonl(max(SCTP_SB_LIMIT_RCV(so), SCTP_MINIMAL_RWND));
+	}
+	/* set what I want */
+	his_limit = ntohs(init_chk->init.num_inbound_streams);
+	/* choose what I want */
+	if (asoc != NULL) {
+		if (asoc->streamoutcnt > asoc->pre_open_streams) {
+			i_want = asoc->streamoutcnt;
+		} else {
+			i_want = asoc->pre_open_streams;
+		}
+	} else {
+		i_want = inp->sctp_ep.pre_open_stream_count;
+	}
+	if (his_limit < i_want) {
+		/* I Want more :< */
+		initack->init.num_outbound_streams = init_chk->init.num_inbound_streams;
+	} else {
+		/* I can have what I want :> */
+		initack->init.num_outbound_streams = htons(i_want);
+	}
+	/* tell him his limit. */
+	initack->init.num_inbound_streams =
+		htons(inp->sctp_ep.max_open_streams_intome);
+
+	/* adaptation layer indication parameter */
+	if (inp->sctp_ep.adaptation_layer_indicator_provided) {
+		parameter_len = (uint16_t)sizeof(struct sctp_adaptation_layer_indication);
+		ali = (struct sctp_adaptation_layer_indication *)(mtod(m, caddr_t) + chunk_len);
+		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
+		ali->ph.param_length = htons(parameter_len);
+		ali->indication = htonl(inp->sctp_ep.adaptation_layer_indicator);
+		chunk_len += parameter_len;
+	}
+
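+	/*
+	 * For each optional feature below the answer comes from the
+	 * existing association when a TCB is present, and from the
+	 * endpoint defaults otherwise.
+	 */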
+	/* ECN parameter */
+	if (((asoc != NULL) && (asoc->ecn_supported == 1)) ||
+	    ((asoc == NULL) && (inp->ecn_supported == 1))) {
+		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+		ph->param_type = htons(SCTP_ECN_CAPABLE);
+		ph->param_length = htons(parameter_len);
+		chunk_len += parameter_len;
+	}
+
+	/* PR-SCTP supported parameter */
+	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
+	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
+		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+		ph->param_type = htons(SCTP_PRSCTP_SUPPORTED);
+		ph->param_length = htons(parameter_len);
+		chunk_len += parameter_len;
+	}
+
+	/* Add NAT friendly parameter */
+	if (nat_friendly) {
+		parameter_len = (uint16_t)sizeof(struct sctp_paramhdr);
+		ph = (struct sctp_paramhdr *)(mtod(m, caddr_t) + chunk_len);
+		ph->param_type = htons(SCTP_HAS_NAT_SUPPORT);
+		ph->param_length = htons(parameter_len);
+		chunk_len += parameter_len;
+	}
+
+	/* And now tell the peer which extensions we support */
+	num_ext = 0;
+	pr_supported = (struct sctp_supported_chunk_types_param *)(mtod(m, caddr_t) + chunk_len);
+	if (((asoc != NULL) && (asoc->prsctp_supported == 1)) ||
+	    ((asoc == NULL) && (inp->prsctp_supported == 1))) {
+		pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
+		if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
+		    ((asoc == NULL) && (inp->idata_supported == 1))) {
+			pr_supported->chunk_types[num_ext++] = SCTP_IFORWARD_CUM_TSN;
+		}
+	}
+	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
+	    ((asoc == NULL) && (inp->auth_supported == 1))) {
+		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
+	}
+	if (((asoc != NULL) && (asoc->asconf_supported == 1)) ||
+	    ((asoc == NULL) && (inp->asconf_supported == 1))) {
+		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
+		pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
+	}
+	if (((asoc != NULL) && (asoc->reconfig_supported == 1)) ||
+	    ((asoc == NULL) && (inp->reconfig_supported == 1))) {
+		pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
+	}
+	if (((asoc != NULL) && (asoc->idata_supported == 1)) ||
+	    ((asoc == NULL) && (inp->idata_supported == 1))) {
+		pr_supported->chunk_types[num_ext++] = SCTP_IDATA;
+	}
+	if (((asoc != NULL) && (asoc->nrsack_supported == 1)) ||
+	    ((asoc == NULL) && (inp->nrsack_supported == 1))) {
+		pr_supported->chunk_types[num_ext++] = SCTP_NR_SELECTIVE_ACK;
+	}
+	if (((asoc != NULL) && (asoc->pktdrop_supported == 1)) ||
+	    ((asoc == NULL) && (inp->pktdrop_supported == 1))) {
+		pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
+	}
+	if (num_ext > 0) {
+		parameter_len = (uint16_t)sizeof(struct sctp_supported_chunk_types_param) + num_ext;
+		pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
+		pr_supported->ph.param_length = htons(parameter_len);
+		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+		chunk_len += parameter_len;
+	}
+
+	/* add authentication parameters */
+	if (((asoc != NULL) && (asoc->auth_supported == 1)) ||
+	    ((asoc == NULL) && (inp->auth_supported == 1))) {
+		struct sctp_auth_random *randp;
+		struct sctp_auth_hmac_algo *hmacs;
+		struct sctp_auth_chunk_list *chunks;
+
+		if (padding_len > 0) {
+			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+			chunk_len += padding_len;
+			padding_len = 0;
+		}
+		/* generate and add RANDOM parameter */
+		randp = (struct sctp_auth_random *)(mtod(m, caddr_t) + chunk_len);
+		parameter_len = (uint16_t)sizeof(struct sctp_auth_random) +
+		                SCTP_AUTH_RANDOM_SIZE_DEFAULT;
+		randp->ph.param_type = htons(SCTP_RANDOM);
+		randp->ph.param_length = htons(parameter_len);
+		SCTP_READ_RANDOM(randp->random_data, SCTP_AUTH_RANDOM_SIZE_DEFAULT);
+		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+		chunk_len += parameter_len;
+
+		if (padding_len > 0) {
+			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+			chunk_len += padding_len;
+			padding_len = 0;
+		}
+		/* add HMAC_ALGO parameter */
+		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t) + chunk_len);
+		parameter_len = (uint16_t)sizeof(struct sctp_auth_hmac_algo) +
+		                sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
+		                                        (uint8_t *)hmacs->hmac_ids);
+		hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
+		hmacs->ph.param_length = htons(parameter_len);
+		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+		chunk_len += parameter_len;
+
+		if (padding_len > 0) {
+			memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+			chunk_len += padding_len;
+			padding_len = 0;
+		}
+		/* add CHUNKS parameter */
+		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t) + chunk_len);
+		parameter_len = (uint16_t)sizeof(struct sctp_auth_chunk_list) +
+		                sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
+		                                           chunks->chunk_types);
+		chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
+		chunks->ph.param_length = htons(parameter_len);
+		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+		chunk_len += parameter_len;
+	}
+	SCTP_BUF_LEN(m) = chunk_len;
+	m_last = m;
+	/* now the addresses */
+	/* To optimize this we could put the scoping stuff
+	 * into a structure and remove the individual uint8's from
+	 * the stc structure. Then we could just pass in the
+	 * address within the stc, but for now this is a quick
+	 * hack to get the address stuff teased apart.
+	 */
+	scp.ipv4_addr_legal = stc.ipv4_addr_legal;
+	scp.ipv6_addr_legal = stc.ipv6_addr_legal;
+#if defined(__Userspace__)
+	scp.conn_addr_legal = stc.conn_addr_legal;
+#endif
+	scp.loopback_scope = stc.loopback_scope;
+	scp.ipv4_local_scope = stc.ipv4_scope;
+	scp.local_scope = stc.local_scope;
+	scp.site_scope = stc.site_scope;
+	m_last = sctp_add_addresses_to_i_ia(inp, stcb, &scp, m_last,
+	                                    cnt_inits_to,
+	                                    &padding_len, &chunk_len);
+	/* padding_len can only be positive if no addresses have been added */
+	if (padding_len > 0) {
+		memset(mtod(m, caddr_t) + chunk_len, 0, padding_len);
+		chunk_len += padding_len;
+		SCTP_BUF_LEN(m) += padding_len;
+		padding_len = 0;
+	}
+
+	/* tack on the operational error if present */
+	if (op_err) {
+		parameter_len = 0;
+		for (m_tmp = op_err; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
+			parameter_len += SCTP_BUF_LEN(m_tmp);
+		}
+		padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+		SCTP_BUF_NEXT(m_last) = op_err;
+		while (SCTP_BUF_NEXT(m_last) != NULL) {
+			m_last = SCTP_BUF_NEXT(m_last);
+		}
+		chunk_len += parameter_len;
+	}
+	if (padding_len > 0) {
+		m_last = sctp_add_pad_tombuf(m_last, padding_len);
+		if (m_last == NULL) {
+			/* Houston we have a problem, no space */
+			sctp_m_freem(m);
+			return;
+		}
+		chunk_len += padding_len;
+		padding_len = 0;
+	}
+	/* Now we must build a cookie */
+	m_cookie = sctp_add_cookie(init_pkt, offset, m, 0, &stc, &signature);
+	if (m_cookie == NULL) {
+		/* memory problem */
+		sctp_m_freem(m);
+		return;
+	}
+	/* Now append the cookie to the end and update the space/size */
+	SCTP_BUF_NEXT(m_last) = m_cookie;
+	parameter_len = 0;
+	for (m_tmp = m_cookie; m_tmp != NULL; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
+		parameter_len += SCTP_BUF_LEN(m_tmp);
+		if (SCTP_BUF_NEXT(m_tmp) == NULL) {
+			m_last = m_tmp;
+		}
+	}
+	padding_len = SCTP_SIZE32(parameter_len) - parameter_len;
+	chunk_len += parameter_len;
+
+	/* Place in the size, but we don't include
+	 * the last pad (if any) in the INIT-ACK.
+	 */
+	initack->ch.chunk_length = htons(chunk_len);
+
+	/* Time to sign the cookie. We don't sign over the cookie
+	 * signature itself, thus we set the trailer.
+	 */
+	(void)sctp_hmac_m(SCTP_HMAC,
+			  (uint8_t *)inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
+			  SCTP_SECRET_SIZE, m_cookie, sizeof(struct sctp_paramhdr),
+			  (uint8_t *)signature, SCTP_SIGNATURE_SIZE);
+	/*
+	 * We pass 0 here to NOT set IP_DF if it's IPv4; we ignore the return
+	 * value here since the timer will drive a retransmission.
+	 */
+	if (padding_len > 0) {
+		if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
+			sctp_m_freem(m);
+			return;
+		}
+	}
+	if (stc.loopback_scope) {
+		over_addr = (union sctp_sockstore *)dst;
+	} else {
+		over_addr = NULL;
+	}
+
+	if ((error = sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
+	                                        0, 0,
+	                                        inp->sctp_lport, sh->src_port, init_chk->init.initiate_tag,
+	                                        port, over_addr,
+#if defined(__FreeBSD__)
+	                                        mflowtype, mflowid,
+#endif
+	                                        SCTP_SO_NOT_LOCKED))) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak send error %d\n", error);
+		if (error == ENOBUFS) {
+			if (asoc != NULL) {
+				asoc->ifp_had_enobuf = 1;
+			}
+			SCTP_STAT_INCR(sctps_lowlevelerr);
+		}
+	} else {
+		if (asoc != NULL) {
+			asoc->ifp_had_enobuf = 0;
+		}
+	}
+	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+
+static void
+sctp_prune_prsctp(struct sctp_tcb *stcb,
+    struct sctp_association *asoc,
+    struct sctp_sndrcvinfo *srcv,
+    int dataout)
+{
+	int freed_spc = 0;
+	struct sctp_tmit_chunk *chk, *nchk;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if ((asoc->prsctp_supported) &&
+	    (asoc->sent_queue_cnt_removeable > 0)) {
+		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+			/*
+			 * Look for chunks marked with the PR_SCTP flag AND
+			 * the buffer space flag. If the one being sent is of
+			 * equal or greater priority, purge the old one and
+			 * free some space.
+			 */
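+			/*
+			 * For the buffer-bound policy sinfo_timetolive is a
+			 * priority that was saved in timetodrop.tv_sec when
+			 * the chunk was queued; a stored value numerically
+			 * larger than or equal to the new message's priority
+			 * marks the chunk as droppable to make room.
+			 */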
+			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
+				/*
+				 * This one is PR-SCTP AND buffer space
+				 * limited type
+				 */
+				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
+					/*
+					 * Lower numbers equate to higher
+					 * priority, so if the one we are
+					 * looking at has a larger or equal
+					 * priority we want to drop the data
+					 * and NOT retransmit it.
+					 */
+					if (chk->data) {
+						/*
+						 * We release the book_size
+						 * if the mbuf is here
+						 */
+						int ret_spc;
+						uint8_t sent;
+
+						if (chk->sent > SCTP_DATAGRAM_UNSENT)
+							sent = 1;
+						else
+							sent = 0;
+						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+						    sent,
+						    SCTP_SO_LOCKED);
+						freed_spc += ret_spc;
+						if (freed_spc >= dataout) {
+							return;
+						}
+					}	/* if chunk was present */
+				}	/* if of sufficient priority */
+			}	/* if chunk has enabled */
+		}		/* tailqforeach */
+
+		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+			/* Here we must move to the sent queue and mark */
+			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
+				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
+					if (chk->data) {
+						/*
+						 * We release the book_size
+						 * if the mbuf is here
+						 */
+						int ret_spc;
+
+						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
+						    0, SCTP_SO_LOCKED);
+
+						freed_spc += ret_spc;
+						if (freed_spc >= dataout) {
+							return;
+						}
+					}	/* end if chk->data */
+				}	/* end if right class */
+			}	/* end if chk pr-sctp */
+		}		/* tailqforeachsafe (chk) */
+	}			/* if enabled in asoc */
+}
+
+int
+sctp_get_frag_point(struct sctp_tcb *stcb,
+    struct sctp_association *asoc)
+{
+	int siz, ovh;
+
+	/*
+	 * For endpoints that have both v6 and v4 addresses we must reserve
+	 * room for the IPv6 header; for those that are only dealing with v4
+	 * we use a larger frag point.
+	 */
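+	/*
+	 * With the usual header sizes this overhead is 40 (IPv6) or 20
+	 * (IPv4) bytes of IP header plus the 12-byte SCTP common header,
+	 * plus a 16-byte DATA or 20-byte I-DATA chunk header; whatever is
+	 * left of the smallest path MTU (or of the configured frag point,
+	 * if that is smaller) is available for user data.
+	 */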
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+		ovh = SCTP_MIN_OVERHEAD;
+	} else {
+		ovh = SCTP_MIN_V4_OVERHEAD;
+	}
+	if (stcb->asoc.idata_supported) {
+		ovh += sizeof(struct sctp_idata_chunk);
+	} else {
+		ovh += sizeof(struct sctp_data_chunk);
+	}
+	if (stcb->asoc.sctp_frag_point > asoc->smallest_mtu)
+		siz = asoc->smallest_mtu - ovh;
+	else
+		siz = (stcb->asoc.sctp_frag_point - ovh);
+	/*
+	 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
+	 */
+	/* A data chunk MUST fit in a cluster */
+	/* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
+	/* } */
+
+	/* adjust for an AUTH chunk if DATA requires auth */
+	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
+		siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+
+	if (siz % 4) {
+		/* make it an even word boundary please */
+		siz -= (siz % 4);
+	}
+	return (siz);
+}
+
+static void
+sctp_set_prsctp_policy(struct sctp_stream_queue_pending *sp)
+{
+	/*
+	 * We assume that the user wants PR_SCTP_TTL if the user
+	 * provides a positive lifetime but does not specify any
+	 * PR_SCTP policy.
+	 */
+	if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
+		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
+	} else if (sp->timetolive > 0) {
+		sp->sinfo_flags |= SCTP_PR_SCTP_TTL;
+		sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
+	} else {
+		return;
+	}
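+	/*
+	 * sinfo_timetolive is interpreted per policy: a lifetime in
+	 * milliseconds for TTL (e.g. 2500 becomes 2 s + 500000 us in the
+	 * conversion below), a drop priority for the buffer-bound policy
+	 * and a retransmission limit for RTX.
+	 */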
+	switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
+	case CHUNK_FLAGS_PR_SCTP_BUF:
+		/*
+		 * Time to live is a priority stored in tv_sec when
+		 * doing the buffer drop thing.
+		 */
+		sp->ts.tv_sec = sp->timetolive;
+		sp->ts.tv_usec = 0;
+		break;
+	case CHUNK_FLAGS_PR_SCTP_TTL:
+	{
+		struct timeval tv;
+		(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+		tv.tv_sec = sp->timetolive / 1000;
+		tv.tv_usec = (sp->timetolive * 1000) % 1000000;
+		/* TODO sctp_constants.h needs alternative time macros when
+		 *  _KERNEL is undefined.
+		 */
+#ifndef __FreeBSD__
+		timeradd(&sp->ts, &tv, &sp->ts);
+#else
+		timevaladd(&sp->ts, &tv);
+#endif
+	}
+		break;
+	case CHUNK_FLAGS_PR_SCTP_RTX:
+		/*
+		 * Time to live is the number of retransmissions
+		 * stored in tv_sec.
+		 */
+		sp->ts.tv_sec = sp->timetolive;
+		sp->ts.tv_usec = 0;
+		break;
+	default:
+		SCTPDBG(SCTP_DEBUG_USRREQ1,
+			"Unknown PR_SCTP policy %u.\n",
+			PR_SCTP_POLICY(sp->sinfo_flags));
+		break;
+	}
+}
+
+static int
+sctp_msg_append(struct sctp_tcb *stcb,
+		struct sctp_nets *net,
+		struct mbuf *m,
+		struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
+{
+	int error = 0;
+	struct mbuf *at;
+	struct sctp_stream_queue_pending *sp = NULL;
+	struct sctp_stream_out *strm;
+
+	/* Given an mbuf chain, put it
+	 * into the association send queue and
+	 * place it on the wheel
+	 */
+	if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
+		/* Invalid stream number */
+		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+	if ((stcb->asoc.stream_locked) &&
+	    (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
+		SCTP_LTRACE_ERR_RET_PKT(m, NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+	strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+	/* Now can we send this? */
+	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+	    (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
+		/* got data while shutting down */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+		error = ECONNRESET;
+		goto out_now;
+	}
+	sctp_alloc_a_strmoq(stcb, sp);
+	if (sp == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		error = ENOMEM;
+		goto out_now;
+	}
+	sp->sinfo_flags = srcv->sinfo_flags;
+	sp->timetolive = srcv->sinfo_timetolive;
+	sp->ppid = srcv->sinfo_ppid;
+	sp->context = srcv->sinfo_context;
+	sp->fsn = 0;
+	if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+		sp->net = net;
+		atomic_add_int(&sp->net->ref_count, 1);
+	} else {
+		sp->net = NULL;
+	}
+	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+	sp->sid = srcv->sinfo_stream;
+	sp->msg_is_complete = 1;
+	sp->sender_all_done = 1;
+	sp->some_taken = 0;
+	sp->data = m;
+	sp->tail_mbuf = NULL;
+	sctp_set_prsctp_policy(sp);
+	/* We could in theory (for sendall) pass the length
+	 * in, but we would still have to hunt through the
+	 * chain since we need to set up the tail_mbuf
+	 */
+	sp->length = 0;
+	for (at = m; at; at = SCTP_BUF_NEXT(at)) {
+		if (SCTP_BUF_NEXT(at) == NULL)
+			sp->tail_mbuf = at;
+		sp->length += SCTP_BUF_LEN(at);
+	}
+	if (srcv->sinfo_keynumber_valid) {
+		sp->auth_keyid = srcv->sinfo_keynumber;
+	} else {
+		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+	}
+	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+		sctp_auth_key_acquire(stcb, sp->auth_keyid);
+		sp->holds_key_ref = 1;
+	}
+	if (hold_stcb_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	sctp_snd_sb_alloc(stcb, sp->length);
+	atomic_add_int(&stcb->asoc.stream_queue_cnt, 1);
+	TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+	stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc, strm, sp, 1);
+	m = NULL;
+	if (hold_stcb_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+out_now:
+	if (m) {
+		sctp_m_freem(m);
+	}
+	return (error);
+}
+
+
+static struct mbuf *
+sctp_copy_mbufchain(struct mbuf *clonechain,
+		    struct mbuf *outchain,
+		    struct mbuf **endofchain,
+		    int can_take_mbuf,
+		    int sizeofcpy,
+		    uint8_t copy_by_ref)
+{
+	struct mbuf *m;
+	struct mbuf *appendchain;
+	caddr_t cp;
+	int len;
+
+	if (endofchain == NULL) {
+		/* error */
+	error_out:
+		if (outchain)
+			sctp_m_freem(outchain);
+		return (NULL);
+	}
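+	/*
+	 * Three cases below: if we may take the clonechain outright it is
+	 * simply appended; if the data is small and not marked for
+	 * copy-by-reference it is copied flat into the trailing space of
+	 * the existing chain (adding new clusters as needed); otherwise
+	 * the chain is duplicated with SCTP_M_COPYM and tacked onto the
+	 * end.
+	 */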
+	if (can_take_mbuf) {
+		appendchain = clonechain;
+	} else {
+		if (!copy_by_ref &&
+#if defined(__Panda__)
+		    0
+#else
+		    (sizeofcpy <= (int)((((SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) - 1) * MLEN) + MHLEN)))
+#endif
+		    ) {
+			/* It's not in a cluster */
+			if (*endofchain == NULL) {
+				/* lets get a mbuf cluster */
+				if (outchain == NULL) {
+					/* This is the general case */
+				new_mbuf:
+					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
+					if (outchain == NULL) {
+						goto error_out;
+					}
+					SCTP_BUF_LEN(outchain) = 0;
+					*endofchain = outchain;
+					/* get the prepend space */
+					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV+4));
+				} else {
+					/* We really should not get a NULL in endofchain */
+					/* find end */
+					m = outchain;
+					while (m) {
+						if (SCTP_BUF_NEXT(m) == NULL) {
+							*endofchain = m;
+							break;
+						}
+						m = SCTP_BUF_NEXT(m);
+					}
+					/* sanity */
+					if (*endofchain == NULL) {
+						/* huh, TSNH XXX maybe we should panic */
+						sctp_m_freem(outchain);
+						goto new_mbuf;
+					}
+				}
+				/* get the new end of length */
+				len = (int)M_TRAILINGSPACE(*endofchain);
+			} else {
+				/* how much is left at the end? */
+				len = (int)M_TRAILINGSPACE(*endofchain);
+			}
+			/* Find the end of the data, for appending */
+			cp = (mtod((*endofchain), caddr_t) + SCTP_BUF_LEN((*endofchain)));
+
+			/* Now lets copy it out */
+			if (len >= sizeofcpy) {
+				/* It all fits, copy it in */
+				m_copydata(clonechain, 0, sizeofcpy, cp);
+				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
+			} else {
+				/* fill up the end of the chain */
+				if (len > 0) {
+					m_copydata(clonechain, 0, len, cp);
+					SCTP_BUF_LEN((*endofchain)) += len;
+					/* now we need another one */
+					sizeofcpy -= len;
+				}
+				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_HEADER);
+				if (m == NULL) {
+					/* We failed */
+					goto error_out;
+				}
+				SCTP_BUF_NEXT((*endofchain)) = m;
+				*endofchain = m;
+				cp = mtod((*endofchain), caddr_t);
+				m_copydata(clonechain, len, sizeofcpy, cp);
+				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
+			}
+			return (outchain);
+		} else {
+			/* copy the old fashion way */
+			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_NOWAIT);
+#ifdef SCTP_MBUF_LOGGING
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+				sctp_log_mbc(appendchain, SCTP_MBUF_ICOPY);
+			}
+#endif
+		}
+	}
+	if (appendchain == NULL) {
+		/* error */
+		if (outchain)
+			sctp_m_freem(outchain);
+		return (NULL);
+	}
+	if (outchain) {
+		/* tack on to the end */
+		if (*endofchain != NULL) {
+			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
+		} else {
+			m = outchain;
+			while (m) {
+				if (SCTP_BUF_NEXT(m) == NULL) {
+					SCTP_BUF_NEXT(m) = appendchain;
+					break;
+				}
+				m = SCTP_BUF_NEXT(m);
+			}
+		}
+		/*
+		 * save off the end and update the end-chain
+		 * position
+		 */
+		m = appendchain;
+		while (m) {
+			if (SCTP_BUF_NEXT(m) == NULL) {
+				*endofchain = m;
+				break;
+			}
+			m = SCTP_BUF_NEXT(m);
+		}
+		return (outchain);
+	} else {
+		/* save off the end and update the end-chain position */
+		m = appendchain;
+		while (m) {
+			if (SCTP_BUF_NEXT(m) == NULL) {
+				*endofchain = m;
+				break;
+			}
+			m = SCTP_BUF_NEXT(m);
+		}
+		return (appendchain);
+	}
+}
+
+static int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+		      struct sctp_tcb *stcb,
+		      struct sctp_association *asoc,
+		      int *num_out,
+		      int *reason_code,
+		      int control_only, int from_where,
+		      struct timeval *now, int *now_filled, int frag_point, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+		      SCTP_UNUSED
+#endif
+                      );
+
+static void
+sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
+    uint32_t val SCTP_UNUSED)
+{
+	struct sctp_copy_all *ca;
+	struct mbuf *m;
+	int ret = 0;
+	int added_control = 0;
+	int un_sent, do_chunk_output = 1;
+	struct sctp_association *asoc;
+	struct sctp_nets *net;
+
+	ca = (struct sctp_copy_all *)ptr;
+	if (ca->m == NULL) {
+		return;
+	}
+	if (ca->inp != inp) {
+		/* TSNH */
+		return;
+	}
+	if (ca->sndlen > 0) {
+		m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_NOWAIT);
+		if (m == NULL) {
+			/* can't copy so we are done */
+			ca->cnt_failed++;
+			return;
+		}
+#ifdef SCTP_MBUF_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+			sctp_log_mbc(m, SCTP_MBUF_ICOPY);
+		}
+#endif
+	} else {
+		m = NULL;
+	}
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if (stcb->asoc.alternate) {
+		net = stcb->asoc.alternate;
+	} else {
+		net = stcb->asoc.primary_destination;
+	}
+	if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
+		/* Abort this assoc with m as the user defined reason */
+		if (m != NULL) {
+			SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_NOWAIT);
+		} else {
+			m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
+			                          0, M_NOWAIT, 1, MT_DATA);
+			SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
+		}
+		if (m != NULL) {
+			struct sctp_paramhdr *ph;
+
+			ph = mtod(m, struct sctp_paramhdr *);
+			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + ca->sndlen));
+		}
+		/* We add one here to keep the assoc from
+		 * disappearing on us.
+		 */
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		sctp_abort_an_association(inp, stcb, m, SCTP_SO_NOT_LOCKED);
+		/* sctp_abort_an_association calls sctp_free_asoc(), but
+		 * free_asoc will NOT free the association since we
+		 * incremented the refcnt. We do this to prevent it being
+		 * freed and things getting tricky, since we could end up
+		 * (from free_asoc) calling inpcb_free, which would take a
+		 * recursive lock call on the iterator lock. As a consequence
+		 * the stcb comes back to us unlocked: since free_asoc
+		 * returns with either no TCB or the TCB unlocked, we must
+		 * relock so the iterator timer can unlock it.
+		 */
+		SCTP_TCB_LOCK(stcb);
+		atomic_add_int(&stcb->asoc.refcnt, -1);
+		goto no_chunk_output;
+	} else {
+		if (m) {
+			ret = sctp_msg_append(stcb, net, m,
+					      &ca->sndrcv, 1);
+		}
+		asoc = &stcb->asoc;
+		if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
+			/* shutdown this assoc */
+			if (TAILQ_EMPTY(&asoc->send_queue) &&
+			    TAILQ_EMPTY(&asoc->sent_queue) &&
+			    sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED) == 0) {
+				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+					goto abort_anyway;
+				}
+				/* there is nothing queued to send, so I'm done... */
+				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+					/* only send SHUTDOWN the first time through */
+					if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+					}
+					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+					sctp_stop_timers_for_shutdown(stcb);
+					sctp_send_shutdown(stcb, net);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+							 net);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+							 asoc->primary_destination);
+					added_control = 1;
+					do_chunk_output = 0;
+				}
+			} else {
+				/*
+				 * we still have (or just got) data to send, so set
+				 * SHUTDOWN_PENDING
+				 */
+				/*
+				 * XXX sockets draft says that SCTP_EOF should be
+				 * sent with no data.  Currently, we allow user
+				 * data to be sent first and then move to
+				 * SHUTDOWN-PENDING
+				 */
+				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+					if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+					}
+					asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+					if (TAILQ_EMPTY(&asoc->send_queue) &&
+					    TAILQ_EMPTY(&asoc->sent_queue) &&
+					    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+						struct mbuf *op_err;
+						char msg[SCTP_DIAG_INFO_LEN];
+
+					abort_anyway:
+						snprintf(msg, sizeof(msg),
+						         "%s:%d at %s", __FILE__, __LINE__, __func__);
+						op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+						                             msg);
+						atomic_add_int(&stcb->asoc.refcnt, 1);
+						sctp_abort_an_association(stcb->sctp_ep, stcb,
+									  op_err, SCTP_SO_NOT_LOCKED);
+						atomic_add_int(&stcb->asoc.refcnt, -1);
+						goto no_chunk_output;
+					}
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+							 asoc->primary_destination);
+				}
+			}
+
+		}
+	}
+	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+		   (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+
+	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+	    (stcb->asoc.total_flight > 0) &&
+	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+		do_chunk_output = 0;
+	}
+	if (do_chunk_output)
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_NOT_LOCKED);
+	else if (added_control) {
+		int num_out, reason, now_filled = 0;
+		struct timeval now;
+		int frag_point;
+
+		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+				      &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_NOT_LOCKED);
+	}
+ no_chunk_output:
+	if (ret) {
+		ca->cnt_failed++;
+	} else {
+		ca->cnt_sent++;
+	}
+}
+
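+/*
+ * Final callback for the sendall iterator: release the copied message and
+ * the sctp_copy_all bookkeeping structure when the iterator finishes.
+ */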
+static void
+sctp_sendall_completes(void *ptr, uint32_t val SCTP_UNUSED)
+{
+	struct sctp_copy_all *ca;
+
+	ca = (struct sctp_copy_all *)ptr;
+	/*
+	 * Do a notify here? Kacheong suggests that the notify be done at
+	 * the send time.. so you would push up a notification if any send
+	 * failed. Don't know if this is feasible since the only failures we
+	 * have are "memory" related, and if you cannot get an mbuf to send
+	 * the data you surely can't get an mbuf to send up a notification
+	 * that you can't send the data :->
+	 */
+
+	/* now free everything */
+	sctp_m_freem(ca->m);
+	SCTP_FREE(ca, SCTP_M_COPYAL);
+}
+
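+/*
+ * Copy 'len' bytes of user data from the uio into a freshly allocated
+ * mbuf chain, growing the chain as needed; returns NULL if an mbuf
+ * cannot be allocated or the copy fails.
+ */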
+static struct mbuf *
+sctp_copy_out_all(struct uio *uio, int len)
+{
+	struct mbuf *ret, *at;
+	int left, willcpy, cancpy, error;
+
+	ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAITOK, 1, MT_DATA);
+	if (ret == NULL) {
+		/* TSNH */
+		return (NULL);
+	}
+	left = len;
+	SCTP_BUF_LEN(ret) = 0;
+	/* save space for the data chunk header */
+	cancpy = (int)M_TRAILINGSPACE(ret);
+	willcpy = min(cancpy, left);
+	at = ret;
+	while (left > 0) {
+		/* Align data to the end */
+		error = uiomove(mtod(at, caddr_t), willcpy, uio);
+		if (error) {
+	err_out_now:
+			sctp_m_freem(at);
+			return (NULL);
+		}
+		SCTP_BUF_LEN(at) = willcpy;
+		SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
+		left -= willcpy;
+		if (left > 0) {
+			SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 1, MT_DATA);
+			if (SCTP_BUF_NEXT(at) == NULL) {
+				goto err_out_now;
+			}
+			at = SCTP_BUF_NEXT(at);
+			SCTP_BUF_LEN(at) = 0;
+			cancpy = (int)M_TRAILINGSPACE(at);
+			willcpy = min(cancpy, left);
+		}
+	}
+	return (ret);
+}
+
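+/*
+ * Handle SCTP_SENDALL: snapshot the message (from the uio or the supplied
+ * mbuf chain) into a sctp_copy_all structure and start the PCB iterator
+ * that delivers a copy to every association on this endpoint.
+ */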
+static int
+sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
+    struct sctp_sndrcvinfo *srcv)
+{
+	int ret;
+	struct sctp_copy_all *ca;
+
+	SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
+		    SCTP_M_COPYAL);
+	if (ca == NULL) {
+		sctp_m_freem(m);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return (ENOMEM);
+	}
+	memset(ca, 0, sizeof(struct sctp_copy_all));
+
+	ca->inp = inp;
+	if (srcv) {
+		memcpy(&ca->sndrcv, srcv, sizeof(struct sctp_nonpad_sndrcvinfo));
+	}
+	/*
+	 * take off the sendall flag, it would be bad if we failed to do
+	 * this :-0
+	 */
+	ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
+	/* get length and mbuf chain */
+	if (uio) {
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		ca->sndlen = uio->uio_resid;
+#else
+		ca->sndlen = uio_resid(uio);
+#endif
+#else
+		ca->sndlen = (int)uio->uio_resid;
+#endif
+#if defined(__APPLE__)
+		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 0);
+#endif
+		ca->m = sctp_copy_out_all(uio, ca->sndlen);
+#if defined(__APPLE__)
+		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 0);
+#endif
+		if (ca->m == NULL) {
+			SCTP_FREE(ca, SCTP_M_COPYAL);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+			return (ENOMEM);
+		}
+	} else {
+		/* Gather the length of the send */
+		struct mbuf *mat;
+
+		ca->sndlen = 0;
+		for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
+			ca->sndlen += SCTP_BUF_LEN(mat);
+		}
+	}
+	ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
+				     SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES,
+				     SCTP_ASOC_ANY_STATE,
+				     (void *)ca, 0,
+				     sctp_sendall_completes, inp, 1);
+	if (ret) {
+		SCTP_PRINTF("Failed to initiate iterator for sendall\n");
+		SCTP_FREE(ca, SCTP_M_COPYAL);
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+		return (EFAULT);
+	}
+	return (0);
+}
+
+
+void
+sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk, *nchk;
+
+	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+			asoc->ctrl_queue_cnt--;
+			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		}
+	}
+}
+
+void
+sctp_toss_old_asconf(struct sctp_tcb *stcb)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk, *nchk;
+	struct sctp_asconf_chunk *acp;
+
+	asoc = &stcb->asoc;
+	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
+		/* find SCTP_ASCONF chunk in queue */
+		if (chk->rec.chunk_id.id == SCTP_ASCONF) {
+			if (chk->data) {
+				acp = mtod(chk->data, struct sctp_asconf_chunk *);
+				if (SCTP_TSN_GT(ntohl(acp->serial_number), asoc->asconf_seq_out_acked)) {
+					/* Not Acked yet */
+					break;
+				}
+			}
+			TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+			asoc->ctrl_queue_cnt--;
+			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		}
+	}
+}
+
+
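+/*
+ * Move the DATA chunks just bundled into a packet from the send queue to
+ * the sent queue (kept in TSN order), charge them against the flight size
+ * and the peer's rwnd, and update the related statistics.
+ */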
+static void
+sctp_clean_up_datalist(struct sctp_tcb *stcb,
+    struct sctp_association *asoc,
+    struct sctp_tmit_chunk **data_list,
+    int bundle_at,
+    struct sctp_nets *net)
+{
+	int i;
+	struct sctp_tmit_chunk *tp1;
+
+	for (i = 0; i < bundle_at; i++) {
+		/* off of the send queue */
+		TAILQ_REMOVE(&asoc->send_queue, data_list[i], sctp_next);
+		asoc->send_queue_cnt--;
+		if (i > 0) {
+			/*
+			 * For any chunk other than chunk 0 we zap the time;
+			 * chunk 0 gets zapped or set based on whether an RTO
+			 * measurement is needed.
+			 */
+			data_list[i]->do_rtt = 0;
+		}
+		/* record time */
+		data_list[i]->sent_rcv_time = net->last_sent_time;
+		data_list[i]->rec.data.cwnd_at_send = net->cwnd;
+		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.tsn;
+		if (data_list[i]->whoTo == NULL) {
+			data_list[i]->whoTo = net;
+			atomic_add_int(&net->ref_count, 1);
+		}
+		/* on to the sent queue */
+		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
+		if ((tp1) && SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
+			struct sctp_tmit_chunk *tpp;
+
+			/* need to move back */
+		back_up_more:
+			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
+			if (tpp == NULL) {
+				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
+				goto all_done;
+			}
+			tp1 = tpp;
+			if (SCTP_TSN_GT(tp1->rec.data.tsn, data_list[i]->rec.data.tsn)) {
+				goto back_up_more;
+			}
+			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
+		} else {
+			TAILQ_INSERT_TAIL(&asoc->sent_queue,
+					  data_list[i],
+					  sctp_next);
+		}
+	all_done:
+		/* This does not lower until the cum-ack passes it */
+		asoc->sent_queue_cnt++;
+		if ((asoc->peers_rwnd <= 0) &&
+		    (asoc->total_flight == 0) &&
+		    (bundle_at == 1)) {
+			/* Mark the chunk as being a window probe */
+			SCTP_STAT_INCR(sctps_windowprobed);
+		}
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_audit_log(0xC2, 3);
+#endif
+		data_list[i]->sent = SCTP_DATAGRAM_SENT;
+		data_list[i]->snd_count = 1;
+		data_list[i]->rec.data.chunk_was_revoked = 0;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+			sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
+				       data_list[i]->whoTo->flight_size,
+				       data_list[i]->book_size,
+				       (uint32_t)(uintptr_t)data_list[i]->whoTo,
+				       data_list[i]->rec.data.tsn);
+		}
+		sctp_flight_size_increase(data_list[i]);
+		sctp_total_flight_increase(stcb, data_list[i]);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+			sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+			      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+		}
+		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+						    (uint32_t) (data_list[i]->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
+		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+			/* SWS sender side engages */
+			asoc->peers_rwnd = 0;
+		}
+	}
+	if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
+		(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted)(stcb, net);
+	}
+}
+
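+/*
+ * Remove control chunks that must not linger on the control queue after a
+ * send attempt (SACKs, heartbeats, shutdown related chunks, etc.).  A
+ * stream reset chunk is only kept if it is the one tracked in
+ * asoc->str_reset.
+ */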
+static void
+sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+	SCTP_UNUSED
+#endif
+)
+{
+	struct sctp_tmit_chunk *chk, *nchk;
+
+	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+		    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) ||	/* EY */
+		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) ||
+		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+			/* Stray chunks must be cleaned up */
+	clean_up_anyway:
+			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+			asoc->ctrl_queue_cnt--;
+			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
+				asoc->fwd_tsn_cnt--;
+			sctp_free_a_chunk(stcb, chk, so_locked);
+		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+			/* special handling, we must look into the param */
+			if (chk != asoc->str_reset) {
+				goto clean_up_anyway;
+			}
+		}
+	}
+}
+
+
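+/*
+ * Decide how many bytes of an incomplete message may be moved out now.
+ * In EEOR mode take the whole message when it fits (deferring if data is
+ * already in flight) or fill the remaining room; in non-EEOR mode only
+ * split when the residual and split-point limits allow it.
+ */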
+static int
+sctp_can_we_split_this(struct sctp_tcb *stcb,
+                       uint32_t length,
+                       uint32_t goal_mtu, uint32_t frag_point, int eeor_on)
+{
+	/* Make a decision on whether I should split a
+	 * msg into multiple parts. This is only asked of
+	 * incomplete messages.
+	 */
+	if (eeor_on) {
+		/* If we are doing EEOR we need to always send
+		 * it if it's the entire thing, since it might
+		 * be all the guy is putting in the hopper.
+		 */
+		if (goal_mtu >= length) {
+			/*-
+			 * If we have data outstanding,
+			 * we get another chance when the sack
+			 * arrives to transmit - wait for more data
+			 */
+			if (stcb->asoc.total_flight == 0) {
+				/* If nothing is in flight, we zero
+				 * the packet counter.
+				 */
+				return (length);
+			}
+			return (0);
+
+		} else {
+			/* You can fill the rest */
+			return (goal_mtu);
+		}
+	}
+	/*-
+	 * For those strange folk that make the send buffer
+	 * smaller than our fragmentation point, we can't
+	 * get a full msg in so we have to allow splitting.
+	 */
+	if (SCTP_SB_LIMIT_SND(stcb->sctp_socket) < frag_point) {
+		return (length);
+	}
+
+	if ((length <= goal_mtu) ||
+	    ((length - goal_mtu) < SCTP_BASE_SYSCTL(sctp_min_residual))) {
+		/* Sub-optimal residual, don't split in non-eeor mode. */
+		return (0);
+	}
+	/* If we reach here, length is larger
+	 * than the goal_mtu. Do we wish to split
+	 * it for the sake of packing packets together?
+	 */
+	if (goal_mtu >= min(SCTP_BASE_SYSCTL(sctp_min_split_point), frag_point)) {
+		/* It's OK to split it */
+		return (min(goal_mtu, frag_point));
+	}
+	/* Nope, can't split */
+	return (0);
+
+}
+
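+/*
+ * Pull up to goal_mtu/frag_point bytes from the given stream's pending
+ * queue, wrap them in a DATA or I-DATA chunk (assigning the TSN and
+ * message id) and append the chunk to the association's send queue.
+ * Returns the number of payload bytes moved.
+ */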
+static uint32_t
+sctp_move_to_outqueue(struct sctp_tcb *stcb,
+                      struct sctp_stream_out *strq,
+                      uint32_t goal_mtu,
+                      uint32_t frag_point,
+                      int *giveup,
+                      int eeor_mode,
+                      int *bail,
+                      int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+                      SCTP_UNUSED
+#endif
+	)
+{
+	/* Move from the stream to the send_queue keeping track of the total */
+	struct sctp_association *asoc;
+	struct sctp_stream_queue_pending *sp;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_data_chunk *dchkh=NULL;
+	struct sctp_idata_chunk *ndchkh=NULL;
+	uint32_t to_move, length;
+	int leading;
+	uint8_t rcv_flags = 0;
+	uint8_t some_taken;
+	uint8_t send_lock_up = 0;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	asoc = &stcb->asoc;
+one_more_time:
+	/*sa_ignore FREED_MEMORY*/
+	sp = TAILQ_FIRST(&strq->outqueue);
+	if (sp == NULL) {
+		if (send_lock_up == 0) {
+			SCTP_TCB_SEND_LOCK(stcb);
+			send_lock_up = 1;
+		}
+		sp = TAILQ_FIRST(&strq->outqueue);
+		if (sp) {
+			goto one_more_time;
+		}
+		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_EXPLICIT_EOR) == 0) &&
+		    (stcb->asoc.idata_supported == 0) &&
+		    (strq->last_msg_incomplete)) {
+			SCTP_PRINTF("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
+			            strq->sid,
+			            strq->last_msg_incomplete);
+			strq->last_msg_incomplete = 0;
+		}
+		to_move = 0;
+		if (send_lock_up) {
+			SCTP_TCB_SEND_UNLOCK(stcb);
+			send_lock_up = 0;
+		}
+		goto out_of;
+	}
+	if ((sp->msg_is_complete) && (sp->length == 0)) {
+		if (sp->sender_all_done) {
+			/* We are doing deferred cleanup. The last
+			 * time through, when we took all the data,
+			 * sender_all_done was not set.
+			 */
+			if ((sp->put_last_out == 0) && (sp->discard_rest == 0)) {
+				SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
+				SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
+				            sp->sender_all_done,
+				            sp->length,
+				            sp->msg_is_complete,
+				            sp->put_last_out,
+				            send_lock_up);
+			}
+			if ((TAILQ_NEXT(sp, next) == NULL) && (send_lock_up  == 0)) {
+				SCTP_TCB_SEND_LOCK(stcb);
+				send_lock_up = 1;
+			}
+			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+			TAILQ_REMOVE(&strq->outqueue, sp, next);
+			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
+			if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
+			    (strq->chunks_on_queues == 0) &&
+			    TAILQ_EMPTY(&strq->outqueue)) {
+				stcb->asoc.trigger_reset = 1;
+			}
+			if (sp->net) {
+				sctp_free_remote_addr(sp->net);
+				sp->net = NULL;
+			}
+			if (sp->data) {
+				sctp_m_freem(sp->data);
+				sp->data = NULL;
+			}
+			sctp_free_a_strmoq(stcb, sp, so_locked);
+			/* we can't be locked to it */
+			if (send_lock_up) {
+				SCTP_TCB_SEND_UNLOCK(stcb);
+				send_lock_up = 0;
+			}
+			/* back to get the next msg */
+			goto one_more_time;
+		} else {
+			/* sender just finished this but
+			 * still holds a reference
+			 */
+			*giveup = 1;
+			to_move = 0;
+			goto out_of;
+		}
+	} else {
+		/* is there some to get */
+		if (sp->length == 0) {
+			/* no */
+			*giveup = 1;
+			to_move = 0;
+			goto out_of;
+		} else if (sp->discard_rest) {
+			if (send_lock_up == 0) {
+				SCTP_TCB_SEND_LOCK(stcb);
+				send_lock_up = 1;
+			}
+			/* Whack down the size */
+			atomic_subtract_int(&stcb->asoc.total_output_queue_size, sp->length);
+			if ((stcb->sctp_socket != NULL) &&
+			    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
+			}
+			if (sp->data) {
+				sctp_m_freem(sp->data);
+				sp->data = NULL;
+				sp->tail_mbuf = NULL;
+			}
+			sp->length = 0;
+			sp->some_taken = 1;
+			*giveup = 1;
+			to_move = 0;
+			goto out_of;
+		}
+	}
+	some_taken = sp->some_taken;
+re_look:
+	length = sp->length;
+	if (sp->msg_is_complete) {
+		/* The message is complete */
+		to_move = min(length, frag_point);
+		if (to_move == length) {
+			/* All of it fits in the MTU */
+			if (sp->some_taken) {
+				rcv_flags |= SCTP_DATA_LAST_FRAG;
+			} else {
+				rcv_flags |= SCTP_DATA_NOT_FRAG;
+			}
+			sp->put_last_out = 1;
+			if (sp->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
+				rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+			}
+		} else {
+			/* Not all of it fits, we fragment */
+			if (sp->some_taken == 0) {
+				rcv_flags |= SCTP_DATA_FIRST_FRAG;
+			}
+			sp->some_taken = 1;
+		}
+	} else {
+		to_move = sctp_can_we_split_this(stcb, length, goal_mtu, frag_point, eeor_mode);
+		if (to_move) {
+			/*-
+			 * We use a snapshot of length in case it
+			 * is expanding during the compare.
+			 */
+			uint32_t llen;
+
+			llen = length;
+			if (to_move >= llen) {
+				to_move = llen;
+				if (send_lock_up == 0) {
+					/*-
+					 * We are taking all of an incomplete msg
+					 * thus we need a send lock.
+					 */
+					SCTP_TCB_SEND_LOCK(stcb);
+					send_lock_up = 1;
+					if (sp->msg_is_complete) {
+						/* the sender finished the msg */
+						goto re_look;
+					}
+				}
+			}
+			if (sp->some_taken == 0) {
+				rcv_flags |= SCTP_DATA_FIRST_FRAG;
+				sp->some_taken = 1;
+			}
+		} else {
+			/* Nothing to take. */
+			*giveup = 1;
+			to_move = 0;
+			goto out_of;
+		}
+	}
+
+	/* If we reach here, we can copy out a chunk */
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		/* No chunk memory */
+		*giveup = 1;
+		to_move = 0;
+		goto out_of;
+	}
+	/* Set up for unordered delivery if needed by looking
+	 * at the user-sent info flags.
+	 */
+	if (sp->sinfo_flags & SCTP_UNORDERED) {
+		rcv_flags |= SCTP_DATA_UNORDERED;
+	}
+	if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
+	    (sp->sinfo_flags & SCTP_EOF) == SCTP_EOF) {
+		rcv_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+	}
+	/* clear out the chunk before setting up */
+	memset(chk, 0, sizeof(*chk));
+	chk->rec.data.rcv_flags = rcv_flags;
+
+	if (to_move >= length) {
+		/* we think we can steal the whole thing */
+		if ((sp->sender_all_done == 0) && (send_lock_up == 0)) {
+			SCTP_TCB_SEND_LOCK(stcb);
+			send_lock_up = 1;
+		}
+		if (to_move < sp->length) {
+			/* bail, it changed */
+			goto dont_do_it;
+		}
+		chk->data = sp->data;
+		chk->last_mbuf = sp->tail_mbuf;
+		/* register the stealing */
+		sp->data = sp->tail_mbuf = NULL;
+	} else {
+		struct mbuf *m;
+	dont_do_it:
+		chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_NOWAIT);
+		chk->last_mbuf = NULL;
+		if (chk->data == NULL) {
+			sp->some_taken = some_taken;
+			sctp_free_a_chunk(stcb, chk, so_locked);
+			*bail = 1;
+			to_move = 0;
+			goto out_of;
+		}
+#ifdef SCTP_MBUF_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+			sctp_log_mbc(chk->data, SCTP_MBUF_ICOPY);
+		}
+#endif
+		/* Pull off the data */
+		m_adj(sp->data, to_move);
+		/* Now let's work our way down and compact it */
+		m = sp->data;
+		while (m && (SCTP_BUF_LEN(m) == 0)) {
+			sp->data  = SCTP_BUF_NEXT(m);
+			SCTP_BUF_NEXT(m) = NULL;
+			if (sp->tail_mbuf == m) {
+				/*-
+				 * Freeing tail? TSNH since
+				 * we supposedly were taking less
+				 * than the sp->length.
+				 */
+#ifdef INVARIANTS
+				panic("Huh, freeing tail? - TSNH");
+#else
+				SCTP_PRINTF("Huh, freeing tail? - TSNH\n");
+				sp->tail_mbuf = sp->data = NULL;
+				sp->length = 0;
+#endif
+
+			}
+			sctp_m_free(m);
+			m = sp->data;
+		}
+	}
+	if (SCTP_BUF_IS_EXTENDED(chk->data)) {
+		chk->copy_by_ref = 1;
+	} else {
+		chk->copy_by_ref = 0;
+	}
+	/* get last_mbuf and counts of mb usage.
+	 * This is ugly but hopefully it's only one mbuf.
+	 */
+	if (chk->last_mbuf == NULL) {
+		chk->last_mbuf = chk->data;
+		while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
+			chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
+		}
+	}
+
+	if (to_move > length) {
+		/*- This should not happen either
+		 * since we always lower to_move to the size
+		 * of sp->length if it's larger.
+		 */
+#ifdef INVARIANTS
+		panic("Huh, how can to_move be larger?");
+#else
+		SCTP_PRINTF("Huh, how can to_move be larger?\n");
+		sp->length = 0;
+#endif
+	} else {
+		atomic_subtract_int(&sp->length, to_move);
+	}
+	if (stcb->asoc.idata_supported == 0) {
+		leading = sizeof(struct sctp_data_chunk);
+	} else {
+		leading = sizeof(struct sctp_idata_chunk);
+	}
+	if (M_LEADINGSPACE(chk->data) < leading) {
+		/* Not enough room for a chunk header, get some */
+		struct mbuf *m;
+
+		m = sctp_get_mbuf_for_msg(1, 0, M_NOWAIT, 0, MT_DATA);
+		if (m == NULL) {
+			/*
+			 * we're in trouble here. _PREPEND below will free
+			 * all the data if there is no leading space, so we
+			 * must put the data back and restore.
+			 */
+			if (send_lock_up == 0) {
+				SCTP_TCB_SEND_LOCK(stcb);
+				send_lock_up = 1;
+			}
+			if (sp->data == NULL) {
+				/* unsteal the data */
+				sp->data = chk->data;
+				sp->tail_mbuf = chk->last_mbuf;
+			} else {
+				struct mbuf *m_tmp;
+				/* reassemble the data */
+				m_tmp = sp->data;
+				sp->data = chk->data;
+				SCTP_BUF_NEXT(chk->last_mbuf) = m_tmp;
+			}
+			sp->some_taken = some_taken;
+			atomic_add_int(&sp->length, to_move);
+			chk->data = NULL;
+			*bail = 1;
+			sctp_free_a_chunk(stcb, chk, so_locked);
+			to_move = 0;
+			goto out_of;
+		} else {
+			SCTP_BUF_LEN(m) = 0;
+			SCTP_BUF_NEXT(m) = chk->data;
+			chk->data = m;
+			M_ALIGN(chk->data, 4);
+		}
+	}
+	if (stcb->asoc.idata_supported == 0) {
+		SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_NOWAIT);
+	} else {
+		SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_idata_chunk), M_NOWAIT);
+	}
+	if (chk->data == NULL) {
+		/* HELP, TSNH since we assured it would not above? */
+#ifdef INVARIANTS
+		panic("prepend fails HELP?");
+#else
+		SCTP_PRINTF("prepend fails HELP?\n");
+		sctp_free_a_chunk(stcb, chk, so_locked);
+#endif
+		*bail = 1;
+		to_move = 0;
+		goto out_of;
+	}
+	if (stcb->asoc.idata_supported == 0) {
+		sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
+		chk->book_size = chk->send_size = (uint16_t)(to_move + sizeof(struct sctp_data_chunk));
+	} else {
+		sctp_snd_sb_alloc(stcb, sizeof(struct sctp_idata_chunk));
+		chk->book_size = chk->send_size = (uint16_t)(to_move + sizeof(struct sctp_idata_chunk));
+	}
+	chk->book_size_scale = 0;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->pad_inplace = 0;
+	chk->no_fr_allowed = 0;
+	if (stcb->asoc.idata_supported == 0) {
+		if (rcv_flags & SCTP_DATA_UNORDERED) {
+			/* Just use 0. The receiver ignores the values. */
+			chk->rec.data.mid = 0;
+		} else {
+			chk->rec.data.mid = strq->next_mid_ordered;
+			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
+				strq->next_mid_ordered++;
+			}
+		}
+	} else {
+		if (rcv_flags & SCTP_DATA_UNORDERED) {
+			chk->rec.data.mid = strq->next_mid_unordered;
+			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
+				strq->next_mid_unordered++;
+			}
+		} else {
+			chk->rec.data.mid = strq->next_mid_ordered;
+			if (rcv_flags & SCTP_DATA_LAST_FRAG) {
+				strq->next_mid_ordered++;
+			}
+		}
+	}
+	chk->rec.data.sid = sp->sid;
+	chk->rec.data.ppid = sp->ppid;
+	chk->rec.data.context = sp->context;
+	chk->rec.data.doing_fast_retransmit = 0;
+
+	chk->rec.data.timetodrop = sp->ts;
+	chk->flags = sp->act_flags;
+
+	if (sp->net) {
+		chk->whoTo = sp->net;
+		atomic_add_int(&chk->whoTo->ref_count, 1);
+	} else
+		chk->whoTo = NULL;
+
+	if (sp->holds_key_ref) {
+		chk->auth_keyid = sp->auth_keyid;
+		sctp_auth_key_acquire(stcb, chk->auth_keyid);
+		chk->holds_key_ref = 1;
+	}
+#if defined(__FreeBSD__) || defined(__Panda__)
+	chk->rec.data.tsn = atomic_fetchadd_int(&asoc->sending_seq, 1);
+#else
+	chk->rec.data.tsn = asoc->sending_seq++;
+#endif
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_AT_SEND_2_OUTQ) {
+		sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
+		               (uint32_t)(uintptr_t)stcb, sp->length,
+		               (uint32_t)((chk->rec.data.sid << 16) | (0x0000ffff & chk->rec.data.mid)),
+		               chk->rec.data.tsn);
+	}
+	if (stcb->asoc.idata_supported == 0) {
+		dchkh = mtod(chk->data, struct sctp_data_chunk *);
+	} else {
+		ndchkh = mtod(chk->data, struct sctp_idata_chunk *);
+	}
+	/*
+	 * Put the rest of the things in place now. Size was done
+	 * earlier in previous loop prior to padding.
+	 */
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
+		asoc->tsn_out_at = 0;
+		asoc->tsn_out_wrapped = 1;
+	}
+	asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.tsn;
+	asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.sid;
+	asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.mid;
+	asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
+	asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
+	asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
+	asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
+	asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
+	asoc->tsn_out_at++;
+#endif
+	if (stcb->asoc.idata_supported == 0) {
+		dchkh->ch.chunk_type = SCTP_DATA;
+		dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+		dchkh->dp.tsn = htonl(chk->rec.data.tsn);
+		dchkh->dp.sid = htons(strq->sid);
+		dchkh->dp.ssn = htons((uint16_t)chk->rec.data.mid);
+		dchkh->dp.ppid = chk->rec.data.ppid;
+		dchkh->ch.chunk_length = htons(chk->send_size);
+	} else {
+		ndchkh->ch.chunk_type = SCTP_IDATA;
+		ndchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
+		ndchkh->dp.tsn = htonl(chk->rec.data.tsn);
+		ndchkh->dp.sid = htons(strq->sid);
+		ndchkh->dp.reserved = htons(0);
+		ndchkh->dp.mid = htonl(chk->rec.data.mid);
+		if (sp->fsn == 0)
+			ndchkh->dp.ppid_fsn.ppid = chk->rec.data.ppid;
+		else
+			ndchkh->dp.ppid_fsn.fsn = htonl(sp->fsn);
+		sp->fsn++;
+		ndchkh->ch.chunk_length = htons(chk->send_size);
+	}
+	/* Now advance the chk->send_size by the actual pad needed. */
+	if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
+		/* need a pad */
+		struct mbuf *lm;
+		int pads;
+
+		pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
+		lm = sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf);
+		if (lm != NULL) {
+			chk->last_mbuf = lm;
+			chk->pad_inplace = 1;
+		}
+		chk->send_size += pads;
+	}
+	if (PR_SCTP_ENABLED(chk->flags)) {
+		asoc->pr_sctp_cnt++;
+	}
+	if (sp->msg_is_complete && (sp->length == 0) && (sp->sender_all_done)) {
+		/* All done pull and kill the message */
+		if (sp->put_last_out == 0) {
+			SCTP_PRINTF("Gak, put out entire msg with NO end!-2\n");
+			SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d send_lock:%d\n",
+			            sp->sender_all_done,
+			            sp->length,
+			            sp->msg_is_complete,
+			            sp->put_last_out,
+			            send_lock_up);
+		}
+		if ((send_lock_up == 0) && (TAILQ_NEXT(sp, next) == NULL)) {
+			SCTP_TCB_SEND_LOCK(stcb);
+			send_lock_up = 1;
+		}
+		atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+		TAILQ_REMOVE(&strq->outqueue, sp, next);
+		stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, strq, sp, send_lock_up);
+		if ((strq->state == SCTP_STREAM_RESET_PENDING) &&
+		    (strq->chunks_on_queues == 0) &&
+		    TAILQ_EMPTY(&strq->outqueue)) {
+			stcb->asoc.trigger_reset = 1;
+		}
+		if (sp->net) {
+			sctp_free_remote_addr(sp->net);
+			sp->net = NULL;
+		}
+		if (sp->data) {
+			sctp_m_freem(sp->data);
+			sp->data = NULL;
+		}
+		sctp_free_a_strmoq(stcb, sp, so_locked);
+	}
+	asoc->chunks_on_out_queue++;
+	strq->chunks_on_queues++;
+	TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
+	asoc->send_queue_cnt++;
+out_of:
+	if (send_lock_up) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return (to_move);
+}
+
+
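+/*
+ * Fill the association's send queue for the given net: repeatedly ask the
+ * stream scheduler for a stream and move data until roughly one MTU has
+ * been gathered, the scheduler runs dry, or a stream bails out.
+ */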
+static void
+sctp_fill_outqueue(struct sctp_tcb *stcb,
+    struct sctp_nets *net, int frag_point, int eeor_mode, int *quit_now, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+	SCTP_UNUSED
+#endif
+)
+{
+	struct sctp_association *asoc;
+	struct sctp_stream_out *strq;
+	int goal_mtu, moved_how_much, total_moved = 0, bail = 0;
+	int giveup;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	asoc = &stcb->asoc;
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+		case AF_INET:
+			goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			goal_mtu = net->mtu - sizeof(struct sctphdr);
+			break;
+#endif
+		default:
+			/* TSNH */
+			goal_mtu = net->mtu;
+			break;
+	}
+	/* Need an allowance for the data chunk header too */
+	if (stcb->asoc.idata_supported == 0) {
+		goal_mtu -= sizeof(struct sctp_data_chunk);
+	} else {
+		goal_mtu -= sizeof(struct sctp_idata_chunk);
+	}
+
+	/* must make even word boundary */
+	goal_mtu &= 0xfffffffc;
+	strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
+	while ((goal_mtu > 0) && strq) {
+		giveup = 0;
+		bail = 0;
+		moved_how_much = sctp_move_to_outqueue(stcb, strq, goal_mtu, frag_point, 
+						       &giveup, eeor_mode, &bail, so_locked);
+		stcb->asoc.ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved_how_much);
+
+		if ((giveup) || bail) {
+			break;
+		}
+		strq = stcb->asoc.ss_functions.sctp_ss_select_stream(stcb, net, asoc);
+		if (strq == NULL) {
+			break;
+		}
+		total_moved += moved_how_much;
+		goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
+		goal_mtu &= 0xfffffffc;
+	}
+	if (bail)
+		*quit_now = 1;
+
+	stcb->asoc.ss_functions.sctp_ss_packet_done(stcb, net, asoc);
+
+	if (total_moved == 0) {
+		if ((stcb->asoc.sctp_cmt_on_off == 0) &&
+		    (net == stcb->asoc.primary_destination)) {
+			/* ran dry for primary network net */
+			SCTP_STAT_INCR(sctps_primary_randry);
+		} else if (stcb->asoc.sctp_cmt_on_off > 0) {
+			/* ran dry with CMT on */
+			SCTP_STAT_INCR(sctps_cmt_randry);
+		}
+	}
+}
+
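+/*
+ * Mark every ECN-ECHO chunk on the control queue as unsent so it is
+ * picked up again on the next output pass.
+ */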
+void
+sctp_fix_ecn_echo(struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk;
+
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+			chk->sent = SCTP_DATAGRAM_UNSENT;
+		}
+	}
+}
+
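+/*
+ * Strip the destination binding from any pending stream data or queued
+ * DATA chunks that were targeted at the given net so they can be
+ * re-assigned to another destination at send time.
+ */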
+void
+sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_stream_queue_pending *sp;
+	unsigned int i;
+
+	if (net == NULL) {
+		return;
+	}
+	asoc = &stcb->asoc;
+	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+		TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
+			if (sp->net == net) {
+				sctp_free_remote_addr(sp->net);
+				sp->net = NULL;
+			}
+		}
+	}
+	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+		if (chk->whoTo == net) {
+			sctp_free_remote_addr(chk->whoTo);
+			chk->whoTo = NULL;
+		}
+	}
+}
+
+int
+sctp_med_chunk_output(struct sctp_inpcb *inp,
+		      struct sctp_tcb *stcb,
+		      struct sctp_association *asoc,
+		      int *num_out,
+		      int *reason_code,
+		      int control_only, int from_where,
+		      struct timeval *now, int *now_filled, int frag_point, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+		      SCTP_UNUSED
+#endif
+	)
+{
+	/**
+	 * Ok this is the generic chunk service queue. We must do the
+	 * following:
+	 * - Service the stream queue that is next, moving any
+	 *   message (note I must get a complete message i.e. FIRST/MIDDLE and
+	 *   LAST to the out queue in one pass) and assigning TSN's. This
+	 *   only applies though if the peer does not support NDATA. For NDATA
+	 *   chunks it's ok to not send the entire message ;-)
+	 * - Check to see if the cwnd/rwnd allows any output; if so, go ahead and
+	 *   formulate and send the low level chunks, making sure to combine
+	 *   any control in the control chunk queue also.
+	 */
+	struct sctp_nets *net, *start_at, *sack_goes_to = NULL, *old_start_at = NULL;
+	struct mbuf *outchain, *endoutchain;
+	struct sctp_tmit_chunk *chk, *nchk;
+
+	/* temp arrays for unlinking */
+	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+	int no_fragmentflg, error;
+	unsigned int max_rwnd_per_dest, max_send_per_dest;
+	int one_chunk, hbflag, skip_data_for_this_net;
+	int asconf, cookie, no_out_cnt;
+	int bundle_at, ctl_cnt, no_data_chunks, eeor_mode;
+	unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
+	int tsns_sent = 0;
+	uint32_t auth_offset = 0;
+	struct sctp_auth_chunk *auth = NULL;
+	uint16_t auth_keyid;
+	int override_ok = 1;
+	int skip_fill_up = 0;
+	int data_auth_reqd = 0;
+	/* JRS 5/14/07 - Add flag for whether a heartbeat is sent to
+	   the destination. */
+	int quit_now = 0;
+
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(inp));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(inp));
+	}
+#endif
+	*num_out = 0;
+	*reason_code = 0;
+	auth_keyid = stcb->asoc.authinfo.active_keyid;
+	if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
+	    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
+	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
+		eeor_mode = 1;
+	} else {
+		eeor_mode = 0;
+	}
+	ctl_cnt = no_out_cnt = asconf = cookie = 0;
+	/*
+	 * First lets prime the pump. For each destination, if there is room
+	 * in the flight size, attempt to pull an MTU's worth out of the
+	 * stream queues into the general send_queue
+	 */
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_audit_log(0xC2, 2);
+#endif
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	hbflag = 0;
+	if (control_only)
+		no_data_chunks = 1;
+	else
+		no_data_chunks = 0;
+
+	/* Nothing possible to send? */
+	if ((TAILQ_EMPTY(&asoc->control_send_queue) ||
+	     (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) &&
+	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+	    TAILQ_EMPTY(&asoc->send_queue) &&
+	    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
+	nothing_to_send:
+		*reason_code = 9;
+		return (0);
+	}
+	if (asoc->peers_rwnd == 0) {
+		/* No room in peers rwnd */
+		*reason_code = 1;
+		if (asoc->total_flight > 0) {
+			/* we are allowed one chunk in flight */
+			no_data_chunks = 1;
+		}
+	}
+	if (stcb->asoc.ecn_echo_cnt_onq) {
+		/* Record where a sack goes, if any */
+		if (no_data_chunks &&
+		    (asoc->ctrl_queue_cnt == stcb->asoc.ecn_echo_cnt_onq)) {
+			/* Nothing but ECNe to send - we don't do that */
+			goto nothing_to_send;
+		}
+		TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+			if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+			    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
+				sack_goes_to = chk->whoTo;
+				break;
+			}
+		}
+	}
+	max_rwnd_per_dest = ((asoc->peers_rwnd + asoc->total_flight) / asoc->numnets);
+	if (stcb->sctp_socket)
+		max_send_per_dest = SCTP_SB_LIMIT_SND(stcb->sctp_socket) / asoc->numnets;
+	else
+		max_send_per_dest = 0;
+	if (no_data_chunks == 0) {
+		/* How many non-directed chunks are there? */
+		TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+			if (chk->whoTo == NULL) {
+				/* We already have non-directed
+				 * chunks on the queue, no need
+				 * to do a fill-up.
+				 */
+				skip_fill_up = 1;
+				break;
+			}
+		}
+
+	}
+	if ((no_data_chunks == 0) &&
+	    (skip_fill_up == 0) &&
+	    (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc))) {
+		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+			/*
+			 * This for loop takes in each net; if it has
+			 * space in the cwnd and has data sent to it
+			 * (when CMT is off), then it calls
+			 * sctp_fill_outqueue for the net. This gets
+			 * data on the send queue for that network.
+			 *
+			 * In sctp_fill_outqueue TSN's are assigned and
+			 * data is copied out of the stream buffers. Note
+			 * mostly copy by reference (we hope).
+			 */
+			net->window_probe = 0;
+			if ((net != stcb->asoc.alternate) &&
+			    ((net->dest_state & SCTP_ADDR_PF) ||
+			     (!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
+			     (net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+					sctp_log_cwnd(stcb, net, 1,
+						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+				}
+			        continue;
+			}
+			if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
+			    (net->flight_size == 0)) {
+				(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
+			}
+			if (net->flight_size >= net->cwnd) {
+				/* skip this network, no room - can't fill */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+					sctp_log_cwnd(stcb, net, 3,
+						      SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+				}
+				continue;
+			}
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, 4, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
+			}
+			sctp_fill_outqueue(stcb, net, frag_point, eeor_mode, &quit_now, so_locked);
+			if (quit_now) {
+				/* memory alloc failure */
+				no_data_chunks = 1;
+				break;
+			}
+		}
+	}
+	/* now service each destination and send out what we can for it */
+	/* Nothing to send? */
+	if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+	    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+	    TAILQ_EMPTY(&asoc->send_queue)) {
+		*reason_code = 8;
+		return (0);
+	}
+
+	if (asoc->sctp_cmt_on_off > 0) {
+		/* get the last start point */
+		start_at = asoc->last_net_cmt_send_started;
+		if (start_at == NULL) {
+			/* null so to beginning */
+			start_at = TAILQ_FIRST(&asoc->nets);
+		} else {
+			start_at = TAILQ_NEXT(asoc->last_net_cmt_send_started, sctp_next);
+			if (start_at == NULL) {
+				start_at = TAILQ_FIRST(&asoc->nets);
+			}
+		}
+		asoc->last_net_cmt_send_started = start_at;
+	} else {
+		start_at = TAILQ_FIRST(&asoc->nets);
+	}
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if (chk->whoTo == NULL) {
+			if (asoc->alternate) {
+				chk->whoTo = asoc->alternate;
+			} else {
+				chk->whoTo = asoc->primary_destination;
+			}
+			atomic_add_int(&chk->whoTo->ref_count, 1);
+		}
+	}
+	old_start_at = NULL;
+again_one_more_time:
+	for (net = start_at ; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
+		/* how much can we send? */
+		/* SCTPDBG("Examine for sending net:%x\n", (uint32_t)net); */
+		if (old_start_at && (old_start_at == net)) {
+			/* through list completely. */
+			break;
+		}
+		tsns_sent = 0xa;
+		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+		    TAILQ_EMPTY(&asoc->asconf_send_queue) &&
+		    (net->flight_size >= net->cwnd)) {
+			/* Nothing on control or asconf and flight is full, we can skip
+			 * even in the CMT case.
+			 */
+			continue;
+		}
+		bundle_at = 0;
+		endoutchain = outchain = NULL;
+		no_fragmentflg = 1;
+		one_chunk = 0;
+		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+			skip_data_for_this_net = 1;
+		} else {
+			skip_data_for_this_net = 0;
+		}
+		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+#ifdef INET
+		case AF_INET:
+			mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			mtu = net->mtu - SCTP_MIN_OVERHEAD;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			mtu = net->mtu - sizeof(struct sctphdr);
+			break;
+#endif
+		default:
+			/* TSNH */
+			mtu = net->mtu;
+			break;
+		}
+		mx_mtu = mtu;
+		to_out = 0;
+		if (mtu > asoc->peers_rwnd) {
+			if (asoc->total_flight > 0) {
+				/* We have a packet in flight somewhere */
+				r_mtu = asoc->peers_rwnd;
+			} else {
+				/* We are always allowed to send one MTU out */
+				one_chunk = 1;
+				r_mtu = mtu;
+			}
+		} else {
+			r_mtu = mtu;
+		}
+		error = 0;
+		/************************/
+		/* ASCONF transmission */
+		/************************/
+		/* Now first lets go through the asconf queue */
+		TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
+			if (chk->rec.chunk_id.id != SCTP_ASCONF) {
+				continue;
+			}
+			if (chk->whoTo == NULL) {
+				if (asoc->alternate == NULL) {
+					if (asoc->primary_destination != net) {
+						break;
+					}
+				} else {
+					if (asoc->alternate != net) {
+						break;
+					}
+				}
+			} else {
+				if (chk->whoTo != net) {
+					break;
+				}
+			}
+			if (chk->data == NULL) {
+				break;
+			}
+			if (chk->sent != SCTP_DATAGRAM_UNSENT &&
+			    chk->sent != SCTP_DATAGRAM_RESEND) {
+				break;
+			}
+			/*
+			 * if no AUTH is yet included and this chunk
+			 * requires it, make sure to account for it.  We
+			 * don't apply the size until the AUTH chunk is
+			 * actually added below in case there is no room for
+			 * this chunk. NOTE: we overload the use of "omtu"
+			 * here
+			 */
+			if ((auth == NULL) &&
+			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+							stcb->asoc.peer_auth_chunks)) {
+				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+			} else
+				omtu = 0;
+			/* Here we do NOT factor the r_mtu */
+			if ((chk->send_size < (int)(mtu - omtu)) ||
+			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+				/*
+				 * We probably should glom the mbuf chain
+				 * from the chk->data for control but the
+				 * problem is it becomes yet one more level
+				 * of tracking to do if for some reason
+				 * output fails. Then I have got to
+				 * reconstruct the merged control chain.. el
+				 * yucko.. for now we take the easy way and
+				 * do the copy
+				 */
+				/*
+				 * Add an AUTH chunk, if chunk requires it
+				 * save the offset into the chain for AUTH
+				 */
+				if ((auth == NULL) &&
+				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+								 stcb->asoc.peer_auth_chunks))) {
+					outchain = sctp_add_auth_chunk(outchain,
+								       &endoutchain,
+								       &auth,
+								       &auth_offset,
+								       stcb,
+								       chk->rec.chunk_id.id);
+					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+				}
+				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+							       (int)chk->rec.chunk_id.can_take_data,
+							       chk->send_size, chk->copy_by_ref);
+				if (outchain == NULL) {
+					*reason_code = 8;
+					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+					return (ENOMEM);
+				}
+				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+				/* update our MTU size */
+				if (mtu > (chk->send_size + omtu))
+					mtu -= (chk->send_size + omtu);
+				else
+					mtu = 0;
+				to_out += (chk->send_size + omtu);
+				/* Do clear IP_DF ? */
+				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+					no_fragmentflg = 0;
+				}
+				if (chk->rec.chunk_id.can_take_data)
+					chk->data = NULL;
+				/*
+				 * set hb flag since we can
+				 * use these for RTO
+				 */
+				hbflag = 1;
+				asconf = 1;
+				/*
+				 * should sysctl this: don't
+				 * bundle data with ASCONF
+				 * since it requires AUTH
+				 */
+				no_data_chunks = 1;
+				chk->sent = SCTP_DATAGRAM_SENT;
+				if (chk->whoTo == NULL) {
+					chk->whoTo = net;
+					atomic_add_int(&net->ref_count, 1);
+				}
+				chk->snd_count++;
+				if (mtu == 0) {
+					/*
+					 * Ok we are out of room but we can
+					 * output without affecting the
+					 * flight size since this little guy
+					 * is a control only packet.
+					 */
+					sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+					/*
+					 * do NOT clear the asconf
+					 * flag as it is used to do
+					 * appropriate source address
+					 * selection.
+					 */
+					if (*now_filled == 0) {
+						(void)SCTP_GETTIME_TIMEVAL(now);
+						*now_filled = 1;
+					}
+					net->last_sent_time = *now;
+					hbflag = 0;
+					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+					                                        (struct sockaddr *)&net->ro._l_addr,
+					                                        outchain, auth_offset, auth,
+					                                        stcb->asoc.authinfo.active_keyid,
+					                                        no_fragmentflg, 0, asconf,
+					                                        inp->sctp_lport, stcb->rport,
+					                                        htonl(stcb->asoc.peer_vtag),
+					                                        net->port, NULL,
+#if defined(__FreeBSD__)
+					                                        0, 0,
+#endif
+					                                        so_locked))) {
+						/* error, we could not output */
+						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+						if (from_where == 0) {
+							SCTP_STAT_INCR(sctps_lowlevelerrusr);
+						}
+						if (error == ENOBUFS) {
+							asoc->ifp_had_enobuf = 1;
+							SCTP_STAT_INCR(sctps_lowlevelerr);
+						}
+						/* error, could not output */
+						if (error == EHOSTUNREACH) {
+							/*
+							 * Destination went
+							 * unreachable
+							 * during this send
+							 */
+							sctp_move_chunks_from_net(stcb, net);
+						}
+						*reason_code = 7;
+						break;
+					} else {
+						asoc->ifp_had_enobuf = 0;
+					}
+					/*
+					 * increase the number we sent; if a
+					 * cookie is sent we don't tell them
+					 * any was sent out.
+					 */
+					outchain = endoutchain = NULL;
+					auth = NULL;
+					auth_offset = 0;
+					if (!no_out_cnt)
+						*num_out += ctl_cnt;
+					/* recalc a clean slate and setup */
+					switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+						case AF_INET:
+							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+							break;
+#endif
+#ifdef INET6
+						case AF_INET6:
+							mtu = net->mtu - SCTP_MIN_OVERHEAD;
+							break;
+#endif
+#if defined(__Userspace__)
+						case AF_CONN:
+							mtu = net->mtu - sizeof(struct sctphdr);
+							break;
+#endif
+						default:
+							/* TSNH */
+							mtu = net->mtu;
+							break;
+					}
+					to_out = 0;
+					no_fragmentflg = 1;
+				}
+			}
+		}
+		if (error != 0) {
+			/* try next net */
+			continue;
+		}
+		/************************/
+		/* Control transmission */
+		/************************/
+		/* Now first lets go through the control queue */
+		TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+			if ((sack_goes_to) &&
+			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO) &&
+			    (chk->whoTo != sack_goes_to)) {
+				/*
+				 * if we have a sack in queue, and we are looking at an
+				 * ecn echo that is NOT queued to where the sack is going..
+				 */
+				if (chk->whoTo == net) {
+					/* Don't transmit it to where it's going (current net) */
+					continue;
+				} else if (sack_goes_to == net) {
+					/* But do transmit it to this address */
+					goto skip_net_check;
+				}
+			}
+			if (chk->whoTo == NULL) {
+				if (asoc->alternate == NULL) {
+					if (asoc->primary_destination != net) {
+						continue;
+					}
+				} else {
+					if (asoc->alternate != net) {
+						continue;
+					}
+				}
+			} else {
+				if (chk->whoTo != net) {
+					continue;
+				}
+			}
+		skip_net_check:
+			if (chk->data == NULL) {
+				continue;
+			}
+			if (chk->sent != SCTP_DATAGRAM_UNSENT) {
+				/*
+				 * It must be unsent. Cookies and ASCONFs
+				 * hang around, but their timers will force
+				 * them out when marked for resend.
+				 */
+				continue;
+			}
+			/*
+			 * if no AUTH is yet included and this chunk
+			 * requires it, make sure to account for it.  We
+			 * don't apply the size until the AUTH chunk is
+			 * actually added below in case there is no room for
+			 * this chunk. NOTE: we overload the use of "omtu"
+			 * here
+			 */
+			if ((auth == NULL) &&
+			    sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+							stcb->asoc.peer_auth_chunks)) {
+				omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+			} else
+				omtu = 0;
+			/* Here we do NOT factor the r_mtu */
+			if ((chk->send_size <= (int)(mtu - omtu)) ||
+			    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+				/*
+				 * We probably should glom the mbuf chain
+				 * from the chk->data for control but the
+				 * problem is it becomes yet one more level
+				 * of tracking to do if for some reason
+				 * output fails. Then I have got to
+				 * reconstruct the merged control chain.. el
+				 * yucko.. for now we take the easy way and
+				 * do the copy
+				 */
+				/*
+				 * Add an AUTH chunk, if chunk requires it
+				 * save the offset into the chain for AUTH
+				 */
+				if ((auth == NULL) &&
+				    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+								 stcb->asoc.peer_auth_chunks))) {
+					outchain = sctp_add_auth_chunk(outchain,
+								       &endoutchain,
+								       &auth,
+								       &auth_offset,
+								       stcb,
+								       chk->rec.chunk_id.id);
+					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+				}
+				outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
+							       (int)chk->rec.chunk_id.can_take_data,
+							       chk->send_size, chk->copy_by_ref);
+				if (outchain == NULL) {
+					*reason_code = 8;
+					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+					return (ENOMEM);
+				}
+				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+				/* update our MTU size */
+				if (mtu > (chk->send_size + omtu))
+					mtu -= (chk->send_size + omtu);
+				else
+					mtu = 0;
+				to_out += (chk->send_size + omtu);
+				/* Do clear IP_DF ? */
+				if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+					no_fragmentflg = 0;
+				}
+				if (chk->rec.chunk_id.can_take_data)
+					chk->data = NULL;
+				/* Mark things to be removed, if needed */
+				if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+				    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) || /* EY */
+				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
+				    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
+				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
+				    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
+				    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
+				    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
+				    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
+				    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
+				    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
+					if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) {
+						hbflag = 1;
+					}
+					/* remove these chunks at the end */
+					if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
+					    (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK)) {
+						/* turn off the timer */
+						if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+							sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+									inp, stcb, net,
+							                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
+						}
+					}
+					ctl_cnt++;
+				} else {
+					/*
+					 * Other chunks, since they have
+					 * timers running (i.e. COOKIE)
+					 * we just "trust" that it
+					 * gets sent or retransmitted.
+					 */
+					ctl_cnt++;
+					if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+						cookie = 1;
+						no_out_cnt = 1;
+					} else if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
+						/*
+						 * Increment the ecne send count here;
+						 * this means we may be over-zealous in
+						 * our counting if the send fails, but it's
+						 * the best place to do it (we used to do
+						 * it in the queue of the chunk, but that did
+						 * not tell how many times it was sent).
+						 */
+						SCTP_STAT_INCR(sctps_sendecne);
+					}
+					chk->sent = SCTP_DATAGRAM_SENT;
+					if (chk->whoTo == NULL) {
+						chk->whoTo = net;
+						atomic_add_int(&net->ref_count, 1);
+					}
+					chk->snd_count++;
+				}
+				if (mtu == 0) {
+					/*
+					 * Ok we are out of room but we can
+					 * output without affecting the
+					 * flight size since this little guy
+					 * is a control only packet.
+					 */
+					if (asconf) {
+						sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
+						/*
+						 * do NOT clear the asconf
+						 * flag as it is used to do
+						 * appropriate source address
+						 * selection.
+						 */
+					}
+					if (cookie) {
+						sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+						cookie = 0;
+					}
+					/* Only HB or ASCONF advances time */
+					if (hbflag) {
+						if (*now_filled == 0) {
+							(void)SCTP_GETTIME_TIMEVAL(now);
+							*now_filled = 1;
+						}
+						net->last_sent_time = *now;
+						hbflag = 0;
+					}
+					if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+					                                        (struct sockaddr *)&net->ro._l_addr,
+					                                        outchain,
+					                                        auth_offset, auth,
+					                                        stcb->asoc.authinfo.active_keyid,
+					                                        no_fragmentflg, 0, asconf,
+					                                        inp->sctp_lport, stcb->rport,
+					                                        htonl(stcb->asoc.peer_vtag),
+					                                        net->port, NULL,
+#if defined(__FreeBSD__)
+					                                        0, 0,
+#endif
+					                                        so_locked))) {
+						/* error, we could not output */
+						SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+						if (from_where == 0) {
+							SCTP_STAT_INCR(sctps_lowlevelerrusr);
+						}
+						if (error == ENOBUFS) {
+							asoc->ifp_had_enobuf = 1;
+							SCTP_STAT_INCR(sctps_lowlevelerr);
+						}
+						if (error == EHOSTUNREACH) {
+							/*
+							 * Destination went
+							 * unreachable
+							 * during this send
+							 */
+							sctp_move_chunks_from_net(stcb, net);
+						}
+						*reason_code = 7;
+						break;
+					} else {
+						asoc->ifp_had_enobuf = 0;
+					}
+					/*
+					 * Increase the number we sent; if a
+					 * cookie is sent we don't report
+					 * that any was sent out.
+					 */
+					outchain = endoutchain = NULL;
+					auth = NULL;
+					auth_offset = 0;
+					if (!no_out_cnt)
+						*num_out += ctl_cnt;
+					/* recalc a clean slate and setup */
+					switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+						case AF_INET:
+							mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+							break;
+#endif
+#ifdef INET6
+						case AF_INET6:
+							mtu = net->mtu - SCTP_MIN_OVERHEAD;
+							break;
+#endif
+#if defined(__Userspace__)
+						case AF_CONN:
+							mtu = net->mtu - sizeof(struct sctphdr);
+							break;
+#endif
+						default:
+							/* TSNH */
+							mtu = net->mtu;
+							break;
+					}
+					to_out = 0;
+					no_fragmentflg = 1;
+				}
+			}
+		}
+		if (error != 0) {
+			/* try next net */
+			continue;
+		}
+		/* JRI: if dest is in PF state, do not send data to it */
+		if ((asoc->sctp_cmt_on_off > 0) &&
+		    (net != stcb->asoc.alternate) &&
+		    (net->dest_state & SCTP_ADDR_PF)) {
+			goto no_data_fill;
+		}
+		if (net->flight_size >= net->cwnd) {
+			goto no_data_fill;
+		}
+		if ((asoc->sctp_cmt_on_off > 0) &&
+		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_RECV_BUFFER_SPLITTING) &&
+		    (net->flight_size > max_rwnd_per_dest)) {
+			goto no_data_fill;
+		}
+		/*
+		 * We need a specific accounting for the usage of the
+		 * send buffer. We also need to check the number of messages
+		 * per net. For now, this is better than nothing and it
+		 * is disabled by default...
+		 */
+		if ((asoc->sctp_cmt_on_off > 0) &&
+		    (SCTP_BASE_SYSCTL(sctp_buffer_splitting) & SCTP_SEND_BUFFER_SPLITTING) &&
+		    (max_send_per_dest > 0) &&
+		    (net->flight_size > max_send_per_dest)) {
+			goto no_data_fill;
+		}
+		/*********************/
+		/* Data transmission */
+		/*********************/
+		/*
+		 * if AUTH for DATA is required and no AUTH has been added
+		 * yet, account for this in the mtu now... if no data can be
+		 * bundled, this adjustment won't matter anyways since the
+		 * packet will be going out...
+		 */
+		data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA,
+							     stcb->asoc.peer_auth_chunks);
+		if (data_auth_reqd && (auth == NULL)) {
+			mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+		}
+		/* now lets add any data within the MTU constraints */
+		switch (((struct sockaddr *)&net->ro._l_addr)->sa_family) {
+#ifdef INET
+		case AF_INET:
+			if (net->mtu > SCTP_MIN_V4_OVERHEAD)
+				omtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+			else
+				omtu = 0;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			if (net->mtu > SCTP_MIN_OVERHEAD)
+				omtu = net->mtu - SCTP_MIN_OVERHEAD;
+			else
+				omtu = 0;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			if (net->mtu > sizeof(struct sctphdr)) {
+				omtu = net->mtu - sizeof(struct sctphdr);
+			} else {
+				omtu = 0;
+			}
+			break;
+#endif
+		default:
+			/* TSNH */
+			omtu = 0;
+			break;
+		}
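+		/*
+		 * omtu is the payload room of one unfragmented packet to this
+		 * destination: the path MTU minus the IPv4/IPv6 plus SCTP
+		 * common-header overhead (just the SCTP common header for
+		 * AF_CONN). A DATA chunk larger than omtu is only sent with
+		 * the FRAGMENT_OK flag set (forced on below if missing).
+		 */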
+		if ((((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+		      (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
+		     (skip_data_for_this_net == 0)) ||
+		    (cookie)) {
+			TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+				if (no_data_chunks) {
+					/* let only control go out */
+					*reason_code = 1;
+					break;
+				}
+				if (net->flight_size >= net->cwnd) {
+					/* skip this net, no room for data */
+					*reason_code = 2;
+					break;
+				}
+				if ((chk->whoTo != NULL) &&
+				    (chk->whoTo != net)) {
+					/* Don't send the chunk on this net */
+					continue;
+				}
+
+				if (asoc->sctp_cmt_on_off == 0) {
+					if ((asoc->alternate) &&
+					    (asoc->alternate != net) &&
+					    (chk->whoTo == NULL)) {
+						continue;
+					} else if ((net != asoc->primary_destination) &&
+						   (asoc->alternate == NULL) &&
+						   (chk->whoTo == NULL)) {
+						continue;
+					}
+				}
+				if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
+					/*-
+					 * strange, we have a chunk that is
+					 * too big for its destination and
+					 * yet no fragment ok flag.
+					 * Something went wrong when the
+					 * PMTU changed...we did not mark
+					 * this chunk for some reason?? I
+					 * will fix it here by letting IP
+					 * fragment it for now and printing
+					 * a warning. This really should not
+					 * happen ...
+					 */
+					SCTP_PRINTF("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
+						    chk->send_size, mtu);
+					chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+				}
+				if (SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) &&
+				    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) == SCTP_STATE_SHUTDOWN_PENDING)) {
+					struct sctp_data_chunk *dchkh;
+
+					dchkh = mtod(chk->data, struct sctp_data_chunk *);
+					dchkh->ch.chunk_flags |= SCTP_DATA_SACK_IMMEDIATELY;
+				}
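+				/*
+				 * Bundle this chunk only if it fits both the
+				 * room left in this packet (mtu) and the
+				 * per-destination limit (r_mtu), or if it may
+				 * be fragmented and still fits in the peer's
+				 * rwnd.
+				 */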
+				if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
+				    ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
+					/* ok we will add this one */
+
+					/*
+					 * Add an AUTH chunk if the chunk
+					 * requires it; save the offset into
+					 * the chain for AUTH.
+					 */
+					if (data_auth_reqd) {
+						if (auth == NULL) {
+							outchain = sctp_add_auth_chunk(outchain,
+										       &endoutchain,
+										       &auth,
+										       &auth_offset,
+										       stcb,
+										       SCTP_DATA);
+							auth_keyid = chk->auth_keyid;
+							override_ok = 0;
+							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+						} else if (override_ok) {
+							/* use this data's keyid */
+							auth_keyid = chk->auth_keyid;
+							override_ok = 0;
+						} else if (auth_keyid != chk->auth_keyid) {
+							/* different keyid, so done bundling */
+							break;
+						}
+					}
+					outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
+								       chk->send_size, chk->copy_by_ref);
+					if (outchain == NULL) {
+						SCTPDBG(SCTP_DEBUG_OUTPUT3, "No memory?\n");
+						if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+							sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+						}
+						*reason_code = 3;
+						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+						return (ENOMEM);
+					}
+					/* update our MTU size */
+					/* Do clear IP_DF ? */
+					if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+						no_fragmentflg = 0;
+					}
+					/* unsigned subtraction of mtu */
+					if (mtu > chk->send_size)
+						mtu -= chk->send_size;
+					else
+						mtu = 0;
+					/* unsigned subtraction of r_mtu */
+					if (r_mtu > chk->send_size)
+						r_mtu -= chk->send_size;
+					else
+						r_mtu = 0;
+
+					to_out += chk->send_size;
+					if ((to_out > mx_mtu) && no_fragmentflg) {
+#ifdef INVARIANTS
+						panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
+#else
+						SCTP_PRINTF("Exceeding mtu of %d out size is %d\n",
+							    mx_mtu, to_out);
+#endif
+					}
+					chk->window_probe = 0;
+					data_list[bundle_at++] = chk;
+					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+						break;
+					}
+					if (chk->sent == SCTP_DATAGRAM_UNSENT) {
+						if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
+							SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
+						} else {
+							SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
+						}
+						if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
+						    ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
+							/* Count the number of user msgs that were fragmented;
+							 * we do this by counting only when we see a LAST
+							 * fragment.
+							 */
+							SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
+					}
+					if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
+						if ((one_chunk) && (stcb->asoc.total_flight == 0)) {
+							data_list[0]->window_probe = 1;
+							net->window_probe = 1;
+						}
+						break;
+					}
+				} else {
+					/*
+					 * Must be sent in order of the
+					 * TSN's (on a network)
+					 */
+					break;
+				}
+			}	/* for (chunk gather loop for this net) */
+		}		/* if asoc.state OPEN */
+	no_data_fill:
+		/* Is there something to send for this destination? */
+		if (outchain) {
+			/* We may need to start a control timer or two */
+			if (asconf) {
+				sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp,
+						 stcb, net);
+				/*
+				 * do NOT clear the asconf flag as it is used
+				 * to do appropriate source address selection.
+				 */
+			}
+			if (cookie) {
+				sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
+				cookie = 0;
+			}
+			/* must start a send timer if data is being sent */
+			if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
+				/*
+				 * no timer running on this destination,
+				 * restart it.
+				 */
+				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+			}
+			if (bundle_at || hbflag) {
+				/* For data/asconf and hb set time */
+				if (*now_filled == 0) {
+					(void)SCTP_GETTIME_TIMEVAL(now);
+					*now_filled = 1;
+				}
+				net->last_sent_time = *now;
+			}
+			/* Now send it, if there is anything to send :> */
+			if ((error = sctp_lowlevel_chunk_output(inp,
+			                                        stcb,
+			                                        net,
+			                                        (struct sockaddr *)&net->ro._l_addr,
+			                                        outchain,
+			                                        auth_offset,
+			                                        auth,
+			                                        auth_keyid,
+			                                        no_fragmentflg,
+			                                        bundle_at,
+			                                        asconf,
+			                                        inp->sctp_lport, stcb->rport,
+			                                        htonl(stcb->asoc.peer_vtag),
+			                                        net->port, NULL,
+#if defined(__FreeBSD__)
+			                                        0, 0,
+#endif
+			                                        so_locked))) {
+				/* error, we could not output */
+				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+				if (from_where == 0) {
+					SCTP_STAT_INCR(sctps_lowlevelerrusr);
+				}
+				if (error == ENOBUFS) {
+					asoc->ifp_had_enobuf = 1;
+					SCTP_STAT_INCR(sctps_lowlevelerr);
+				}
+				if (error == EHOSTUNREACH) {
+					/*
+					 * Destination went unreachable
+					 * during this send
+					 */
+					sctp_move_chunks_from_net(stcb, net);
+				}
+				*reason_code = 6;
+				/*-
+				 * I add this line to be paranoid. As far as
+				 * I can tell the continue takes us back to
+				 * the top of the for loop, but just to make
+				 * sure I will reset these again here.
+				 */
+				ctl_cnt = bundle_at = 0;
+				continue; /* This takes us back to the for() for the nets. */
+			} else {
+				asoc->ifp_had_enobuf = 0;
+			}
+			endoutchain = NULL;
+			auth = NULL;
+			auth_offset = 0;
+			if (!no_out_cnt) {
+				*num_out += (ctl_cnt + bundle_at);
+			}
+			if (bundle_at) {
+				/* setup for a RTO measurement */
+				tsns_sent = data_list[0]->rec.data.tsn;
+				/* fill time if not already filled */
+				if (*now_filled == 0) {
+					(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
+					*now_filled = 1;
+					*now = asoc->time_last_sent;
+				} else {
+					asoc->time_last_sent = *now;
+				}
+				if (net->rto_needed) {
+					data_list[0]->do_rtt = 1;
+					net->rto_needed = 0;
+				}
+				SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
+				sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
+			}
+			if (one_chunk) {
+				break;
+			}
+		}
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+			sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
+		}
+	}
+	if (old_start_at == NULL) {
+		old_start_at = start_at;
+		start_at = TAILQ_FIRST(&asoc->nets);
+		if (old_start_at)
+			goto again_one_more_time;
+	}
+
+	/*
+	 * At the end there should be no NON timed chunks hanging on this
+	 * queue.
+	 */
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+		sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
+	}
+	if ((*num_out == 0) && (*reason_code == 0)) {
+		*reason_code = 4;
+	} else {
+		*reason_code = 5;
+	}
+	sctp_clean_up_ctl(stcb, asoc, so_locked);
+	return (0);
+}
+
+void
+sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
+{
+	/*-
+	 * Prepend an OPERATION-ERROR chunk header and put it on the end of
+	 * the control chunk queue.
+	 */
+	struct sctp_chunkhdr *hdr;
+	struct sctp_tmit_chunk *chk;
+	struct mbuf *mat, *last_mbuf;
+	uint32_t chunk_length;
+	uint16_t padding_length;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_NOWAIT);
+	if (op_err == NULL) {
+		return;
+	}
+	last_mbuf = NULL;
+	chunk_length = 0;
+	for (mat = op_err; mat != NULL; mat = SCTP_BUF_NEXT(mat)) {
+		chunk_length += SCTP_BUF_LEN(mat);
+		if (SCTP_BUF_NEXT(mat) == NULL) {
+			last_mbuf = mat;
+		}
+	}
+	if (chunk_length > SCTP_MAX_CHUNK_LENGTH) {
+		sctp_m_freem(op_err);
+		return;
+	}
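+	/*
+	 * Pad the chunk to a 4-byte boundary, e.g. a chunk_length of 13
+	 * gives 13 % 4 = 1, turned below into 4 - 1 = 3 bytes of padding.
+	 */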
+	padding_length = chunk_length % 4;
+	if (padding_length != 0) {
+		padding_length = 4 - padding_length;
+	}
+	if (padding_length != 0) {
+		if (sctp_add_pad_tombuf(last_mbuf, padding_length) == NULL) {
+			sctp_m_freem(op_err);
+			return;
+		}
+	}
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		/* no memory */
+		sctp_m_freem(op_err);
+		return;
+	}
+	chk->copy_by_ref = 0;
+	chk->send_size = (uint16_t)chunk_length;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->asoc = &stcb->asoc;
+	chk->data = op_err;
+	chk->whoTo = NULL;
+	chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
+	chk->rec.chunk_id.can_take_data = 0;
+	hdr = mtod(op_err, struct sctp_chunkhdr *);
+	hdr->chunk_type = SCTP_OPERATION_ERROR;
+	hdr->chunk_flags = 0;
+	hdr->chunk_length = htons(chk->send_size);
+	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+	chk->asoc->ctrl_queue_cnt++;
+}
+
+int
+sctp_send_cookie_echo(struct mbuf *m,
+    int offset,
+    struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	/*-
+	 * pull out the cookie and put it at the front of the control chunk
+	 * queue.
+	 */
+	int at;
+	struct mbuf *cookie;
+	struct sctp_paramhdr parm, *phdr;
+	struct sctp_chunkhdr *hdr;
+	struct sctp_tmit_chunk *chk;
+	uint16_t ptype, plen;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	/* First find the cookie in the param area */
+	cookie = NULL;
+	at = offset + sizeof(struct sctp_init_chunk);
+	for (;;) {
+		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
+		if (phdr == NULL) {
+			return (-3);
+		}
+		ptype = ntohs(phdr->param_type);
+		plen = ntohs(phdr->param_length);
+		if (ptype == SCTP_STATE_COOKIE) {
+			int pad;
+
+			/* found the cookie */
+			if ((pad = (plen % 4))) {
+				plen += 4 - pad;
+			}
+			cookie = SCTP_M_COPYM(m, at, plen, M_NOWAIT);
+			if (cookie == NULL) {
+				/* No memory */
+				return (-2);
+			}
+#ifdef SCTP_MBUF_LOGGING
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+				sctp_log_mbc(cookie, SCTP_MBUF_ICOPY);
+			}
+#endif
+			break;
+		}
+		at += SCTP_SIZE32(plen);
+	}
+	/* ok, we got the cookie lets change it into a cookie echo chunk */
+	/* first the change from param to cookie */
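+	/*
+	 * Only the first two bytes (type and flags) need rewriting: the
+	 * 16-bit length field sits at the same offset in the parameter
+	 * header and in the chunk header, so it is reused as-is.
+	 */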
+	hdr = mtod(cookie, struct sctp_chunkhdr *);
+	hdr->chunk_type = SCTP_COOKIE_ECHO;
+	hdr->chunk_flags = 0;
+	/* get the chunk stuff now and place it in the FRONT of the queue */
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		/* no memory */
+		sctp_m_freem(cookie);
+		return (-5);
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+	chk->send_size = plen;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->asoc = &stcb->asoc;
+	chk->data = cookie;
+	chk->whoTo = net;
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
+	chk->asoc->ctrl_queue_cnt++;
+	return (0);
+}
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
+    struct mbuf *m,
+    int offset,
+    int chk_length,
+    struct sctp_nets *net)
+{
+	/*
+	 * Take an HB request, turn it into an HB ack, and queue it for sending.
+	 */
+	struct mbuf *outchain;
+	struct sctp_chunkhdr *chdr;
+	struct sctp_tmit_chunk *chk;
+
+
+	if (net == NULL)
+		/* must have a net pointer */
+		return;
+
+	outchain = SCTP_M_COPYM(m, offset, chk_length, M_NOWAIT);
+	if (outchain == NULL) {
+		/* gak out of memory */
+		return;
+	}
+#ifdef SCTP_MBUF_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mbc(outchain, SCTP_MBUF_ICOPY);
+	}
+#endif
+	chdr = mtod(outchain, struct sctp_chunkhdr *);
+	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
+	chdr->chunk_flags = 0;
+	if (chk_length % 4) {
+		/* need pad */
+		uint32_t cpthis = 0;
+		int padlen;
+
+		padlen = 4 - (chk_length % 4);
+		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
+	}
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		/* no memory */
+		sctp_m_freem(outchain);
+		return;
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
+	chk->rec.chunk_id.can_take_data = 1;
+	chk->flags = 0;
+	chk->send_size = chk_length;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->asoc = &stcb->asoc;
+	chk->data = outchain;
+	chk->whoTo = net;
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+	chk->asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_cookie_ack(struct sctp_tcb *stcb)
+{
+	/* formulate and queue a cookie-ack back to sender */
+	struct mbuf *cookie_ack;
+	struct sctp_chunkhdr *hdr;
+	struct sctp_tmit_chunk *chk;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+
+	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
+	if (cookie_ack == NULL) {
+		/* no mbuf's */
+		return;
+	}
+	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		/* no memory */
+		sctp_m_freem(cookie_ack);
+		return;
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
+	chk->rec.chunk_id.can_take_data = 1;
+	chk->flags = 0;
+	chk->send_size = sizeof(struct sctp_chunkhdr);
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->asoc = &stcb->asoc;
+	chk->data = cookie_ack;
+	if (chk->asoc->last_control_chunk_from != NULL) {
+		chk->whoTo = chk->asoc->last_control_chunk_from;
+		atomic_add_int(&chk->whoTo->ref_count, 1);
+	} else {
+		chk->whoTo = NULL;
+	}
+	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
+	hdr->chunk_type = SCTP_COOKIE_ACK;
+	hdr->chunk_flags = 0;
+	hdr->chunk_length = htons(chk->send_size);
+	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
+	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+	chk->asoc->ctrl_queue_cnt++;
+	return;
+}
+
+
+void
+sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/* formulate and queue a SHUTDOWN-ACK back to the sender */
+	struct mbuf *m_shutdown_ack;
+	struct sctp_shutdown_ack_chunk *ack_cp;
+	struct sctp_tmit_chunk *chk;
+
+	m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_NOWAIT, 1, MT_HEADER);
+	if (m_shutdown_ack == NULL) {
+		/* no mbuf's */
+		return;
+	}
+	SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		/* no memory */
+		sctp_m_freem(m_shutdown_ack);
+		return;
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
+	chk->rec.chunk_id.can_take_data = 1;
+	chk->flags = 0;
+	chk->send_size = sizeof(struct sctp_chunkhdr);
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->asoc = &stcb->asoc;
+	chk->data = m_shutdown_ack;
+	chk->whoTo = net;
+	if (chk->whoTo) {
+		atomic_add_int(&chk->whoTo->ref_count, 1);
+	}
+	ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
+	ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
+	ack_cp->ch.chunk_flags = 0;
+	ack_cp->ch.chunk_length = htons(chk->send_size);
+	SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
+	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+	chk->asoc->ctrl_queue_cnt++;
+	return;
+}
+
+void
+sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	/* formulate and queue a SHUTDOWN to the sender */
+	struct mbuf *m_shutdown;
+	struct sctp_shutdown_chunk *shutdown_cp;
+	struct sctp_tmit_chunk *chk;
+
+	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+		if (chk->rec.chunk_id.id == SCTP_SHUTDOWN) {
+			/* We already have a SHUTDOWN queued. Reuse it. */
+			if (chk->whoTo) {
+				sctp_free_remote_addr(chk->whoTo);
+				chk->whoTo = NULL;
+			}
+			break;
+		}
+	}
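+	/*
+	 * If a SHUTDOWN chunk is already queued, chk now points at it and it
+	 * is reused below: re-targeted to this net, given a fresh cumulative
+	 * TSN and moved to the tail of the queue. Otherwise build a new one.
+	 */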
+	if (chk == NULL) {
+		m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_NOWAIT, 1, MT_HEADER);
+		if (m_shutdown == NULL) {
+			/* no mbuf's */
+			return;
+		}
+		SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
+		sctp_alloc_a_chunk(stcb, chk);
+		if (chk == NULL) {
+			/* no memory */
+			sctp_m_freem(m_shutdown);
+			return;
+		}
+		chk->copy_by_ref = 0;
+		chk->rec.chunk_id.id = SCTP_SHUTDOWN;
+		chk->rec.chunk_id.can_take_data = 1;
+		chk->flags = 0;
+		chk->send_size = sizeof(struct sctp_shutdown_chunk);
+		chk->sent = SCTP_DATAGRAM_UNSENT;
+		chk->snd_count = 0;
+		chk->asoc = &stcb->asoc;
+		chk->data = m_shutdown;
+		chk->whoTo = net;
+		if (chk->whoTo) {
+			atomic_add_int(&chk->whoTo->ref_count, 1);
+		}
+		shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
+		shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
+		shutdown_cp->ch.chunk_flags = 0;
+		shutdown_cp->ch.chunk_length = htons(chk->send_size);
+		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
+		SCTP_BUF_LEN(m_shutdown) = chk->send_size;
+		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+		chk->asoc->ctrl_queue_cnt++;
+	} else {
+		TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, sctp_next);
+		chk->whoTo = net;
+		if (chk->whoTo) {
+			atomic_add_int(&chk->whoTo->ref_count, 1);
+		}
+		shutdown_cp = mtod(chk->data, struct sctp_shutdown_chunk *);
+		shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
+		TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+	}
+	return;
+}
+
+void
+sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net, int addr_locked)
+{
+	/*
+	 * formulate and queue an ASCONF to the peer.
+	 * ASCONF parameters should be queued on the assoc queue.
+	 */
+	struct sctp_tmit_chunk *chk;
+	struct mbuf *m_asconf;
+	int len;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+
+	if ((!TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) &&
+	    (!sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS))) {
+		/* can't send a new one if there is one in flight already */
+		return;
+	}
+
+	/* compose an ASCONF chunk, maximum length is PMTU */
+	m_asconf = sctp_compose_asconf(stcb, &len, addr_locked);
+	if (m_asconf == NULL) {
+		return;
+	}
+
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		/* no memory */
+		sctp_m_freem(m_asconf);
+		return;
+	}
+
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_ASCONF;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+	chk->data = m_asconf;
+	chk->send_size = len;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->asoc = &stcb->asoc;
+	chk->whoTo = net;
+	if (chk->whoTo) {
+		atomic_add_int(&chk->whoTo->ref_count, 1);
+	}
+	TAILQ_INSERT_TAIL(&chk->asoc->asconf_send_queue, chk, sctp_next);
+	chk->asoc->ctrl_queue_cnt++;
+	return;
+}
+
+void
+sctp_send_asconf_ack(struct sctp_tcb *stcb)
+{
+	/*
+	 * formulate and queue an asconf-ack back to the sender.
+	 * the asconf-ack must be stored in the tcb.
+	 */
+	struct sctp_tmit_chunk *chk;
+	struct sctp_asconf_ack *ack, *latest_ack;
+	struct mbuf *m_ack;
+	struct sctp_nets *net = NULL;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	/* Get the latest ASCONF-ACK */
+	latest_ack = TAILQ_LAST(&stcb->asoc.asconf_ack_sent, sctp_asconf_ackhead);
+	if (latest_ack == NULL) {
+		return;
+	}
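+	/*
+	 * Pick the destination: if the last ASCONF-ACK already went to the
+	 * address the ASCONF came from, this is a retransmission and an
+	 * alternate net is preferred; otherwise answer towards the source of
+	 * the last control chunk, falling back to the alternate or primary
+	 * destination when that is unknown.
+	 */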
+	if (latest_ack->last_sent_to != NULL &&
+	    latest_ack->last_sent_to == stcb->asoc.last_control_chunk_from) {
+		/* we're doing a retransmission */
+		net = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
+		if (net == NULL) {
+			/* no alternate */
+			if (stcb->asoc.last_control_chunk_from == NULL) {
+				if (stcb->asoc.alternate) {
+					net = stcb->asoc.alternate;
+				} else {
+					net = stcb->asoc.primary_destination;
+				}
+			} else {
+				net = stcb->asoc.last_control_chunk_from;
+			}
+		}
+	} else {
+		/* normal case */
+		if (stcb->asoc.last_control_chunk_from == NULL) {
+			if (stcb->asoc.alternate) {
+				net = stcb->asoc.alternate;
+			} else {
+				net = stcb->asoc.primary_destination;
+			}
+		} else {
+			net = stcb->asoc.last_control_chunk_from;
+		}
+	}
+	latest_ack->last_sent_to = net;
+
+	TAILQ_FOREACH(ack, &stcb->asoc.asconf_ack_sent, next) {
+		if (ack->data == NULL) {
+			continue;
+		}
+
+		/* copy the asconf_ack */
+		m_ack = SCTP_M_COPYM(ack->data, 0, M_COPYALL, M_NOWAIT);
+		if (m_ack == NULL) {
+			/* couldn't copy it */
+			return;
+		}
+#ifdef SCTP_MBUF_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+			sctp_log_mbc(m_ack, SCTP_MBUF_ICOPY);
+		}
+#endif
+
+		sctp_alloc_a_chunk(stcb, chk);
+		if (chk == NULL) {
+			/* no memory */
+			if (m_ack)
+				sctp_m_freem(m_ack);
+			return;
+		}
+		chk->copy_by_ref = 0;
+		chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
+		chk->rec.chunk_id.can_take_data = 1;
+		chk->flags = CHUNK_FLAGS_FRAGMENT_OK;
+		chk->whoTo = net;
+		if (chk->whoTo) {
+			atomic_add_int(&chk->whoTo->ref_count, 1);
+		}
+		chk->data = m_ack;
+		chk->send_size = ack->len;
+		chk->sent = SCTP_DATAGRAM_UNSENT;
+		chk->snd_count = 0;
+		chk->asoc = &stcb->asoc;
+
+		TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
+		chk->asoc->ctrl_queue_cnt++;
+	}
+	return;
+}
+
+
+static int
+sctp_chunk_retransmission(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_association *asoc,
+    int *cnt_out, struct timeval *now, int *now_filled, int *fr_done, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	/*-
+	 * send out one MTU of retransmission. If fast_retransmit is
+	 * happening we ignore the cwnd. Otherwise we obey the cwnd and
+	 * rwnd. For a Cookie or Asconf in the control chunk queue we
+	 * retransmit them by themselves.
+	 *
+	 * For data chunks we will pick out the lowest TSNs in the sent_queue
+	 * marked for resend and bundle them all together (up to the MTU of
+	 * the destination). The address to send to should have been
+	 * selected/changed where the retransmission was marked (i.e. in FR
+	 * or t3-timeout routines).
+	 */
+	struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
+	struct sctp_tmit_chunk *chk, *fwd;
+	struct mbuf *m, *endofchain;
+	struct sctp_nets *net = NULL;
+	uint32_t tsns_sent = 0;
+	int no_fragmentflg, bundle_at, cnt_thru;
+	unsigned int mtu;
+	int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
+	struct sctp_auth_chunk *auth = NULL;
+	uint32_t auth_offset = 0;
+	uint16_t auth_keyid;
+	int override_ok = 1;
+	int data_auth_reqd = 0;
+	uint32_t dmtu = 0;
+
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(inp));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(inp));
+	}
+#endif
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	tmr_started = ctl_cnt = bundle_at = error = 0;
+	no_fragmentflg = 1;
+	fwd_tsn = 0;
+	*cnt_out = 0;
+	fwd = NULL;
+	endofchain = m = NULL;
+	auth_keyid = stcb->asoc.authinfo.active_keyid;
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_audit_log(0xC3, 1);
+#endif
+	if ((TAILQ_EMPTY(&asoc->sent_queue)) &&
+	    (TAILQ_EMPTY(&asoc->control_send_queue))) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT1,"SCTP hits empty queue with cnt set to %d?\n",
+			asoc->sent_queue_retran_cnt);
+		asoc->sent_queue_cnt = 0;
+		asoc->sent_queue_cnt_removeable = 0;
+		/* send back 0/0 so we enter normal transmission */
+		*cnt_out = 0;
+		return (0);
+	}
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
+		    (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
+		    (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
+			if (chk->sent != SCTP_DATAGRAM_RESEND) {
+				continue;
+			}
+			if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
+				if (chk != asoc->str_reset) {
+					/*
+					 * not eligible for retran if it's
+					 * not ours
+					 */
+					continue;
+				}
+			}
+			ctl_cnt++;
+			if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+				fwd_tsn = 1;
+			}
+			/*
+			 * Add an AUTH chunk if the chunk requires it; save the
+			 * offset into the chain for AUTH.
+			 */
+			if ((auth == NULL) &&
+			    (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
+							 stcb->asoc.peer_auth_chunks))) {
+				m = sctp_add_auth_chunk(m, &endofchain,
+							&auth, &auth_offset,
+							stcb,
+							chk->rec.chunk_id.id);
+				SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+			}
+			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+			break;
+		}
+	}
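+	/*
+	 * If a COOKIE-ECHO, STREAM-RESET or FORWARD-TSN was marked for
+	 * resend, m now holds that single control chunk (plus an AUTH chunk
+	 * when required) and it is sent out by itself below.
+	 */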
+	one_chunk = 0;
+	cnt_thru = 0;
+	/* do we have control chunks to retransmit? */
+	if (m != NULL) {
+		/* Start a timer no matter if we succeed or fail */
+		if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+			sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
+		} else if (chk->rec.chunk_id.id == SCTP_ASCONF)
+			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
+		chk->snd_count++;	/* update our count */
+		if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
+		                                        (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
+		                                        auth_offset, auth, stcb->asoc.authinfo.active_keyid,
+		                                        no_fragmentflg, 0, 0,
+		                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
+		                                        chk->whoTo->port, NULL,
+#if defined(__FreeBSD__)
+		                                        0, 0,
+#endif
+		                                        so_locked))) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+			if (error == ENOBUFS) {
+				asoc->ifp_had_enobuf = 1;
+				SCTP_STAT_INCR(sctps_lowlevelerr);
+			}
+			return (error);
+		} else {
+			asoc->ifp_had_enobuf = 0;
+		}
+		endofchain = NULL;
+		auth = NULL;
+		auth_offset = 0;
+		/*
+		 * We don't want to mark the net->sent time here since
+		 * we use this for HB, and retrans cannot measure RTT
+		 */
+		/* (void)SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
+		*cnt_out += 1;
+		chk->sent = SCTP_DATAGRAM_SENT;
+		sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
+		if (fwd_tsn == 0) {
+			return (0);
+		} else {
+			/* Clean up the fwd-tsn list */
+			sctp_clean_up_ctl(stcb, asoc, so_locked);
+			return (0);
+		}
+	}
+	/*
+	 * Ok, it is just data retransmission we need to do or that and a
+	 * fwd-tsn with it all.
+	 */
+	if (TAILQ_EMPTY(&asoc->sent_queue)) {
+		return (SCTP_RETRAN_DONE);
+	}
+	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
+		/* not yet open, resend the cookie and that is it */
+		return (1);
+	}
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_auditing(20, inp, stcb, NULL);
+#endif
+	data_auth_reqd = sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks);
+	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+		if (chk->sent != SCTP_DATAGRAM_RESEND) {
+			/* No, not sent to this net or not ready for rtx */
+			continue;
+		}
+		if (chk->data == NULL) {
+			SCTP_PRINTF("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
+			            chk->rec.data.tsn, chk->snd_count, chk->sent);
+			continue;
+		}
+		if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
+		    (chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
+			struct mbuf *op_err;
+			char msg[SCTP_DIAG_INFO_LEN];
+
+			snprintf(msg, sizeof(msg), "TSN %8.8x retransmitted %d times, giving up",
+				 chk->rec.data.tsn, chk->snd_count);
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             msg);
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err,
+			                          so_locked);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			return (SCTP_RETRAN_EXIT);
+		}
+		/* pick up the net */
+		net = chk->whoTo;
+		switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				mtu = net->mtu - SCTP_MIN_OVERHEAD;
+				break;
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+				mtu = net->mtu - sizeof(struct sctphdr);
+				break;
+#endif
+			default:
+				/* TSNH */
+				mtu = net->mtu;
+				break;
+		}
+
+		if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
+			/* No room in peers rwnd */
+			uint32_t tsn;
+
+			tsn = asoc->last_acked_seq + 1;
+			if (tsn == chk->rec.data.tsn) {
+				/*
+				 * we make a special exception for this
+				 * case. The peer has no rwnd but is missing
+				 * the lowest chunk, which is probably what
+				 * is holding up the rwnd.
+				 */
+				goto one_chunk_around;
+			}
+			return (1);
+		}
+	one_chunk_around:
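+		/*
+		 * With less than one MTU of rwnd left we restrict this pass
+		 * to a single chunk; if both the rwnd and the total flight
+		 * are zero, that chunk doubles as a window probe.
+		 */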
+		if (asoc->peers_rwnd < mtu) {
+			one_chunk = 1;
+			if ((asoc->peers_rwnd == 0) &&
+			    (asoc->total_flight == 0)) {
+				chk->window_probe = 1;
+				chk->whoTo->window_probe = 1;
+			}
+		}
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_audit_log(0xC3, 2);
+#endif
+		bundle_at = 0;
+		m = NULL;
+		net->fast_retran_ip = 0;
+		if (chk->rec.data.doing_fast_retransmit == 0) {
+			/*
+			 * if no FR is in progress, skip destinations that have
+			 * flight_size > cwnd.
+			 */
+			if (net->flight_size >= net->cwnd) {
+				continue;
+			}
+		} else {
+			/*
+			 * Mark the destination net to have FR recovery
+			 * limits put on it.
+			 */
+			*fr_done = 1;
+			net->fast_retran_ip = 1;
+		}
+
+		/*
+		 * if no AUTH is yet included and this chunk requires it,
+		 * make sure to account for it.  We don't apply the size
+		 * until the AUTH chunk is actually added below in case
+		 * there is no room for this chunk.
+		 */
+		if (data_auth_reqd && (auth == NULL)) {
+			dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+		} else
+			dmtu = 0;
+
+		if ((chk->send_size <= (mtu - dmtu)) ||
+		    (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
+			/* ok we will add this one */
+			if (data_auth_reqd) {
+				if (auth == NULL) {
+					m = sctp_add_auth_chunk(m,
+								&endofchain,
+								&auth,
+								&auth_offset,
+								stcb,
+								SCTP_DATA);
+					auth_keyid = chk->auth_keyid;
+					override_ok = 0;
+					SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+				} else if (override_ok) {
+					auth_keyid = chk->auth_keyid;
+					override_ok = 0;
+				} else if (chk->auth_keyid != auth_keyid) {
+					/* different keyid, so done bundling */
+					break;
+				}
+			}
+			m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
+			if (m == NULL) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+				return (ENOMEM);
+			}
+			/* Do clear IP_DF ? */
+			if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+				no_fragmentflg = 0;
+			}
+			/* update our MTU size */
+			if (mtu > (chk->send_size + dmtu))
+				mtu -= (chk->send_size + dmtu);
+			else
+				mtu = 0;
+			data_list[bundle_at++] = chk;
+			if (one_chunk && (asoc->total_flight <= 0)) {
+				SCTP_STAT_INCR(sctps_windowprobed);
+			}
+		}
+		if (one_chunk == 0) {
+			/*
+			 * now, are there any more chunks forward of chk
+			 * to pick up?
+			 */
+			for (fwd = TAILQ_NEXT(chk, sctp_next); fwd != NULL; fwd = TAILQ_NEXT(fwd, sctp_next)) {
+				if (fwd->sent != SCTP_DATAGRAM_RESEND) {
+					/* Nope, not for retran */
+					continue;
+				}
+				if (fwd->whoTo != net) {
+					/* Nope, not the net in question */
+					continue;
+				}
+				if (data_auth_reqd && (auth == NULL)) {
+					dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+				} else
+					dmtu = 0;
+				if (fwd->send_size <= (mtu - dmtu)) {
+					if (data_auth_reqd) {
+						if (auth == NULL) {
+							m = sctp_add_auth_chunk(m,
+										&endofchain,
+										&auth,
+										&auth_offset,
+										stcb,
+										SCTP_DATA);
+							auth_keyid = fwd->auth_keyid;
+							override_ok = 0;
+							SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+						} else if (override_ok) {
+							auth_keyid = fwd->auth_keyid;
+							override_ok = 0;
+						} else if (fwd->auth_keyid != auth_keyid) {
+							/* different keyid, so done bundling */
+							break;
+						}
+					}
+					m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
+					if (m == NULL) {
+						SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+						return (ENOMEM);
+					}
+					/* Do clear IP_DF ? */
+					if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
+						no_fragmentflg = 0;
+					}
+					/* update our MTU size */
+					if (mtu > (fwd->send_size + dmtu))
+						mtu -= (fwd->send_size + dmtu);
+					else
+						mtu = 0;
+					data_list[bundle_at++] = fwd;
+					if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
+						break;
+					}
+				} else {
+					/* can't fit so we are done */
+					break;
+				}
+			}
+		}
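+		/*
+		 * data_list[] now holds up to SCTP_MAX_DATA_BUNDLING chunks
+		 * marked for resend to this net; bundling stopped at the
+		 * first chunk that no longer fit in the remaining MTU.
+		 */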
+		/* Is there something to send for this destination? */
+		if (m) {
+			/*
+			 * No matter if we fail or succeed we should start a
+			 * timer. A failure is like a lost IP packet :-)
+			 */
+			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+				/*
+				 * no timer running on this destination,
+				 * restart it.
+				 */
+				sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+				tmr_started = 1;
+			}
+			/* Now lets send it, if there is anything to send :> */
+			if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
+			                                        (struct sockaddr *)&net->ro._l_addr, m,
+			                                        auth_offset, auth, auth_keyid,
+			                                        no_fragmentflg, 0, 0,
+			                                        inp->sctp_lport, stcb->rport, htonl(stcb->asoc.peer_vtag),
+			                                        net->port, NULL,
+#if defined(__FreeBSD__)
+			                                        0, 0,
+#endif
+			                                        so_locked))) {
+				/* error, we could not output */
+				SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+				if (error == ENOBUFS) {
+					asoc->ifp_had_enobuf = 1;
+					SCTP_STAT_INCR(sctps_lowlevelerr);
+				}
+				return (error);
+			} else {
+				asoc->ifp_had_enobuf = 0;
+			}
+			endofchain = NULL;
+			auth = NULL;
+			auth_offset = 0;
+			/* For HB's */
+			/*
+			 * We don't want to mark the net->sent time here
+			 * since we use this for HB, and retrans cannot
+			 * measure RTT
+			 */
+			/* (void)SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
+
+			/* For auto-close */
+			cnt_thru++;
+			if (*now_filled == 0) {
+				(void)SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
+				*now = asoc->time_last_sent;
+				*now_filled = 1;
+			} else {
+				asoc->time_last_sent = *now;
+			}
+			*cnt_out += bundle_at;
+#ifdef SCTP_AUDITING_ENABLED
+			sctp_audit_log(0xC4, bundle_at);
+#endif
+			if (bundle_at) {
+				tsns_sent = data_list[0]->rec.data.tsn;
+			}
+			for (i = 0; i < bundle_at; i++) {
+				SCTP_STAT_INCR(sctps_sendretransdata);
+				data_list[i]->sent = SCTP_DATAGRAM_SENT;
+				/*
+				 * When we have revoked data and we
+				 * retransmit it, we clear the revoked
+				 * flag since this flag dictates whether we
+				 * subtracted it from the flight size.
+				 */
+				if (data_list[i]->rec.data.chunk_was_revoked) {
+					/* Deflate the cwnd */
+					data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
+					data_list[i]->rec.data.chunk_was_revoked = 0;
+				}
+				data_list[i]->snd_count++;
+				sctp_ucount_decr(asoc->sent_queue_retran_cnt);
+				/* record the time */
+				data_list[i]->sent_rcv_time = asoc->time_last_sent;
+				if (data_list[i]->book_size_scale) {
+					/*
+					 * need to double the book size on
+					 * this one
+					 */
+					data_list[i]->book_size_scale = 0;
+					/* Since we double the booksize, we must
+					 * also double the output queue size, since it
+					 * gets shrunk by this amount when we free.
+					 */
+					atomic_add_int(&((asoc)->total_output_queue_size),data_list[i]->book_size);
+					data_list[i]->book_size *= 2;
+
+
+				} else {
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
+						sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
+						      asoc->peers_rwnd, data_list[i]->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
+					}
+					asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
+									    (uint32_t) (data_list[i]->send_size +
+											SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)));
+				}
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_RSND,
+						       data_list[i]->whoTo->flight_size,
+						       data_list[i]->book_size,
+						       (uint32_t)(uintptr_t)data_list[i]->whoTo,
+						       data_list[i]->rec.data.tsn);
+				}
+				sctp_flight_size_increase(data_list[i]);
+				sctp_total_flight_increase(stcb, data_list[i]);
+				if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
+					/* SWS sender side engages */
+					asoc->peers_rwnd = 0;
+				}
+				if ((i == 0) &&
+				    (data_list[i]->rec.data.doing_fast_retransmit)) {
+					SCTP_STAT_INCR(sctps_sendfastretrans);
+					if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
+					    (tmr_started == 0)) {
+						/*-
+						 * ok we just fast-retrans'd
+						 * the lowest TSN, i.e the
+						 * first on the list. In
+						 * this case we want to give
+						 * some more time to get a
+						 * SACK back without a
+						 * t3-expiring.
+						 */
+						sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
+								SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
+						sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+					}
+				}
+			}
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
+			}
+#ifdef SCTP_AUDITING_ENABLED
+			sctp_auditing(21, inp, stcb, NULL);
+#endif
+		} else {
+			/* None will fit */
+			return (1);
+		}
+		if (asoc->sent_queue_retran_cnt <= 0) {
+			/* all done we have no more to retran */
+			asoc->sent_queue_retran_cnt = 0;
+			break;
+		}
+		if (one_chunk) {
+			/* No more room in rwnd */
+			return (1);
+		}
+		/* stop the for loop here. we sent out a packet */
+		break;
+	}
+	return (0);
+}
+
+static void
+sctp_timer_validation(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_association *asoc)
+{
+	struct sctp_nets *net;
+
+	/* Validate that a timer is running somewhere */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
+			/* Here is a timer */
+			return;
+		}
+	}
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	/* Gak, we did not have a timer somewhere */
+	SCTPDBG(SCTP_DEBUG_OUTPUT3, "Deadlock avoided starting timer on a dest at retran\n");
+	if (asoc->alternate) {
+		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->alternate);
+	} else {
+		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
+	}
+	return;
+}
+
+void
+sctp_chunk_output (struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    int from_where,
+    int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	/*-
+	 * Ok, this is the generic chunk service queue. We must do the
+	 * following:
+	 * - See if there are retransmits pending; if so, we must
+	 *   do these first.
+	 * - Service the stream queue that is next, moving any
+	 *   message (note I must get a complete message, i.e.
+	 *   FIRST/MIDDLE and LAST, to the out queue in one pass) and assigning
+	 *   TSNs.
+	 * - Check to see if the cwnd/rwnd allows any output; if so, we
+	 *   go ahead and formulate and send the low-level chunks, making sure
+	 *   to combine any control in the control chunk queue also.
+	 */
+	struct sctp_association *asoc;
+	struct sctp_nets *net;
+	int error = 0, num_out, tot_out = 0, ret = 0, reason_code;
+	unsigned int burst_cnt = 0;
+	struct timeval now;
+	int now_filled = 0;
+	int nagle_on;
+	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+	int un_sent = 0;
+	int fr_done;
+	unsigned int tot_frs = 0;
+
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(inp));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(inp));
+	}
+#endif
+	asoc = &stcb->asoc;
+do_it_again:
+	/* The Nagle algorithm is only applied when handling a send call. */
+	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
+			nagle_on = 0;
+		} else {
+			nagle_on = 1;
+		}
+	} else {
+		nagle_on = 0;
+	}
+	SCTP_TCB_LOCK_ASSERT(stcb);
+
+	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
+
+	if ((un_sent <= 0) &&
+	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
+	    (TAILQ_EMPTY(&asoc->asconf_send_queue)) &&
+	    (asoc->sent_queue_retran_cnt == 0) &&
+	    (asoc->trigger_reset == 0)) {
+		/* Nothing to do unless there is something left to be sent */
+		return;
+	}
+	/* If we have something to send, data or control, AND
+	 * a SACK timer running, piggy-back the SACK.
+	 */
+	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
+		sctp_send_sack(stcb, so_locked);
+		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
+	}
+	while (asoc->sent_queue_retran_cnt) {
+		/*-
+		 * Ok, it is retransmission time only, we send out only ONE
+		 * packet with a single call off to the retran code.
+		 */
+		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
+			/*-
+			 * Special hook for handling cookies discarded
+			 * by the peer that carried data. Send cookie-ack only
+			 * and then the next call will get the retrans.
+			 */
+			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
+						    from_where,
+						    &now, &now_filled, frag_point, so_locked);
+			return;
+		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
+			/* if its not from a HB then do it */
+			fr_done = 0;
+			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done, so_locked);
+			if (fr_done) {
+				tot_frs++;
+			}
+		} else {
+			/*
+			 * it's from any other place, we don't allow retran
+			 * output (only control)
+			 */
+			ret = 1;
+		}
+		if (ret > 0) {
+			/* Can't send any more */
+			/*-
+			 * Now let's push out control by calling med-level
+			 * output once. This assures that we WILL send HBs
+			 * if queued, too.
+			 */
+			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
+						    from_where,
+						    &now, &now_filled, frag_point, so_locked);
+#ifdef SCTP_AUDITING_ENABLED
+			sctp_auditing(8, inp, stcb, NULL);
+#endif
+			sctp_timer_validation(inp, stcb, asoc);
+			return;
+		}
+		if (ret < 0) {
+			/*-
+			 * The count was off; retran is not happening, so
+			 * fall through to the normal transmission path.
+			 */
+#ifdef SCTP_AUDITING_ENABLED
+			sctp_auditing(9, inp, stcb, NULL);
+#endif
+			if (ret == SCTP_RETRAN_EXIT) {
+				return;
+			}
+			break;
+		}
+		if (from_where == SCTP_OUTPUT_FROM_T3) {
+			/* Only one transmission allowed out of a timeout */
+#ifdef SCTP_AUDITING_ENABLED
+			sctp_auditing(10, inp, stcb, NULL);
+#endif
+			/* Push out any control */
+			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, from_where,
+						    &now, &now_filled, frag_point, so_locked);
+			return;
+		}
+		if ((asoc->fr_max_burst > 0) && (tot_frs >= asoc->fr_max_burst)) {
+			/* Hit FR burst limit */
+			return;
+		}
+		if ((num_out == 0) && (ret == 0)) {
+			/* No more retrans to send */
+			break;
+		}
+	}
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_auditing(12, inp, stcb, NULL);
+#endif
+	/* Check for bad destinations, if they exist move chunks around. */
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+			/*-
+			 * If possible, move things off of this address. We
+			 * may still send below due to the dormant state, but
+			 * we try to find an alternate address to send to,
+			 * and if we have one we move all queued data on the
+			 * out wheel to this alternate address.
+			 */
+			if (net->ref_count > 1)
+				sctp_move_chunks_from_net(stcb, net);
+		} else {
+			/*-
+			 * if ((asoc->sat_network) || (net->addr_is_local))
+			 * { burst_limit = asoc->max_burst *
+			 * SCTP_SAT_NETWORK_BURST_INCR; }
+			 */
+			if (asoc->max_burst > 0) {
+				if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst)) {
+					if ((net->flight_size + (asoc->max_burst * net->mtu)) < net->cwnd) {
+						/* JRS - Use the congestion control given in the congestion control module */
+						asoc->cc_functions.sctp_cwnd_update_after_output(stcb, net, asoc->max_burst);
+						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+							sctp_log_maxburst(stcb, net, 0, asoc->max_burst, SCTP_MAX_BURST_APPLIED);
+						}
+						SCTP_STAT_INCR(sctps_maxburstqueued);
+					}
+					net->fast_retran_ip = 0;
+				} else {
+					if (net->flight_size == 0) {
+						/* Should be decaying the cwnd here */
+						;
+					}
+				}
+			}
+		}
+
+	}
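+	/*
+	 * Main output loop: keep calling the medium-level output routine
+	 * until it reports an error, produces nothing, Nagle holds back a
+	 * sub-MTU remainder, nothing is left to send, or the max-burst packet
+	 * limit is hit (unless cwnd-based max-burst is in use).
+	 */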
+	burst_cnt = 0;
+	do {
+		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
+					      &reason_code, 0, from_where,
+					      &now, &now_filled, frag_point, so_locked);
+		if (error) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT1, "Error %d was returned from med-c-op\n", error);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+				sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
+			}
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+				sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
+				sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
+			}
+			break;
+		}
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "m-c-o put out %d\n", num_out);
+
+		tot_out += num_out;
+		burst_cnt++;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+			sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
+			if (num_out == 0) {
+				sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
+			}
+		}
+		if (nagle_on) {
+			/*
+			 * When the Nagle algorithm is used, look at how much
+			 * is unsent, then if it's smaller than an MTU and we
+			 * have data in flight we stop, except if we are
+			 * handling a fragmented user message.
+			 */
+			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+			           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
+			    (stcb->asoc.total_flight > 0)) { 
+/*	&&		     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {*/
+				break;
+			}
+		}
+		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
+		    TAILQ_EMPTY(&asoc->send_queue) &&
+		    sctp_is_there_unsent_data(stcb, so_locked) == 0) {
+			/* Nothing left to send */
+			break;
+		}
+		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
+			/* Nothing left to send */
+			break;
+		}
+	} while (num_out &&
+	         ((asoc->max_burst == 0) ||
+		  SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) ||
+		  (burst_cnt < asoc->max_burst)));
+
+	if (SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) == 0) {
+		if ((asoc->max_burst > 0) && (burst_cnt >= asoc->max_burst)) {
+			SCTP_STAT_INCR(sctps_maxburstqueued);
+			asoc->burst_limit_applied = 1;
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_MAXBURST_ENABLE) {
+				sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
+			}
+		} else {
+			asoc->burst_limit_applied = 0;
+		}
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+		sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Ok, we have put out %d chunks\n",
+		tot_out);
+
+	/*-
+	 * Now we need to clean up the control chunk chain if an ECNE is on
+	 * it. It must be marked as UNSENT again so the next call will continue
+	 * to send it until we get a CWR to remove it.
+	 */
+	if (stcb->asoc.ecn_echo_cnt_onq)
+		sctp_fix_ecn_echo(asoc);
+
+	if (stcb->asoc.trigger_reset) {
+		if (sctp_send_stream_reset_out_if_possible(stcb, so_locked) == 0)  {
+			goto do_it_again;
+		}
+	}
+	return;
+}
+
+
+int
+sctp_output(
+	struct sctp_inpcb *inp,
+#if defined(__Panda__)
+	pakhandle_type m,
+#else
+	struct mbuf *m,
+#endif
+	struct sockaddr *addr,
+#if defined(__Panda__)
+	pakhandle_type control,
+#else
+	struct mbuf *control,
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+	struct thread *p,
+#elif defined(__Windows__)
+	PKTHREAD p,
+#else
+#if defined(__APPLE__)
+	struct proc *p SCTP_UNUSED,
+#else
+	struct proc *p,
+#endif
+#endif
+	int flags)
+{
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		return (EINVAL);
+	}
+
+	if (inp->sctp_socket == NULL) {
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		return (EINVAL);
+	}
+	return (sctp_sosend(inp->sctp_socket,
+			    addr,
+			    (struct uio *)NULL,
+			    m,
+			    control,
+#if defined(__APPLE__) || defined(__Panda__)
+			    flags
+#else
+			    flags, p
+#endif
+			));
+}
+
+void
+send_forward_tsn(struct sctp_tcb *stcb,
+		 struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk, *at, *tp1, *last;
+	struct sctp_forward_tsn_chunk *fwdtsn;
+	struct sctp_strseq *strseq;
+	struct sctp_strseq_mid *strseq_m;
+	uint32_t advance_peer_ack_point;
+	unsigned int cnt_of_space, i, ovh;
+	unsigned int space_needed;
+	unsigned int cnt_of_skipped = 0;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
+			/* mark it as unsent */
+			chk->sent = SCTP_DATAGRAM_UNSENT;
+			chk->snd_count = 0;
+			/* Do we correct its output location? */
+			if (chk->whoTo) {
+				sctp_free_remote_addr(chk->whoTo);
+				chk->whoTo = NULL;
+			}
+			goto sctp_fill_in_rest;
+		}
+	}
+	/* Ok if we reach here we must build one */
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		return;
+	}
+	asoc->fwd_tsn_cnt++;
+	chk->copy_by_ref = 0;
+	/* 
+	 * We don't do the old thing here since
+	 * this is used not for on-wire but to
+	 * tell if we are sending a fwd-tsn by
+	 * the stack during output. And if it's
+	 * an IFORWARD or a FORWARD it is a fwd-tsn.
+	 */
+	chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = 0;
+	chk->asoc = asoc;
+	chk->whoTo = NULL;
+	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (chk->data == NULL) {
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		return;
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
+	asoc->ctrl_queue_cnt++;
+sctp_fill_in_rest:
+	/*-
+	 * Here we go through and fill out the part that deals with
+	 * stream/seq of the ones we skip.
+	 */
+	SCTP_BUF_LEN(chk->data) = 0;
+	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
+		if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
+		    (at->sent != SCTP_DATAGRAM_NR_ACKED)) {
+			/* no more to look at */
+			break;
+		}
+		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
+			/* We don't report these */
+			continue;
+		}
+		cnt_of_skipped++;
+	}
+	if (asoc->idata_supported) {
+		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+		                (cnt_of_skipped * sizeof(struct sctp_strseq_mid)));
+	} else {
+		space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
+		                (cnt_of_skipped * sizeof(struct sctp_strseq)));
+	}
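+	/*
+	 * With I-DATA each skipped entry is a sctp_strseq_mid (sid, flags
+	 * and a 32-bit MID); the classic FORWARD-TSN entry is just a
+	 * sid/ssn pair (sctp_strseq).
+	 */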
+	cnt_of_space = (unsigned int)M_TRAILINGSPACE(chk->data);
+
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+		ovh = SCTP_MIN_OVERHEAD;
+	} else {
+		ovh = SCTP_MIN_V4_OVERHEAD;
+	}
+	if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
+		/* trim to a mtu size */
+		cnt_of_space = asoc->smallest_mtu - ovh;
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+		sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+			       0xff, 0, cnt_of_skipped,
+			       asoc->advanced_peer_ack_point);
+	}
+	advance_peer_ack_point = asoc->advanced_peer_ack_point;
+	if (cnt_of_space < space_needed) {
+		/*-
+		 * ok we must trim down the chunk by lowering the
+		 * advance peer ack point.
+		 */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+				       0xff, 0xff, cnt_of_space,
+				       space_needed);
+		}
+		cnt_of_skipped = cnt_of_space - sizeof(struct sctp_forward_tsn_chunk);
+		if (asoc->idata_supported) {
+			cnt_of_skipped /= sizeof(struct sctp_strseq_mid);
+		} else {
+			cnt_of_skipped /= sizeof(struct sctp_strseq);
+		}
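+		/* cnt_of_skipped now holds how many entries fit in the trimmed space. */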
+		/*-
+		 * Go through and find the TSN that will be the one
+		 * we report.
+		 */
+		at = TAILQ_FIRST(&asoc->sent_queue);
+		if (at != NULL) {
+			for (i = 0; i < cnt_of_skipped; i++) {
+				tp1 = TAILQ_NEXT(at, sctp_next);
+				if (tp1 == NULL) {
+					break;
+				}
+				at = tp1;
+			}
+		}
+		if (at && SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
+			sctp_misc_ints(SCTP_FWD_TSN_CHECK,
+				       0xff, cnt_of_skipped, at->rec.data.tsn,
+				       asoc->advanced_peer_ack_point);
+		}
+		last = at;
+		/*-
+		 * last now points to last one I can report, update
+		 * peer ack point
+		 */
+		if (last) {
+			advance_peer_ack_point = last->rec.data.tsn;
+		}
+		if (asoc->idata_supported) {
+			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
+			               cnt_of_skipped * sizeof(struct sctp_strseq_mid);
+		} else {
+			space_needed = sizeof(struct sctp_forward_tsn_chunk) +
+			               cnt_of_skipped * sizeof(struct sctp_strseq);
+		}
+	}
+	chk->send_size = space_needed;
+	/* Setup the chunk */
+	fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
+	fwdtsn->ch.chunk_length = htons(chk->send_size);
+	fwdtsn->ch.chunk_flags = 0;
+	if (asoc->idata_supported) {
+		fwdtsn->ch.chunk_type = SCTP_IFORWARD_CUM_TSN;
+	} else {
+		fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
+	}
+	fwdtsn->new_cumulative_tsn = htonl(advance_peer_ack_point);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	fwdtsn++;
+	/*-
+	 * Move pointer to after the fwdtsn and transfer to the
+	 * strseq pointer.
+	 */
+	if (asoc->idata_supported) {
+		strseq_m = (struct sctp_strseq_mid *)fwdtsn;
+		strseq = NULL;
+	} else {
+		strseq = (struct sctp_strseq *)fwdtsn;
+		strseq_m = NULL;
+	}
+	/*-
+	 * Now populate the strseq list. This is done blindly, without
+	 * pulling out duplicate stream info. This is inefficient but won't
+	 * harm the process, since the peer looks at these in sequence and
+	 * will thus release everything. It could mean we exceed the PMTU
+	 * and chop off some entries that we could have included, but this
+	 * is unlikely (1432/4 would mean 300+ stream/seq pairs would have
+	 * to be reported in one FWD-TSN). With a bit of work we could later
+	 * optimize this and pull out duplicates, but that adds more
+	 * overhead. So for now... not!
+	 */
+	i = 0;
+	TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
+		if (i >= cnt_of_skipped) {
+			break;
+		}
+		if (!asoc->idata_supported && (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
+			/* We don't report these */
+			continue;
+		}
+		if (at->rec.data.tsn == advance_peer_ack_point) {
+			at->rec.data.fwd_tsn_cnt = 0;
+		}
+		if (asoc->idata_supported) {
+			strseq_m->sid = htons(at->rec.data.sid);
+			if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
+				strseq_m->flags = htons(PR_SCTP_UNORDERED_FLAG);
+			} else {
+				strseq_m->flags = 0;
+			}
+			strseq_m->mid = htonl(at->rec.data.mid);
+			strseq_m++;
+		} else {
+			strseq->sid = htons(at->rec.data.sid);
+			strseq->ssn = htons((uint16_t)at->rec.data.mid);
+			strseq++;
+		}
+		i++;
+	}
+	return;
+}
+
+void
+sctp_send_sack(struct sctp_tcb *stcb, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+	SCTP_UNUSED
+#endif
+)
+{
+	/*-
+	 * Queue up a SACK or NR-SACK in the control queue.
+	 * We must first check to see if a SACK or NR-SACK is
+	 * already on the control queue.
+	 * If so, we remove the old one and reuse its chunk.
+	 */
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk, *a_chk;
+	struct sctp_sack_chunk *sack;
+	struct sctp_nr_sack_chunk *nr_sack;
+	struct sctp_gap_ack_block *gap_descriptor;
+	const struct sack_track *selector;
+	int mergeable = 0;
+	int offset;
+	caddr_t limit;
+	uint32_t *dup;
+	int limit_reached = 0;
+	unsigned int i, siz, j;
+	unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
+	int num_dups = 0;
+	int space_req;
+	uint32_t highest_tsn;
+	uint8_t flags;
+	uint8_t type;
+	uint8_t tsn_map;
+
+	if (stcb->asoc.nrsack_supported == 1) {
+		type = SCTP_NR_SELECTIVE_ACK;
+	} else {
+		type = SCTP_SELECTIVE_ACK;
+	}
+	a_chk = NULL;
+	asoc = &stcb->asoc;
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if (asoc->last_data_chunk_from == NULL) {
+		/* Hmm we never received anything */
+		return;
+	}
+	sctp_slide_mapping_arrays(stcb);
+	sctp_set_rwnd(stcb, asoc);
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if (chk->rec.chunk_id.id == type) {
+			/* Hmm, found a sack already on queue, remove it */
+			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+			asoc->ctrl_queue_cnt--;
+			a_chk = chk;
+			if (a_chk->data) {
+				sctp_m_freem(a_chk->data);
+				a_chk->data = NULL;
+			}
+			if (a_chk->whoTo) {
+				sctp_free_remote_addr(a_chk->whoTo);
+				a_chk->whoTo = NULL;
+			}
+			break;
+		}
+	}
+	if (a_chk == NULL) {
+		sctp_alloc_a_chunk(stcb, a_chk);
+		if (a_chk == NULL) {
+			/* No memory so we drop the idea, and set a timer */
+			if (stcb->asoc.delayed_ack) {
+				sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+				                stcb->sctp_ep, stcb, NULL,
+				                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
+				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+				    stcb->sctp_ep, stcb, NULL);
+			} else {
+				stcb->asoc.send_sack = 1;
+			}
+			return;
+		}
+		a_chk->copy_by_ref = 0;
+		a_chk->rec.chunk_id.id = type;
+		a_chk->rec.chunk_id.can_take_data = 1;
+	}
+	/* Clear our pkt counts */
+	asoc->data_pkts_seen = 0;
+
+	a_chk->flags = 0;
+	a_chk->asoc = asoc;
+	a_chk->snd_count = 0;
+	a_chk->send_size = 0;	/* fill in later */
+	a_chk->sent = SCTP_DATAGRAM_UNSENT;
+	a_chk->whoTo = NULL;
+
+	if (!(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_REACHABLE)) {
+		/*-
+		 * Ok, the destination for the SACK is unreachable, let's see if
+		 * we can select an alternate to asoc->last_data_chunk_from.
+		 */
+		a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
+		if (a_chk->whoTo == NULL) {
+			/* Nope, no alternate */
+			a_chk->whoTo = asoc->last_data_chunk_from;
+		}
+	} else {
+		a_chk->whoTo = asoc->last_data_chunk_from;
+	}
+	if (a_chk->whoTo) {
+		atomic_add_int(&a_chk->whoTo->ref_count, 1);
+	}
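+	/* Use the higher of the two mapping array high-water marks as the highest TSN to report. */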
+	if (SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map)) {
+		highest_tsn = asoc->highest_tsn_inside_map;
+	} else {
+		highest_tsn = asoc->highest_tsn_inside_nr_map;
+	}
+	if (highest_tsn == asoc->cumulative_tsn) {
+		/* no gaps */
+		if (type == SCTP_SELECTIVE_ACK) {
+			space_req = sizeof(struct sctp_sack_chunk);
+		} else {
+			space_req = sizeof(struct sctp_nr_sack_chunk);
+		}
+	} else {
+		/* gaps get a cluster */
+		space_req = MCLBYTES;
+	}
+	/* Ok now lets formulate a MBUF with our sack */
+	a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_NOWAIT, 1, MT_DATA);
+	if ((a_chk->data == NULL) ||
+	    (a_chk->whoTo == NULL)) {
+		/* rats, no mbuf memory */
+		if (a_chk->data) {
+			/* was a problem with the destination */
+			sctp_m_freem(a_chk->data);
+			a_chk->data = NULL;
+		}
+		sctp_free_a_chunk(stcb, a_chk, so_locked);
+		/* sa_ignore NO_NULL_CHK */
+		if (stcb->asoc.delayed_ack) {
+			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
+			                stcb->sctp_ep, stcb, NULL,
+			                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
+			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
+			    stcb->sctp_ep, stcb, NULL);
+		} else {
+			stcb->asoc.send_sack = 1;
+		}
+		return;
+	}
+	/* ok, lets go through and fill it in */
+	SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
+	space = (unsigned int)M_TRAILINGSPACE(a_chk->data);
+	if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
+		space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
+	}
+	limit = mtod(a_chk->data, caddr_t);
+	limit += space;
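+	/* 'limit' now marks the end of the space (at most one MTU worth) available for gap blocks and duplicate TSNs. */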
+
+	flags = 0;
+
+	if ((asoc->sctp_cmt_on_off > 0) &&
+	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
+		/*-
+		 * CMT DAC algorithm: if 2 (i.e., binary 10) packets have been
+		 * received, set the high bit to 1, else 0. Reset pkts_rcvd.
+		 */
+		flags |= (asoc->cmt_dac_pkts_rcvd << 6);
+		asoc->cmt_dac_pkts_rcvd = 0;
+	}
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
+	stcb->asoc.cumack_log_atsnt++;
+	if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
+		stcb->asoc.cumack_log_atsnt = 0;
+	}
+#endif
+	/* reset the reader's interpretation */
+	stcb->freed_by_sorcv_sincelast = 0;
+
+	if (type == SCTP_SELECTIVE_ACK) {
+		sack = mtod(a_chk->data, struct sctp_sack_chunk *);
+		nr_sack = NULL;
+		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
+		if (highest_tsn > asoc->mapping_array_base_tsn) {
+			siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+		} else {
+			siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
+		}
+	} else {
+		sack = NULL;
+		nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
+		gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
+		if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
+			siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+		} else {
+			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
+		}
+	}
+
+	if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
+		offset = 1;
+	} else {
+		offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+	}
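+	/*
+	 * 'offset' translates bit positions in the mapping array into gap
+	 * block start/end values relative to the cumulative TSN, as the
+	 * SACK wire format requires.
+	 */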
+	if (((type == SCTP_SELECTIVE_ACK) &&
+	     SCTP_TSN_GT(highest_tsn, asoc->cumulative_tsn)) ||
+	    ((type == SCTP_NR_SELECTIVE_ACK) &&
+	     SCTP_TSN_GT(asoc->highest_tsn_inside_map, asoc->cumulative_tsn))) {
+		/* we have a gap .. maybe */
+		for (i = 0; i < siz; i++) {
+			tsn_map = asoc->mapping_array[i];
+			if (type == SCTP_SELECTIVE_ACK) {
+				tsn_map |= asoc->nr_mapping_array[i];
+			}
+			if (i == 0) {
+				/*
+				 * Clear all bits corresponding to TSNs
+				 * smaller or equal to the cumulative TSN.
+				 */
+				tsn_map &= (~0U << (1 - offset));
+			}
+			selector = &sack_array[tsn_map];
+			if (mergeable && selector->right_edge) {
+				/*
+				 * Backup, left and right edges were ok to
+				 * merge.
+				 */
+				num_gap_blocks--;
+				gap_descriptor--;
+			}
+			if (selector->num_entries == 0)
+				mergeable = 0;
+			else {
+				for (j = 0; j < selector->num_entries; j++) {
+					if (mergeable && selector->right_edge) {
+						/*
+						 * do a merge by NOT setting
+						 * the left side
+						 */
+						mergeable = 0;
+					} else {
+						/*
+						 * no merge, set the left
+						 * side
+						 */
+						mergeable = 0;
+						gap_descriptor->start = htons((selector->gaps[j].start + offset));
+					}
+					gap_descriptor->end = htons((selector->gaps[j].end + offset));
+					num_gap_blocks++;
+					gap_descriptor++;
+					if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+						/* no more room */
+						limit_reached = 1;
+						break;
+					}
+				}
+				if (selector->left_edge) {
+					mergeable = 1;
+				}
+			}
+			if (limit_reached) {
+				/* Reached the limit stop */
+				break;
+			}
+			offset += 8;
+		}
+	}
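+	/* For an NR-SACK, append the non-renegable gap blocks taken from the nr_mapping_array. */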
+	if ((type == SCTP_NR_SELECTIVE_ACK) &&
+	    (limit_reached == 0)) {
+
+		mergeable = 0;
+
+		if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn) {
+			siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
+		} else {
+			siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
+		}
+
+		if (SCTP_TSN_GT(asoc->mapping_array_base_tsn, asoc->cumulative_tsn)) {
+			offset = 1;
+		} else {
+			offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
+		}
+		if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn)) {
+			/* we have a gap .. maybe */
+			for (i = 0; i < siz; i++) {
+				tsn_map = asoc->nr_mapping_array[i];
+				if (i == 0) {
+					/*
+					 * Clear all bits corresponding to TSNs
+					 * smaller or equal to the cumulative TSN.
+					 */
+					tsn_map &= (~0U << (1 - offset));
+				}
+				selector = &sack_array[tsn_map];
+				if (mergeable && selector->right_edge) {
+					/*
+					* Backup, left and right edges were ok to
+					* merge.
+					*/
+					num_nr_gap_blocks--;
+					gap_descriptor--;
+				}
+				if (selector->num_entries == 0)
+					mergeable = 0;
+				else {
+					for (j = 0; j < selector->num_entries; j++) {
+						if (mergeable && selector->right_edge) {
+							/*
+							* do a merge by NOT setting
+							* the left side
+							*/
+							mergeable = 0;
+						} else {
+							/*
+							* no merge, set the left
+							* side
+							*/
+							mergeable = 0;
+							gap_descriptor->start = htons((selector->gaps[j].start + offset));
+						}
+						gap_descriptor->end = htons((selector->gaps[j].end + offset));
+						num_nr_gap_blocks++;
+						gap_descriptor++;
+						if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
+							/* no more room */
+							limit_reached = 1;
+							break;
+						}
+					}
+					if (selector->left_edge) {
+						mergeable = 1;
+					}
+				}
+				if (limit_reached) {
+					/* Reached the limit stop */
+					break;
+				}
+				offset += 8;
+			}
+		}
+	}
+	/* now we must add any dups we are going to report. */
+	if ((limit_reached == 0) && (asoc->numduptsns)) {
+		dup = (uint32_t *) gap_descriptor;
+		for (i = 0; i < asoc->numduptsns; i++) {
+			*dup = htonl(asoc->dup_tsns[i]);
+			dup++;
+			num_dups++;
+			if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
+				/* no more room */
+				break;
+			}
+		}
+		asoc->numduptsns = 0;
+	}
+	/*
+	 * now that the chunk is prepared queue it to the control chunk
+	 * queue.
+	 */
+	if (type == SCTP_SELECTIVE_ACK) {
+		a_chk->send_size = (uint16_t)(sizeof(struct sctp_sack_chunk) +
+		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
+		                              num_dups * sizeof(int32_t));
+		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
+		sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+		sack->sack.a_rwnd = htonl(asoc->my_rwnd);
+		sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
+		sack->sack.num_dup_tsns = htons(num_dups);
+		sack->ch.chunk_type = type;
+		sack->ch.chunk_flags = flags;
+		sack->ch.chunk_length = htons(a_chk->send_size);
+	} else {
+		a_chk->send_size = (uint16_t)(sizeof(struct sctp_nr_sack_chunk) +
+		                              (num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
+		                              num_dups * sizeof(int32_t));
+		SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
+		nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
+		nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
+		nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
+		nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
+		nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
+		nr_sack->nr_sack.reserved = 0;
+		nr_sack->ch.chunk_type = type;
+		nr_sack->ch.chunk_flags = flags;
+		nr_sack->ch.chunk_length = htons(a_chk->send_size);
+	}
+	TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
+	asoc->my_last_reported_rwnd = asoc->my_rwnd;
+	asoc->ctrl_queue_cnt++;
+	asoc->send_sack = 0;
+	SCTP_STAT_INCR(sctps_sendsacks);
+	return;
+}
+
+void
+sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	struct mbuf *m_abort, *m, *m_last;
+	struct mbuf *m_out, *m_end = NULL;
+	struct sctp_abort_chunk *abort;
+	struct sctp_auth_chunk *auth = NULL;
+	struct sctp_nets *net;
+	uint32_t vtag;
+	uint32_t auth_offset = 0;
+	int error;
+	uint16_t cause_len, chunk_len, padding_len;
+
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	}
+#endif
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	/*-
+	 * Add an AUTH chunk, if chunk requires it and save the offset into
+	 * the chain for AUTH
+	 */
+	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
+	                                stcb->asoc.peer_auth_chunks)) {
+		m_out = sctp_add_auth_chunk(NULL, &m_end, &auth, &auth_offset,
+					    stcb, SCTP_ABORT_ASSOCIATION);
+		SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+	} else {
+		m_out = NULL;
+	}
+	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_NOWAIT, 1, MT_HEADER);
+	if (m_abort == NULL) {
+		if (m_out) {
+			sctp_m_freem(m_out);
+		}
+		if (operr) {
+			sctp_m_freem(operr);
+		}
+		return;
+	}
+	/* link in any error */
+	SCTP_BUF_NEXT(m_abort) = operr;
+	cause_len = 0;
+	m_last = NULL;
+	for (m = operr; m; m = SCTP_BUF_NEXT(m)) {
+		cause_len += (uint16_t)SCTP_BUF_LEN(m);
+		if (SCTP_BUF_NEXT(m) == NULL) {
+			m_last = m;
+		}
+	}
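+	/* cause_len is the total length of the error-cause chain; m_last remembers its tail so padding can be appended there. */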
+	SCTP_BUF_LEN(m_abort) = sizeof(struct sctp_abort_chunk);
+	chunk_len = (uint16_t)sizeof(struct sctp_abort_chunk) + cause_len;
+	padding_len = SCTP_SIZE32(chunk_len) - chunk_len;
+	if (m_out == NULL) {
+		/* NO Auth chunk prepended, so reserve space in front */
+		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
+		m_out = m_abort;
+	} else {
+		/* Put AUTH chunk at the front of the chain */
+		SCTP_BUF_NEXT(m_end) = m_abort;
+	}
+	if (stcb->asoc.alternate) {
+		net = stcb->asoc.alternate;
+	} else {
+		net = stcb->asoc.primary_destination;
+	}
+	/* Fill in the ABORT chunk header. */
+	abort = mtod(m_abort, struct sctp_abort_chunk *);
+	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
+	if (stcb->asoc.peer_vtag == 0) {
+		/* This happens iff the assoc is in COOKIE-WAIT state. */
+		vtag = stcb->asoc.my_vtag;
+		abort->ch.chunk_flags = SCTP_HAD_NO_TCB;
+	} else {
+		vtag = stcb->asoc.peer_vtag;
+		abort->ch.chunk_flags = 0;
+	}
+	abort->ch.chunk_length = htons(chunk_len);
+	/* Add padding, if necessary. */
+	if (padding_len > 0) {
+		if ((m_last == NULL) ||
+		    (sctp_add_pad_tombuf(m_last, padding_len) == NULL)) {
+			sctp_m_freem(m_out);
+			return;
+		}
+	}
+	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
+	                                        (struct sockaddr *)&net->ro._l_addr,
+	                                        m_out, auth_offset, auth, stcb->asoc.authinfo.active_keyid, 1, 0, 0,
+	                                        stcb->sctp_ep->sctp_lport, stcb->rport, htonl(vtag),
+	                                        stcb->asoc.primary_destination->port, NULL,
+#if defined(__FreeBSD__)
+	                                        0, 0,
+#endif
+	                                        so_locked))) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+		if (error == ENOBUFS) {
+			stcb->asoc.ifp_had_enobuf = 1;
+			SCTP_STAT_INCR(sctps_lowlevelerr);
+		}
+	} else {
+		stcb->asoc.ifp_had_enobuf = 0;
+	}
+	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+}
+
+void
+sctp_send_shutdown_complete(struct sctp_tcb *stcb,
+                            struct sctp_nets *net,
+                            int reflect_vtag)
+{
+	/* formulate and SEND a SHUTDOWN-COMPLETE */
+	struct mbuf *m_shutdown_comp;
+	struct sctp_shutdown_complete_chunk *shutdown_complete;
+	uint32_t vtag;
+	int error;
+	uint8_t flags;
+
+	m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_NOWAIT, 1, MT_HEADER);
+	if (m_shutdown_comp == NULL) {
+		/* no mbuf's */
+		return;
+	}
+	if (reflect_vtag) {
+		flags = SCTP_HAD_NO_TCB;
+		vtag = stcb->asoc.my_vtag;
+	} else {
+		flags = 0;
+		vtag = stcb->asoc.peer_vtag;
+	}
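+	/*
+	 * When reflecting, we use our own verification tag and set the T bit
+	 * (SCTP_HAD_NO_TCB) so the peer knows the tag was reflected.
+	 */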
+	shutdown_complete = mtod(m_shutdown_comp, struct sctp_shutdown_complete_chunk *);
+	shutdown_complete->ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
+	shutdown_complete->ch.chunk_flags = flags;
+	shutdown_complete->ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
+	SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_chunk);
+	if ((error = sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
+	                                        (struct sockaddr *)&net->ro._l_addr,
+	                                        m_shutdown_comp, 0, NULL, 0, 1, 0, 0,
+	                                        stcb->sctp_ep->sctp_lport, stcb->rport,
+	                                        htonl(vtag),
+	                                        net->port, NULL,
+#if defined(__FreeBSD__)
+	                                        0, 0,
+#endif
+	                                        SCTP_SO_NOT_LOCKED))) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT3, "Gak send error %d\n", error);
+		if (error == ENOBUFS) {
+			stcb->asoc.ifp_had_enobuf = 1;
+			SCTP_STAT_INCR(sctps_lowlevelerr);
+		}
+	} else {
+		stcb->asoc.ifp_had_enobuf = 0;
+	}
+	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+	return;
+}
+
+#if defined(__FreeBSD__)
+static void
+sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
+                   struct sctphdr *sh, uint32_t vtag,
+                   uint8_t type, struct mbuf *cause,
+                   uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+                   uint32_t vrf_id, uint16_t port)
+#else
+static void
+sctp_send_resp_msg(struct sockaddr *src, struct sockaddr *dst,
+                   struct sctphdr *sh, uint32_t vtag,
+                   uint8_t type, struct mbuf *cause,
+                   uint32_t vrf_id SCTP_UNUSED, uint16_t port)
+#endif
+{
+#ifdef __Panda__
+	pakhandle_type o_pak;
+#else
+	struct mbuf *o_pak;
+#endif
+	struct mbuf *mout;
+	struct sctphdr *shout;
+	struct sctp_chunkhdr *ch;
+#if defined(INET) || defined(INET6)
+	struct udphdr *udp;
+	int ret;
+#endif
+	int len, cause_len, padding_len;
+#ifdef INET
+#if defined(__APPLE__) || defined(__Panda__)
+	sctp_route_t ro;
+#endif
+	struct sockaddr_in *src_sin, *dst_sin;
+	struct ip *ip;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *src_sin6, *dst_sin6;
+	struct ip6_hdr *ip6;
+#endif
+
+	/* Compute the length of the cause and add final padding. */
+	cause_len = 0;
+	if (cause != NULL) {
+		struct mbuf *m_at, *m_last = NULL;
+
+		for (m_at = cause; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+			if (SCTP_BUF_NEXT(m_at) == NULL)
+				m_last = m_at;
+			cause_len += SCTP_BUF_LEN(m_at);
+		}
+		padding_len = cause_len % 4;
+		if (padding_len != 0) {
+			padding_len = 4 - padding_len;
+		}
+		if (padding_len != 0) {
+			if (sctp_add_pad_tombuf(m_last, padding_len) == NULL) {
+				sctp_m_freem(cause);
+				return;
+			}
+		}
+	} else {
+		padding_len = 0;
+	}
+	/* Get an mbuf for the header. */
+	len = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
+	switch (dst->sa_family) {
+#ifdef INET
+	case AF_INET:
+		len += sizeof(struct ip);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		len += sizeof(struct ip6_hdr);
+		break;
+#endif
+	default:
+		break;
+	}
+#if defined(INET) || defined(INET6)
+	if (port) {
+		len += sizeof(struct udphdr);
+	}
+#endif
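+	/*
+	 * 'len' now covers the SCTP common and chunk headers, the IP header
+	 * and, when tunneling over UDP (port != 0), the UDP header.
+	 */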
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
+#else
+	mout = sctp_get_mbuf_for_msg(len + SCTP_MAX_LINKHDR, 1, M_NOWAIT, 1, MT_DATA);
+#endif
+#else
+	mout = sctp_get_mbuf_for_msg(len + max_linkhdr, 1, M_NOWAIT, 1, MT_DATA);
+#endif
+	if (mout == NULL) {
+		if (cause) {
+			sctp_m_freem(cause);
+		}
+		return;
+	}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+	SCTP_BUF_RESV_UF(mout, max_linkhdr);
+#else
+	SCTP_BUF_RESV_UF(mout, SCTP_MAX_LINKHDR);
+#endif
+#else
+	SCTP_BUF_RESV_UF(mout, max_linkhdr);
+#endif
+	SCTP_BUF_LEN(mout) = len;
+	SCTP_BUF_NEXT(mout) = cause;
+#if defined(__FreeBSD__)
+	M_SETFIB(mout, fibnum);
+	mout->m_pkthdr.flowid = mflowid;
+	M_HASHTYPE_SET(mout, mflowtype);
+#endif
+#ifdef INET
+	ip = NULL;
+#endif
+#ifdef INET6
+	ip6 = NULL;
+#endif
+	switch (dst->sa_family) {
+#ifdef INET
+	case AF_INET:
+		src_sin = (struct sockaddr_in *)src;
+		dst_sin = (struct sockaddr_in *)dst;
+		ip = mtod(mout, struct ip *);
+		ip->ip_v = IPVERSION;
+		ip->ip_hl = (sizeof(struct ip) >> 2);
+		ip->ip_tos = 0;
+		ip->ip_off = 0;
+#if defined(__FreeBSD__)
+		ip_fillid(ip);
+#elif defined(__APPLE__)
+#if RANDOM_IP_ID
+		ip->ip_id = ip_randomid();
+#else
+		ip->ip_id = htons(ip_id++);
+#endif
+#elif defined(__Userspace__)
+		ip->ip_id = htons(ip_id++);
+#else
+		ip->ip_id = ip_id++;
+#endif
+		ip->ip_ttl = MODULE_GLOBAL(ip_defttl);
+		if (port) {
+			ip->ip_p = IPPROTO_UDP;
+		} else {
+			ip->ip_p = IPPROTO_SCTP;
+		}
+		ip->ip_src.s_addr = dst_sin->sin_addr.s_addr;
+		ip->ip_dst.s_addr = src_sin->sin_addr.s_addr;
+		ip->ip_sum = 0;
+		len = sizeof(struct ip);
+		shout = (struct sctphdr *)((caddr_t)ip + len);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		src_sin6 = (struct sockaddr_in6 *)src;
+		dst_sin6 = (struct sockaddr_in6 *)dst;
+		ip6 = mtod(mout, struct ip6_hdr *);
+		ip6->ip6_flow = htonl(0x60000000);
+#if defined(__FreeBSD__)
+		if (V_ip6_auto_flowlabel) {
+			ip6->ip6_flow |= (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
+		}
+#endif
+#if defined(__Userspace__)
+		ip6->ip6_hlim = IPv6_HOP_LIMIT;
+#else
+		ip6->ip6_hlim = MODULE_GLOBAL(ip6_defhlim);
+#endif
+		if (port) {
+			ip6->ip6_nxt = IPPROTO_UDP;
+		} else {
+			ip6->ip6_nxt = IPPROTO_SCTP;
+		}
+		ip6->ip6_src = dst_sin6->sin6_addr;
+		ip6->ip6_dst = src_sin6->sin6_addr;
+		len = sizeof(struct ip6_hdr);
+		shout = (struct sctphdr *)((caddr_t)ip6 + len);
+		break;
+#endif
+	default:
+		len = 0;
+		shout = mtod(mout, struct sctphdr *);
+		break;
+	}
+#if defined(INET) || defined(INET6)
+	if (port) {
+		if (htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)) == 0) {
+			sctp_m_freem(mout);
+			return;
+		}
+		udp = (struct udphdr *)shout;
+		udp->uh_sport = htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port));
+		udp->uh_dport = port;
+		udp->uh_sum = 0;
+		udp->uh_ulen = htons((uint16_t)(sizeof(struct udphdr) +
+		                                sizeof(struct sctphdr) +
+		                                sizeof(struct sctp_chunkhdr) +
+		                                cause_len + padding_len));
+		len += sizeof(struct udphdr);
+		shout = (struct sctphdr *)((caddr_t)shout + sizeof(struct udphdr));
+	} else {
+		udp = NULL;
+	}
+#endif
+	shout->src_port = sh->dest_port;
+	shout->dest_port = sh->src_port;
+	shout->checksum = 0;
+	if (vtag) {
+		shout->v_tag = htonl(vtag);
+	} else {
+		shout->v_tag = sh->v_tag;
+	}
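+	/*
+	 * This is a reply to the received packet: addresses and ports are
+	 * swapped, and the peer's v_tag is echoed when no tag was supplied.
+	 */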
+	len += sizeof(struct sctphdr);
+	ch = (struct sctp_chunkhdr *)((caddr_t)shout + sizeof(struct sctphdr));
+	ch->chunk_type = type;
+	if (vtag) {
+		ch->chunk_flags = 0;
+	} else {
+		ch->chunk_flags = SCTP_HAD_NO_TCB;
+	}
+	ch->chunk_length = htons((uint16_t)(sizeof(struct sctp_chunkhdr) + cause_len));
+	len += sizeof(struct sctp_chunkhdr);
+	len += cause_len + padding_len;
+
+	if (SCTP_GET_HEADER_FOR_OUTPUT(o_pak)) {
+		sctp_m_freem(mout);
+		return;
+	}
+	SCTP_ATTACH_CHAIN(o_pak, mout, len);
+	switch (dst->sa_family) {
+#ifdef INET
+	case AF_INET:
+#if defined(__APPLE__) || defined(__Panda__)
+		/* zap the stack pointer to the route */
+		bzero(&ro, sizeof(sctp_route_t));
+#if defined(__Panda__)
+		ro._l_addr.sa.sa_family = AF_INET;
+#endif
+#endif
+		if (port) {
+#if !defined(__Windows__) && !defined(__Userspace__)
+#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
+			if (V_udp_cksum) {
+				udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+			} else {
+				udp->uh_sum = 0;
+			}
+#else
+			udp->uh_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, udp->uh_ulen + htons(IPPROTO_UDP));
+#endif
+#else
+			udp->uh_sum = 0;
+#endif
+		}
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 1000000
+		ip->ip_len = htons(len);
+#else
+		ip->ip_len = len;
+#endif
+#elif defined(__APPLE__) || defined(__Userspace__)
+		ip->ip_len = len;
+#else
+		ip->ip_len = htons(len);
+#endif
+		if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip) + sizeof(struct udphdr));
+			SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+#if !defined(__Windows__) && !defined(__Userspace__)
+#if defined(__FreeBSD__) && ((__FreeBSD_version > 803000 && __FreeBSD_version < 900000) || __FreeBSD_version > 900000)
+			if (V_udp_cksum) {
+				SCTP_ENABLE_UDP_CSUM(o_pak);
+			}
+#else
+			SCTP_ENABLE_UDP_CSUM(o_pak);
+#endif
+#endif
+		} else {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			mout->m_pkthdr.csum_flags = CSUM_SCTP;
+			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+			SCTP_STAT_INCR(sctps_sendhwcrc);
+#else
+			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip));
+			SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+#endif
+		}
+#ifdef SCTP_PACKET_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+			sctp_packet_log(o_pak);
+		}
+#endif
+#if defined(__APPLE__) || defined(__Panda__)
+		SCTP_IP_OUTPUT(ret, o_pak, &ro, NULL, vrf_id);
+		/* Free the route if we got one back */
+		if (ro.ro_rt) {
+			RTFREE(ro.ro_rt);
+			ro.ro_rt = NULL;
+		}
+#else
+		SCTP_IP_OUTPUT(ret, o_pak, NULL, NULL, vrf_id);
+#endif
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		ip6->ip6_plen = (uint16_t)(len - sizeof(struct ip6_hdr));
+		if (port) {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr) + sizeof(struct udphdr));
+			SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+#if defined(__Windows__)
+			udp->uh_sum = 0;
+#elif !defined(__Userspace__)
+			if ((udp->uh_sum = in6_cksum(o_pak, IPPROTO_UDP, sizeof(struct ip6_hdr), len - sizeof(struct ip6_hdr))) == 0) {
+				udp->uh_sum = 0xffff;
+			}
+#endif
+		} else {
+#if defined(SCTP_WITH_NO_CSUM)
+			SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version >= 900000
+#if __FreeBSD_version > 901000
+			mout->m_pkthdr.csum_flags = CSUM_SCTP_IPV6;
+#else
+			mout->m_pkthdr.csum_flags = CSUM_SCTP;
+#endif
+			mout->m_pkthdr.csum_data = offsetof(struct sctphdr, checksum);
+			SCTP_STAT_INCR(sctps_sendhwcrc);
+#else
+			shout->checksum = sctp_calculate_cksum(mout, sizeof(struct ip6_hdr));
+			SCTP_STAT_INCR(sctps_sendswcrc);
+#endif
+#endif
+		}
+#ifdef SCTP_PACKET_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+			sctp_packet_log(o_pak);
+		}
+#endif
+		SCTP_IP6_OUTPUT(ret, o_pak, NULL, NULL, NULL, vrf_id);
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
+		char *buffer;
+		struct sockaddr_conn *sconn;
+
+		sconn = (struct sockaddr_conn *)src;
+#if defined(SCTP_WITH_NO_CSUM)
+		SCTP_STAT_INCR(sctps_sendnocrc);
+#else
+		if (SCTP_BASE_VAR(crc32c_offloaded) == 0) {
+			shout->checksum = sctp_calculate_cksum(mout, 0);
+			SCTP_STAT_INCR(sctps_sendswcrc);
+		} else {
+			SCTP_STAT_INCR(sctps_sendhwcrc);
+		}
+#endif
+#ifdef SCTP_PACKET_LOGGING
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) {
+			sctp_packet_log(mout);
+		}
+#endif
+		/* TODO: avoid allocating/freeing a buffer for each packet. */
+		if ((buffer = malloc(len)) != NULL) {
+			m_copydata(mout, 0, len, buffer);
+			SCTP_BASE_VAR(conn_output)(sconn->sconn_addr, buffer, len, 0, 0);
+			free(buffer);
+		}
+		sctp_m_freem(mout);
+		break;
+	}
+#endif
+	default:
+		SCTPDBG(SCTP_DEBUG_OUTPUT1, "Unknown protocol (TSNH) type %d\n",
+		        dst->sa_family);
+		sctp_m_freem(mout);
+		SCTP_LTRACE_ERR_RET_PKT(mout, NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+		return;
+	}
+	SCTP_STAT_INCR(sctps_sendpackets);
+	SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
+	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
+	return;
+}
+
+void
+sctp_send_shutdown_complete2(struct sockaddr *src, struct sockaddr *dst,
+                             struct sctphdr *sh,
+#if defined(__FreeBSD__)
+                             uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+                             uint32_t vrf_id, uint16_t port)
+{
+	sctp_send_resp_msg(src, dst, sh, 0, SCTP_SHUTDOWN_COMPLETE, NULL,
+#if defined(__FreeBSD__)
+	                   mflowtype, mflowid, fibnum,
+#endif
+	                   vrf_id, port);
+}
+
+void
+sctp_send_hb(struct sctp_tcb *stcb, struct sctp_nets *net, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+	SCTP_UNUSED
+#endif
+)
+{
+	struct sctp_tmit_chunk *chk;
+	struct sctp_heartbeat_chunk *hb;
+	struct timeval now;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if (net == NULL) {
+		return;
+	}
+	(void)SCTP_GETTIME_TIMEVAL(&now);
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		break;
+#endif
+	default:
+		return;
+	}
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		SCTPDBG(SCTP_DEBUG_OUTPUT4, "Gak, can't get a chunk for hb\n");
+		return;
+	}
+
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
+	chk->rec.chunk_id.can_take_data = 1;
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->send_size = sizeof(struct sctp_heartbeat_chunk);
+
+	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
+	if (chk->data == NULL) {
+		sctp_free_a_chunk(stcb, chk, so_locked);
+		return;
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->whoTo = net;
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	/* Now we have a mbuf that we can fill in with the details */
+	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
+	memset(hb, 0, sizeof(struct sctp_heartbeat_chunk));
+	/* fill out chunk header */
+	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
+	hb->ch.chunk_flags = 0;
+	hb->ch.chunk_length = htons(chk->send_size);
+	/* Fill out hb parameter */
+	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
+	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
+	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
+	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
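+	/* Carry the send time in the HEARTBEAT so the RTT can be measured when the peer echoes it back in the HEARTBEAT-ACK. */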
+	/* Did our user request this one, put it in */
+	hb->heartbeat.hb_info.addr_family = (uint8_t)net->ro._l_addr.sa.sa_family;
+#ifdef HAVE_SA_LEN
+	hb->heartbeat.hb_info.addr_len = net->ro._l_addr.sa.sa_len;
+#else
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_in6);
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		hb->heartbeat.hb_info.addr_len = sizeof(struct sockaddr_conn);
+		break;
+#endif
+	default:
+		hb->heartbeat.hb_info.addr_len = 0;
+		break;
+	}
+#endif
+	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+		/*
+		 * we only take from the entropy pool if the address is not
+		 * confirmed.
+		 */
+		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+	} else {
+		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
+		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
+	}
+	switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+		memcpy(hb->heartbeat.hb_info.address,
+		       &net->ro._l_addr.sin.sin_addr,
+		       sizeof(net->ro._l_addr.sin.sin_addr));
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		memcpy(hb->heartbeat.hb_info.address,
+		       &net->ro._l_addr.sin6.sin6_addr,
+		       sizeof(net->ro._l_addr.sin6.sin6_addr));
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		memcpy(hb->heartbeat.hb_info.address,
+		       &net->ro._l_addr.sconn.sconn_addr,
+		       sizeof(net->ro._l_addr.sconn.sconn_addr));
+		break;
+#endif
+	default:
+		if (chk->data) {
+			sctp_m_freem(chk->data);
+			chk->data = NULL;
+		}
+		sctp_free_a_chunk(stcb, chk, so_locked);
+		return;
+	}
+	net->hb_responded = 0;
+	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+	stcb->asoc.ctrl_queue_cnt++;
+	SCTP_STAT_INCR(sctps_sendheartbeat);
+	return;
+}
+
+void
+sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
+		   uint32_t high_tsn)
+{
+	struct sctp_association *asoc;
+	struct sctp_ecne_chunk *ecne;
+	struct sctp_tmit_chunk *chk;
+
+	if (net == NULL) {
+		return;
+	}
+	asoc = &stcb->asoc;
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if ((chk->rec.chunk_id.id == SCTP_ECN_ECHO) && (net == chk->whoTo)) {
+			/* found a previous ECN_ECHO; update it if needed */
+			uint32_t cnt, ctsn;
+			ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+			ctsn = ntohl(ecne->tsn);
+			if (SCTP_TSN_GT(high_tsn, ctsn)) {
+				ecne->tsn = htonl(high_tsn);
+				SCTP_STAT_INCR(sctps_queue_upd_ecne);
+			}
+			cnt = ntohl(ecne->num_pkts_since_cwr);
+			cnt++;
+			ecne->num_pkts_since_cwr = htonl(cnt);
+			return;
+		}
+	}
+	/* nope could not find one to update so we must build one */
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		return;
+	}
+	SCTP_STAT_INCR(sctps_queue_upd_ecne);
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_ECN_ECHO;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->send_size = sizeof(struct sctp_ecne_chunk);
+	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
+	if (chk->data == NULL) {
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		return;
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->whoTo = net;
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+
+	stcb->asoc.ecn_echo_cnt_onq++;
+	ecne = mtod(chk->data, struct sctp_ecne_chunk *);
+	ecne->ch.chunk_type = SCTP_ECN_ECHO;
+	ecne->ch.chunk_flags = 0;
+	ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
+	ecne->tsn = htonl(high_tsn);
+	ecne->num_pkts_since_cwr = htonl(1);
+	TAILQ_INSERT_HEAD(&stcb->asoc.control_send_queue, chk, sctp_next);
+	asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
+    struct mbuf *m, int len, int iphlen, int bad_crc)
+{
+	struct sctp_association *asoc;
+	struct sctp_pktdrop_chunk *drp;
+	struct sctp_tmit_chunk *chk;
+	uint8_t *datap;
+	int was_trunc = 0;
+	int fullsz = 0;
+	long spc;
+	int offset;
+	struct sctp_chunkhdr *ch, chunk_buf;
+	unsigned int chk_length;
+
+	if (!stcb) {
+		return;
+	}
+	asoc = &stcb->asoc;
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if (asoc->pktdrop_supported == 0) {
+		/*-
+		 * peer must declare support before I send one.
+		 */
+		return;
+	}
+	if (stcb->sctp_socket == NULL) {
+		return;
+	}
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		return;
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
+	chk->rec.chunk_id.can_take_data = 1;
+	chk->flags = 0;
+	len -= iphlen;
+	chk->send_size = len;
+	/* Validate that we do not have an ABORT in here. */
+	offset = iphlen + sizeof(struct sctphdr);
+	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+						   sizeof(*ch), (uint8_t *) & chunk_buf);
+	while (ch != NULL) {
+		chk_length = ntohs(ch->chunk_length);
+		if (chk_length < sizeof(*ch)) {
+			/* break to abort land */
+			break;
+		}
+		switch (ch->chunk_type) {
+		case SCTP_PACKET_DROPPED:
+		case SCTP_ABORT_ASSOCIATION:
+		case SCTP_INITIATION_ACK:
+			/*
+			 * We don't respond with a PKT-DROP to an ABORT
+			 * or a PKT-DROP. We also do not respond to an
+			 * INIT-ACK, because we can't know if the initiation
+			 * tag is correct or not.
+			 */
+			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+			return;
+		default:
+			break;
+		}
+		offset += SCTP_SIZE32(chk_length);
+		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+		    sizeof(*ch), (uint8_t *) & chunk_buf);
+	}
+
+	if ((len + SCTP_MAX_OVERHEAD + sizeof(struct sctp_pktdrop_chunk)) >
+	    min(stcb->asoc.smallest_mtu, MCLBYTES)) {
+		/* only send 1 mtu worth, trim off the
+		 * excess on the end.
+		 */
+		fullsz = len;
+		len = min(stcb->asoc.smallest_mtu, MCLBYTES) - SCTP_MAX_OVERHEAD;
+		was_trunc = 1;
+	}
+	chk->asoc = &stcb->asoc;
+	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (chk->data == NULL) {
+jump_out:
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		return;
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+	drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
+	if (drp == NULL) {
+		sctp_m_freem(chk->data);
+		chk->data = NULL;
+		goto jump_out;
+	}
+	chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
+	    sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
+	chk->book_size_scale = 0;
+	if (was_trunc) {
+		drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
+		drp->trunc_len = htons(fullsz);
+		/* Len is already adjusted to size minus overhead above;
+		 * take out the pkt_drop chunk itself from it.
+		 */
+		chk->send_size = (uint16_t)(len - sizeof(struct sctp_pktdrop_chunk));
+		len = chk->send_size;
+	} else {
+		/* no truncation needed */
+		drp->ch.chunk_flags = 0;
+		drp->trunc_len = htons(0);
+	}
+	if (bad_crc) {
+		drp->ch.chunk_flags |= SCTP_BADCRC;
+	}
+	chk->send_size += sizeof(struct sctp_pktdrop_chunk);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	if (net) {
+		/* we should hit here */
+		chk->whoTo = net;
+		atomic_add_int(&chk->whoTo->ref_count, 1);
+	} else {
+		chk->whoTo = NULL;
+	}
+	drp->ch.chunk_type = SCTP_PACKET_DROPPED;
+	drp->ch.chunk_length = htons(chk->send_size);
+	spc = SCTP_SB_LIMIT_RCV(stcb->sctp_socket);
+	if (spc < 0) {
+		spc = 0;
+	}
+	drp->bottle_bw = htonl(spc);
+	if (asoc->my_rwnd) {
+		drp->current_onq = htonl(asoc->size_on_reasm_queue +
+		    asoc->size_on_all_streams +
+		    asoc->my_rwnd_control_len +
+		    stcb->sctp_socket->so_rcv.sb_cc);
+	} else {
+		/*-
+		 * If my rwnd is 0, possibly from mbuf depletion as well as
+		 * space used, tell the peer there is NO space, i.e.
+		 * current_onq == bottle_bw.
+		 */
+		drp->current_onq = htonl(spc);
+	}
+	drp->reserved = 0;
+	datap = drp->data;
+	m_copydata(m, iphlen, len, (caddr_t)datap);
+	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+	asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn, uint8_t override)
+{
+	struct sctp_association *asoc;
+	struct sctp_cwr_chunk *cwr;
+	struct sctp_tmit_chunk *chk;
+
+	SCTP_TCB_LOCK_ASSERT(stcb);
+	if (net == NULL) {
+		return;
+	}
+	asoc = &stcb->asoc;
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if ((chk->rec.chunk_id.id == SCTP_ECN_CWR) && (net == chk->whoTo)) {
+			/* found a previous CWR queued to the same destination; update it if needed */
+			uint32_t ctsn;
+			cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+			ctsn = ntohl(cwr->tsn);
+			if (SCTP_TSN_GT(high_tsn, ctsn)) {
+				cwr->tsn = htonl(high_tsn);
+			}
+			if (override & SCTP_CWR_REDUCE_OVERRIDE) {
+				/* Make sure override is carried */
+				cwr->ch.chunk_flags |= SCTP_CWR_REDUCE_OVERRIDE;
+			}
+			return;
+		}
+	}
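+	/* No CWR queued for this destination yet, so build a new one. */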
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		return;
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_ECN_CWR;
+	chk->rec.chunk_id.can_take_data = 1;
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->send_size = sizeof(struct sctp_cwr_chunk);
+	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_NOWAIT, 1, MT_HEADER);
+	if (chk->data == NULL) {
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		return;
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	chk->whoTo = net;
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	cwr = mtod(chk->data, struct sctp_cwr_chunk *);
+	cwr->ch.chunk_type = SCTP_ECN_CWR;
+	cwr->ch.chunk_flags = override;
+	cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
+	cwr->tsn = htonl(high_tsn);
+	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
+	asoc->ctrl_queue_cnt++;
+}
+
+static int
+sctp_add_stream_reset_out(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
+                          uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
+{
+	uint16_t len, old_len, i;
+	struct sctp_stream_reset_out_request *req_out;
+	struct sctp_chunkhdr *ch;
+	int at;
+	int number_entries = 0;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+	/* get to new offset for the param. */
+	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
+	/* now how long will this param be? */
+	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+		if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
+		    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
+		    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+			number_entries++;
+		}
+	}
+	if (number_entries == 0) {
+		return (0);
+	}
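+	/*
+	 * An out request that lists no streams means "reset all streams",
+	 * so when every outgoing stream is pending we send an empty list.
+	 */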
+	if (number_entries == stcb->asoc.streamoutcnt) {
+		number_entries = 0;
+	}
+	if (number_entries > SCTP_MAX_STREAMS_AT_ONCE_RESET) {
+		number_entries = SCTP_MAX_STREAMS_AT_ONCE_RESET;
+	}
+	len = (uint16_t)(sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
+	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
+	req_out->ph.param_length = htons(len);
+	req_out->request_seq = htonl(seq);
+	req_out->response_seq = htonl(resp_seq);
+	req_out->send_reset_at_tsn = htonl(last_sent);
+	at = 0;
+	if (number_entries) {
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			if ((stcb->asoc.strmout[i].state == SCTP_STREAM_RESET_PENDING) &&
+			    (stcb->asoc.strmout[i].chunks_on_queues == 0) &&
+			    TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+				req_out->list_of_streams[at] = htons(i);
+				at++;
+				stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
+				if (at >= number_entries) {
+					break;
+				}
+			}
+		}
+	} else {
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_IN_FLIGHT;
+		}
+	}
+	if (SCTP_SIZE32(len) > len) {
+		/*-
+		 * Need to worry about the pad we may end up adding to the
+		 * end. This is easy since the struct is either aligned to 4
+		 * bytes or 2 bytes off.
+		 */
+		req_out->list_of_streams[number_entries] = 0;
+	}
+	/* now fix the chunk length */
+	ch->chunk_length = htons(len + old_len);
+	chk->book_size = len + old_len;
+	chk->book_size_scale = 0;
+	chk->send_size = SCTP_SIZE32(chk->book_size);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	return (1);
+}
+
+static void
+sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
+                         int number_entries, uint16_t *list,
+                         uint32_t seq)
+{
+	uint16_t len, old_len, i;
+	struct sctp_stream_reset_in_request *req_in;
+	struct sctp_chunkhdr *ch;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+	/* get to new offset for the param. */
+	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
+	/* now how long will this param be? */
+	len = (uint16_t)(sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
+	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
+	req_in->ph.param_length = htons(len);
+	req_in->request_seq = htonl(seq);
+	if (number_entries) {
+		for (i = 0; i < number_entries; i++) {
+			req_in->list_of_streams[i] = htons(list[i]);
+		}
+	}
+	if (SCTP_SIZE32(len) > len) {
+		/*-
+		 * Need to worry about the pad we may end up adding to the
+		 * end. This is easy since the struct is either aligned to 4
+		 * bytes or 2 bytes off.
+		 */
+		req_in->list_of_streams[number_entries] = 0;
+	}
+	/* now fix the chunk length */
+	ch->chunk_length = htons(len + old_len);
+	chk->book_size = len + old_len;
+	chk->book_size_scale = 0;
+	chk->send_size = SCTP_SIZE32(chk->book_size);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	return;
+}
+
+static void
+sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
+                          uint32_t seq)
+{
+	uint16_t len, old_len;
+	struct sctp_stream_reset_tsn_request *req_tsn;
+	struct sctp_chunkhdr *ch;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+	/* get to new offset for the param. */
+	req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
+	/* now how long will this param be? */
+	len = sizeof(struct sctp_stream_reset_tsn_request);
+	req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
+	req_tsn->ph.param_length = htons(len);
+	req_tsn->request_seq = htonl(seq);
+
+	/* now fix the chunk length */
+	ch->chunk_length = htons(len + old_len);
+	chk->send_size = len + old_len;
+	chk->book_size = SCTP_SIZE32(chk->send_size);
+	chk->book_size_scale = 0;
+	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+	return;
+}
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
+                             uint32_t resp_seq, uint32_t result)
+{
+	uint16_t len, old_len;
+	struct sctp_stream_reset_response *resp;
+	struct sctp_chunkhdr *ch;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+	/* get to new offset for the param. */
+	resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
+	/* now how long will this param be? */
+	len = sizeof(struct sctp_stream_reset_response);
+	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+	resp->ph.param_length = htons(len);
+	resp->response_seq = htonl(resp_seq);
+	resp->result = htonl(result);
+
+	/* now fix the chunk length */
+	ch->chunk_length = htons(len + old_len);
+	chk->book_size = len + old_len;
+	chk->book_size_scale = 0;
+	chk->send_size = SCTP_SIZE32(chk->book_size);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	return;
+}
+
+void
+sctp_send_deferred_reset_response(struct sctp_tcb *stcb,
+				 struct sctp_stream_reset_list *ent,
+				 int response)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_chunkhdr *ch;
+
+	asoc = &stcb->asoc;
+
+	/*
+	 * Record the new response (probably PERFORMED) as our last reset
+	 * action. This assures that if we fail to send, a retransmission
+	 * from the peer will get the new response.
+	 */
+	asoc->last_reset_action[0] = response;
+	if (asoc->stream_reset_outstanding) {
+		return;
+	}
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return;
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->book_size = sizeof(struct sctp_chunkhdr);
+	chk->send_size = SCTP_SIZE32(chk->book_size);
+	chk->book_size_scale = 0;
+	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (chk->data == NULL) {
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return;
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+	/* setup chunk parameters */
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	if (stcb->asoc.alternate) {
+		chk->whoTo = stcb->asoc.alternate;
+	} else {
+		chk->whoTo = stcb->asoc.primary_destination;
+	}
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	ch->chunk_type = SCTP_STREAM_RESET;
+	ch->chunk_flags = 0;
+	ch->chunk_length = htons(chk->book_size);
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	sctp_add_stream_reset_result(chk, ent->seq, response);
+	/* insert the chunk for sending */
+	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+			  chk,
+			  sctp_next);
+	asoc->ctrl_queue_cnt++;
+}
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
+                                 uint32_t resp_seq, uint32_t result,
+                                 uint32_t send_una, uint32_t recv_next)
+{
+	uint16_t len, old_len;
+	struct sctp_stream_reset_response_tsn *resp;
+	struct sctp_chunkhdr *ch;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+	/* get to new offset for the param. */
+	resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
+	/* now how long will this param be? */
+	len = sizeof(struct sctp_stream_reset_response_tsn);
+	resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
+	resp->ph.param_length = htons(len);
+	resp->response_seq = htonl(resp_seq);
+	resp->result = htonl(result);
+	resp->senders_next_tsn = htonl(send_una);
+	resp->receivers_next_tsn = htonl(recv_next);
+
+	/* now fix the chunk length */
+	ch->chunk_length = htons(len + old_len);
+	chk->book_size = len + old_len;
+	chk->send_size = SCTP_SIZE32(chk->book_size);
+	chk->book_size_scale = 0;
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	return;
+}
+
+static void
+sctp_add_an_out_stream(struct sctp_tmit_chunk *chk,
+		       uint32_t seq,
+		       uint16_t adding)
+{
+	uint16_t len, old_len;
+	struct sctp_chunkhdr *ch;
+	struct sctp_stream_reset_add_strm *addstr;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+	/* get to new offset for the param. */
+	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
+	/* now how long will this param be? */
+	len = sizeof(struct sctp_stream_reset_add_strm);
+
+	/* Fill it out. */
+	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_OUT_STREAMS);
+	addstr->ph.param_length = htons(len);
+	addstr->request_seq = htonl(seq);
+	addstr->number_of_streams = htons(adding);
+	addstr->reserved = 0;
+
+	/* now fix the chunk length */
+	ch->chunk_length = htons(len + old_len);
+	chk->send_size = len + old_len;
+	chk->book_size = SCTP_SIZE32(chk->send_size);
+	chk->book_size_scale = 0;
+	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+	return;
+}
+
+static void
+sctp_add_an_in_stream(struct sctp_tmit_chunk *chk,
+                      uint32_t seq,
+                      uint16_t adding)
+{
+	uint16_t len, old_len;
+	struct sctp_chunkhdr *ch;
+	struct sctp_stream_reset_add_strm *addstr;
+
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
+
+	/* get to new offset for the param. */
+	addstr = (struct sctp_stream_reset_add_strm *)((caddr_t)ch + len);
+	/* now how long will this param be? */
+	len = sizeof(struct sctp_stream_reset_add_strm);
+	/* Fill it out. */
+	addstr->ph.param_type = htons(SCTP_STR_RESET_ADD_IN_STREAMS);
+	addstr->ph.param_length = htons(len);
+	addstr->request_seq = htonl(seq);
+	addstr->number_of_streams = htons(adding);
+	addstr->reserved = 0;
+
+	/* now fix the chunk length */
+	ch->chunk_length = htons(len + old_len);
+	chk->send_size = len + old_len;
+	chk->book_size = SCTP_SIZE32(chk->send_size);
+	chk->book_size_scale = 0;
+	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
+	return;
+}
+
+int
+sctp_send_stream_reset_out_if_possible(struct sctp_tcb *stcb, int so_locked)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_chunkhdr *ch;
+	uint32_t seq;
+
+	asoc = &stcb->asoc;
+	asoc->trigger_reset = 0;
+	if (asoc->stream_reset_outstanding) {
+		return (EALREADY);
+	}
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return (ENOMEM);
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->book_size = sizeof(struct sctp_chunkhdr);
+	chk->send_size = SCTP_SIZE32(chk->book_size);
+	chk->book_size_scale = 0;
+	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (chk->data == NULL) {
+		sctp_free_a_chunk(stcb, chk, so_locked);
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return (ENOMEM);
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+	/* setup chunk parameters */
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	if (stcb->asoc.alternate) {
+		chk->whoTo = stcb->asoc.alternate;
+	} else {
+		chk->whoTo = stcb->asoc.primary_destination;
+	}
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	ch->chunk_type = SCTP_STREAM_RESET;
+	ch->chunk_flags = 0;
+	ch->chunk_length = htons(chk->book_size);
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+	seq = stcb->asoc.str_reset_seq_out;
+	if (sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1))) {
+		seq++;
+		asoc->stream_reset_outstanding++;
+	} else {
+		m_freem(chk->data);
+		chk->data = NULL;
+		sctp_free_a_chunk(stcb, chk, so_locked);
+		return (ENOENT);
+	}
+	asoc->str_reset = chk;
+	/* insert the chunk for sending */
+	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+			  chk,
+			  sctp_next);
+	asoc->ctrl_queue_cnt++;
+
+	if (stcb->asoc.send_sack) {
+		sctp_send_sack(stcb, so_locked);
+	}
+	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+	return (0);
+}
+
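+/*
+ * Build a STREAM RESET chunk containing whatever combination of parameters
+ * the caller asked for (outgoing/incoming SSN reset, add outgoing/incoming
+ * streams, TSN reset), growing the local outgoing stream array first if an
+ * add-stream request needs it, then queue the chunk and start the timer.
+ */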
+int
+sctp_send_str_reset_req(struct sctp_tcb *stcb,
+                        uint16_t number_entries, uint16_t *list,
+                        uint8_t send_in_req,
+                        uint8_t send_tsn_req,
+                        uint8_t add_stream,
+                        uint16_t adding_o,
+                        uint16_t adding_i, uint8_t peer_asked)
+{
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk;
+	struct sctp_chunkhdr *ch;
+	int can_send_out_req=0;
+	uint32_t seq;
+
+	asoc = &stcb->asoc;
+	if (asoc->stream_reset_outstanding) {
+		/*-
+		 * Already one pending, must get ACK back to clear the flag.
+		 */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EBUSY);
+		return (EBUSY);
+	}
+	if ((send_in_req == 0) && (send_tsn_req == 0) &&
+	    (add_stream == 0)) {
+		/* nothing to do */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		return (EINVAL);
+	}
+	if (send_tsn_req && send_in_req) {
+		/* error, can't do that */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		return (EINVAL);
+	} else if (send_in_req) {
+		can_send_out_req = 1;
+	}
+	if (number_entries > (MCLBYTES -
+	                      SCTP_MIN_OVERHEAD -
+	                      sizeof(struct sctp_chunkhdr) -
+	                      sizeof(struct sctp_stream_reset_out_request)) /
+	                     sizeof(uint16_t)) {
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return (ENOMEM);
+	}
+	sctp_alloc_a_chunk(stcb, chk);
+	if (chk == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return (ENOMEM);
+	}
+	chk->copy_by_ref = 0;
+	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
+	chk->rec.chunk_id.can_take_data = 0;
+	chk->flags = 0;
+	chk->asoc = &stcb->asoc;
+	chk->book_size = sizeof(struct sctp_chunkhdr);
+	chk->send_size = SCTP_SIZE32(chk->book_size);
+	chk->book_size_scale = 0;
+
+	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (chk->data == NULL) {
+		sctp_free_a_chunk(stcb, chk, SCTP_SO_LOCKED);
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		return (ENOMEM);
+	}
+	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
+
+	/* setup chunk parameters */
+	chk->sent = SCTP_DATAGRAM_UNSENT;
+	chk->snd_count = 0;
+	if (stcb->asoc.alternate) {
+		chk->whoTo = stcb->asoc.alternate;
+	} else {
+		chk->whoTo = stcb->asoc.primary_destination;
+	}
+	atomic_add_int(&chk->whoTo->ref_count, 1);
+	ch = mtod(chk->data, struct sctp_chunkhdr *);
+	ch->chunk_type = SCTP_STREAM_RESET;
+	ch->chunk_flags = 0;
+	ch->chunk_length = htons(chk->book_size);
+	SCTP_BUF_LEN(chk->data) = chk->send_size;
+
+	seq = stcb->asoc.str_reset_seq_out;
+	if (can_send_out_req) {
+		int ret;
+	        ret = sctp_add_stream_reset_out(stcb, chk, seq, (stcb->asoc.str_reset_seq_in - 1), (stcb->asoc.sending_seq - 1));
+		if (ret) {
+			seq++;
+			asoc->stream_reset_outstanding++;
+		}
+	}
+	if ((add_stream & 1) &&
+	    ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < adding_o)) {
+		/* Need to allocate more */
+		struct sctp_stream_out *oldstream;
+		struct sctp_stream_queue_pending *sp, *nsp;
+		int i;
+#if defined(SCTP_DETAILED_STR_STATS)
+		int j;
+#endif
+
+		oldstream = stcb->asoc.strmout;
+		/* get some more */
+		SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
+			    (stcb->asoc.streamoutcnt + adding_o) * sizeof(struct sctp_stream_out),
+			    SCTP_M_STRMO);
+		if (stcb->asoc.strmout == NULL) {
+			uint8_t x;
+			stcb->asoc.strmout = oldstream;
+			/* Turn off the bit */
+			x = add_stream & 0xfe;
+			add_stream = x;
+			goto skip_stuff;
+		}
+		/* Ok now we proceed with copying the old out stuff and
+		 * initializing the new stuff.
+		 */
+		SCTP_TCB_SEND_LOCK(stcb);
+		stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 0, 1);
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+			stcb->asoc.strmout[i].chunks_on_queues = oldstream[i].chunks_on_queues;
+			stcb->asoc.strmout[i].next_mid_ordered = oldstream[i].next_mid_ordered;
+			stcb->asoc.strmout[i].next_mid_unordered = oldstream[i].next_mid_unordered;
+			stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
+			stcb->asoc.strmout[i].sid = i;
+			stcb->asoc.strmout[i].state = oldstream[i].state;
+			/* FIX ME FIX ME */
+			/* This should be a SS_COPY operation FIX ME STREAM SCHEDULER EXPERT */
+			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], &oldstream[i]);
+			/* now anything on those queues? */
+			TAILQ_FOREACH_SAFE(sp, &oldstream[i].outqueue, next, nsp) {
+				TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
+				TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
+			}
+
+		}
+		/* now the new streams */
+		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
+		for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + adding_o); i++) {
+			TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
+			stcb->asoc.strmout[i].chunks_on_queues = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+			for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+				stcb->asoc.strmout[i].abandoned_sent[j] = 0;
+				stcb->asoc.strmout[i].abandoned_unsent[j] = 0;
+			}
+#else
+			stcb->asoc.strmout[i].abandoned_sent[0] = 0;
+			stcb->asoc.strmout[i].abandoned_unsent[0] = 0;
+#endif
+			stcb->asoc.strmout[i].next_mid_ordered = 0;
+			stcb->asoc.strmout[i].next_mid_unordered = 0;
+			stcb->asoc.strmout[i].sid = i;
+			stcb->asoc.strmout[i].last_msg_incomplete = 0;
+			stcb->asoc.ss_functions.sctp_ss_init_stream(stcb, &stcb->asoc.strmout[i], NULL);
+			stcb->asoc.strmout[i].state = SCTP_STREAM_CLOSED;
+		}
+		stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + adding_o;
+		SCTP_FREE(oldstream, SCTP_M_STRMO);
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+skip_stuff:
+	if ((add_stream & 1) && (adding_o > 0)) {
+		asoc->strm_pending_add_size = adding_o;
+		asoc->peer_req_out = peer_asked;
+		sctp_add_an_out_stream(chk, seq, adding_o);
+		seq++;
+		asoc->stream_reset_outstanding++;
+	}
+	if ((add_stream & 2) && (adding_i > 0)) {
+		sctp_add_an_in_stream(chk, seq, adding_i);
+		seq++;
+		asoc->stream_reset_outstanding++;
+	}
+	if (send_in_req) {
+		sctp_add_stream_reset_in(chk, number_entries, list, seq);
+		seq++;
+		asoc->stream_reset_outstanding++;
+	}
+	if (send_tsn_req) {
+		sctp_add_stream_reset_tsn(chk, seq);
+		asoc->stream_reset_outstanding++;
+	}
+	asoc->str_reset = chk;
+	/* insert the chunk for sending */
+	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
+			  chk,
+			  sctp_next);
+	asoc->ctrl_queue_cnt++;
+	if (stcb->asoc.send_sack) {
+		sctp_send_sack(stcb, SCTP_SO_LOCKED);
+	}
+	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
+	return (0);
+}
+
+void
+sctp_send_abort(struct mbuf *m, int iphlen, struct sockaddr *src, struct sockaddr *dst,
+                struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
+#if defined(__FreeBSD__)
+                uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+                uint32_t vrf_id, uint16_t port)
+{
+	/* Don't respond to an ABORT with an ABORT. */
+	if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
+		if (cause)
+			sctp_m_freem(cause);
+		return;
+	}
+	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_ABORT_ASSOCIATION, cause,
+#if defined(__FreeBSD__)
+	                   mflowtype, mflowid, fibnum,
+#endif
+	                   vrf_id, port);
+	return;
+}
+
+void
+sctp_send_operr_to(struct sockaddr *src, struct sockaddr *dst,
+                   struct sctphdr *sh, uint32_t vtag, struct mbuf *cause,
+#if defined(__FreeBSD__)
+                   uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+                   uint32_t vrf_id, uint16_t port)
+{
+	sctp_send_resp_msg(src, dst, sh, vtag, SCTP_OPERATION_ERROR, cause,
+#if defined(__FreeBSD__)
+	                   mflowtype, mflowid, fibnum,
+#endif
+	                   vrf_id, port);
+	return;
+}
+
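+/*
+ * Copy up to max_send_len bytes of user data from the uio into a new mbuf
+ * chain. Returns the head of the chain and sets *new_tail to its last mbuf;
+ * on newer FreeBSD (and Panda) this is just m_uiotombuf(), elsewhere the
+ * data is moved mbuf by mbuf with uiomove().
+ */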
+static struct mbuf *
+sctp_copy_resume(struct uio *uio,
+		 int max_send_len,
+#if defined(__FreeBSD__) && __FreeBSD_version > 602000
+		 int user_marks_eor,
+#endif
+		 int *error,
+		 uint32_t *sndout,
+		 struct mbuf **new_tail)
+{
+#if defined(__Panda__)
+	struct mbuf *m;
+
+	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
+			(user_marks_eor ? M_EOR : 0));
+	if (m == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+		*error = ENOBUFS;
+	} else {
+		*sndout = m_length(m, NULL);
+		*new_tail = m_last(m);
+	}
+	return (m);
+#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
+	struct mbuf *m;
+
+	m = m_uiotombuf(uio, M_WAITOK, max_send_len, 0,
+		(M_PKTHDR | (user_marks_eor ? M_EOR : 0)));
+	if (m == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+		*error = ENOBUFS;
+	} else {
+		*sndout = m_length(m, NULL);
+		*new_tail = m_last(m);
+	}
+	return (m);
+#else
+	int left, cancpy, willcpy;
+	struct mbuf *m, *head;
+
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+	left = (int)min(uio->uio_resid, max_send_len);
+#else
+	left = (int)min(uio_resid(uio), max_send_len);
+#endif
+#else
+	left = (int)min(uio->uio_resid, max_send_len);
+#endif
+	/* Always get a header just in case */
+	head = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
+	if (head == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+		*error = ENOBUFS;
+		return (NULL);
+	}
+	cancpy = (int)M_TRAILINGSPACE(head);
+	willcpy = min(cancpy, left);
+	*error = uiomove(mtod(head, caddr_t), willcpy, uio);
+	if (*error) {
+		sctp_m_freem(head);
+		return (NULL);
+	}
+	*sndout += willcpy;
+	left -= willcpy;
+	SCTP_BUF_LEN(head) = willcpy;
+	m = head;
+	*new_tail = head;
+	while (left > 0) {
+		/* move in user data */
+		SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
+		if (SCTP_BUF_NEXT(m) == NULL) {
+			sctp_m_freem(head);
+			*new_tail = NULL;
+			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+			*error = ENOBUFS;
+			return (NULL);
+		}
+		m = SCTP_BUF_NEXT(m);
+		cancpy = (int)M_TRAILINGSPACE(m);
+		willcpy = min(cancpy, left);
+		*error = uiomove(mtod(m, caddr_t), willcpy, uio);
+		if (*error) {
+			sctp_m_freem(head);
+			*new_tail = NULL;
+			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+			*error = EFAULT;
+			return (NULL);
+		}
+		SCTP_BUF_LEN(m) = willcpy;
+		left -= willcpy;
+		*sndout += willcpy;
+		*new_tail = m;
+		if (left == 0) {
+			SCTP_BUF_NEXT(m) = NULL;
+		}
+	}
+	return (head);
+#endif
+}
+
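+/*
+ * Copy a complete pending message (sp->length bytes) from the uio into an
+ * mbuf chain attached to sp, reserving resv_upfront bytes in the first mbuf
+ * for the DATA chunk header.
+ */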
+static int
+sctp_copy_one(struct sctp_stream_queue_pending *sp,
+              struct uio *uio,
+              int resv_upfront)
+{
+#if defined(__Panda__)
+	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
+	                       resv_upfront, 0);
+	if (sp->data == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+		return (ENOBUFS);
+	}
+
+	sp->tail_mbuf = m_last(sp->data);
+	return (0);
+#elif defined(__FreeBSD__) && __FreeBSD_version > 602000
+	sp->data = m_uiotombuf(uio, M_WAITOK, sp->length,
+	                       resv_upfront, 0);
+	if (sp->data == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+		return (ENOBUFS);
+	}
+
+	sp->tail_mbuf = m_last(sp->data);
+	return (0);
+#else
+	int left;
+	int cancpy, willcpy, error;
+	struct mbuf *m, *head;
+	int cpsz = 0;
+
+	/* First one gets a header */
+	left = sp->length;
+	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAITOK, 0, MT_DATA);
+	if (m == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+		return (ENOBUFS);
+	}
+	/*-
+	 * Add this one for m in now, that way if the alloc fails we won't
+	 * have a bad cnt.
+	 */
+	SCTP_BUF_RESV_UF(m, resv_upfront);
+	cancpy = (int)M_TRAILINGSPACE(m);
+	willcpy = min(cancpy, left);
+	while (left > 0) {
+		/* move in user data */
+		error = uiomove(mtod(m, caddr_t), willcpy, uio);
+		if (error) {
+			sctp_m_freem(head);
+			return (error);
+		}
+		SCTP_BUF_LEN(m) = willcpy;
+		left -= willcpy;
+		cpsz += willcpy;
+		if (left > 0) {
+			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAITOK, 0, MT_DATA);
+			if (SCTP_BUF_NEXT(m) == NULL) {
+				/*
+				 * the head goes back to caller, he can free
+				 * the rest
+				 */
+				sctp_m_freem(head);
+				SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOBUFS);
+				return (ENOBUFS);
+			}
+			m = SCTP_BUF_NEXT(m);
+			cancpy = (int)M_TRAILINGSPACE(m);
+			willcpy = min(cancpy, left);
+		} else {
+			sp->tail_mbuf = m;
+			SCTP_BUF_NEXT(m) = NULL;
+		}
+	}
+	sp->data = head;
+	sp->length = cpsz;
+	return (0);
+#endif
+}
+
+
+
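+/*
+ * Allocate a stream queue pending entry, fill it in from the send info in
+ * srcv and copy the user data into it via sctp_copy_one(). Returns NULL and
+ * sets *error if the association is shutting down or an allocation/copy
+ * fails.
+ */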
+static struct sctp_stream_queue_pending *
+sctp_copy_it_in(struct sctp_tcb *stcb,
+    struct sctp_association *asoc,
+    struct sctp_sndrcvinfo *srcv,
+    struct uio *uio,
+    struct sctp_nets *net,
+    int max_send_len,
+    int user_marks_eor,
+    int *error)
+
+{
+	/*-
+	 * This routine must be very careful in its work. Protocol
+	 * processing is up and running so care must be taken to spl...()
+	 * when you need to do something that may affect the stcb/asoc. The
+	 * sb is locked however. When data is copied the protocol processing
+	 * should be enabled since this is a slower operation...
+	 */
+	struct sctp_stream_queue_pending *sp = NULL;
+	int resv_in_first;
+
+	*error = 0;
+	/* Now can we send this? */
+	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+		/* got data while shutting down */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+		*error = ECONNRESET;
+		goto out_now;
+	}
+	sctp_alloc_a_strmoq(stcb, sp);
+	if (sp == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+		*error = ENOMEM;
+		goto out_now;
+	}
+	sp->act_flags = 0;
+	sp->sender_all_done = 0;
+	sp->sinfo_flags = srcv->sinfo_flags;
+	sp->timetolive = srcv->sinfo_timetolive;
+	sp->ppid = srcv->sinfo_ppid;
+	sp->context = srcv->sinfo_context;
+	sp->fsn = 0;
+	(void)SCTP_GETTIME_TIMEVAL(&sp->ts);
+
+	sp->sid = srcv->sinfo_stream;
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
+#else
+	sp->length = (uint32_t)min(uio_resid(uio), max_send_len);
+#endif
+#else
+	sp->length = (uint32_t)min(uio->uio_resid, max_send_len);
+#endif
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+	if ((sp->length == (uint32_t)uio->uio_resid) &&
+#else
+	if ((sp->length == (uint32_t)uio_resid(uio)) &&
+#endif
+#else
+	if ((sp->length == (uint32_t)uio->uio_resid) &&
+#endif
+	    ((user_marks_eor == 0) ||
+	     (srcv->sinfo_flags & SCTP_EOF) ||
+	     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
+		sp->msg_is_complete = 1;
+	} else {
+		sp->msg_is_complete = 0;
+	}
+	sp->sender_all_done = 0;
+	sp->some_taken = 0;
+	sp->put_last_out = 0;
+	resv_in_first = sizeof(struct sctp_data_chunk);
+	sp->data = sp->tail_mbuf = NULL;
+	if (sp->length == 0) {
+		*error = 0;
+		goto skip_copy;
+	}
+	if (srcv->sinfo_keynumber_valid) {
+		sp->auth_keyid = srcv->sinfo_keynumber;
+	} else {
+		sp->auth_keyid = stcb->asoc.authinfo.active_keyid;
+	}
+	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+		sctp_auth_key_acquire(stcb, sp->auth_keyid);
+		sp->holds_key_ref = 1;
+	}
+#if defined(__APPLE__)
+	SCTP_SOCKET_UNLOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
+#endif
+	*error = sctp_copy_one(sp, uio, resv_in_first);
+#if defined(__APPLE__)
+	SCTP_SOCKET_LOCK(SCTP_INP_SO(stcb->sctp_ep), 0);
+#endif
+ skip_copy:
+	if (*error) {
+		sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
+		sp = NULL;
+	} else {
+		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
+			sp->net = net;
+			atomic_add_int(&sp->net->ref_count, 1);
+		} else {
+			sp->net = NULL;
+		}
+		sctp_set_prsctp_policy(sp);
+	}
+out_now:
+	return (sp);
+}
+
+
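+/*
+ * Socket layer send entry point. Pulls any SCTP_SNDRCV cmsg out of the
+ * control mbuf, rewrites an IPv4-mapped IPv6 destination as plain IPv4 and
+ * hands the actual work to sctp_lower_sosend().
+ */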
+int
+sctp_sosend(struct socket *so,
+            struct sockaddr *addr,
+            struct uio *uio,
+#ifdef __Panda__
+            pakhandle_type top,
+            pakhandle_type icontrol,
+#else
+            struct mbuf *top,
+            struct mbuf *control,
+#endif
+#if defined(__APPLE__) || defined(__Panda__)
+            int flags
+#else
+            int flags,
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+            struct thread *p
+#elif defined(__Windows__)
+            PKTHREAD p
+#else
+#if defined(__Userspace__)
+            /*
+	     * proc is a dummy in __Userspace__ and will not be passed
+	     * to sctp_lower_sosend
+	     */
+#endif
+            struct proc *p
+#endif
+#endif
+)
+{
+#ifdef __Panda__
+	struct mbuf *control = NULL;
+#endif
+#if defined(__APPLE__)
+	struct proc *p = current_proc();
+#endif
+	int error, use_sndinfo = 0;
+	struct sctp_sndrcvinfo sndrcvninfo;
+	struct sockaddr *addr_to_use;
+#if defined(INET) && defined(INET6)
+	struct sockaddr_in sin;
+#endif
+
+#if defined(__APPLE__)
+	SCTP_SOCKET_LOCK(so, 1);
+#endif
+#ifdef __Panda__
+	control = SCTP_HEADER_TO_CHAIN(icontrol);
+#endif
+	if (control) {
+		/* process cmsg snd/rcv info (maybe an assoc-id) */
+		if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&sndrcvninfo, control,
+		    sizeof(sndrcvninfo))) {
+			/* got one */
+			use_sndinfo = 1;
+		}
+	}
+	addr_to_use = addr;
+#if defined(INET) && defined(INET6)
+	if ((addr) && (addr->sa_family == AF_INET6)) {
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)addr;
+		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+			in6_sin6_2_sin(&sin, sin6);
+			addr_to_use = (struct sockaddr *)&sin;
+		}
+	}
+#endif
+	error = sctp_lower_sosend(so, addr_to_use, uio, top,
+#ifdef __Panda__
+				  icontrol,
+#else
+				  control,
+#endif
+				  flags,
+				  use_sndinfo ? &sndrcvninfo: NULL
+#if !(defined(__Panda__) || defined(__Userspace__))
+				  , p
+#endif
+		);
+#if defined(__APPLE__)
+	SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	return (error);
+}
+
+
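+/*
+ * The main send path: validate the destination and send info, find (or
+ * implicitly set up) the association, handle SCTP_ABORT/SCTP_EOF, block for
+ * socket buffer space when needed, copy the user data onto the stream
+ * queues and finally kick chunk output.
+ */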
+int
+sctp_lower_sosend(struct socket *so,
+                  struct sockaddr *addr,
+                  struct uio *uio,
+#ifdef __Panda__
+                  pakhandle_type i_pak,
+                  pakhandle_type i_control,
+#else
+                  struct mbuf *i_pak,
+                  struct mbuf *control,
+#endif
+                  int flags,
+                  struct sctp_sndrcvinfo *srcv
+#if !(defined( __Panda__) || defined(__Userspace__))
+                  ,
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+                  struct thread *p
+#elif defined(__Windows__)
+                  PKTHREAD p
+#else
+                  struct proc *p
+#endif
+#endif
+	)
+{
+	unsigned int sndlen = 0, max_len;
+	int error, len;
+	struct mbuf *top = NULL;
+#ifdef __Panda__
+	struct mbuf *control = NULL;
+#endif
+	int queue_only = 0, queue_only_for_init = 0;
+	int free_cnt_applied = 0;
+	int un_sent;
+	int now_filled = 0;
+	unsigned int inqueue_bytes = 0;
+	struct sctp_block_entry be;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb = NULL;
+	struct timeval now;
+	struct sctp_nets *net;
+	struct sctp_association *asoc;
+	struct sctp_inpcb *t_inp;
+	int user_marks_eor;
+	int create_lock_applied = 0;
+	int nagle_applies = 0;
+	int some_on_control = 0;
+	int got_all_of_the_send = 0;
+	int hold_tcblock = 0;
+	int non_blocking = 0;
+	uint32_t local_add_more, local_soresv = 0;
+	uint16_t port;
+	uint16_t sinfo_flags;
+	sctp_assoc_t sinfo_assoc_id;
+
+	error = 0;
+	net = NULL;
+	stcb = NULL;
+	asoc = NULL;
+
+#if defined(__APPLE__)
+	sctp_lock_assert(so);
+#endif
+	t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		error = EINVAL;
+		if (i_pak) {
+			SCTP_RELEASE_PKT(i_pak);
+		}
+		return (error);
+	}
+	if ((uio == NULL) && (i_pak == NULL)) {
+		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		return (EINVAL);
+	}
+	user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+	atomic_add_int(&inp->total_sends, 1);
+	if (uio) {
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		if (uio->uio_resid < 0) {
+#else
+		if (uio_resid(uio) < 0) {
+#endif
+#else
+		if (uio->uio_resid < 0) {
+#endif
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			return (EINVAL);
+		}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		sndlen = (unsigned int)uio->uio_resid;
+#else
+		sndlen = (unsigned int)uio_resid(uio);
+#endif
+#else
+		sndlen = (unsigned int)uio->uio_resid;
+#endif
+	} else {
+		top = SCTP_HEADER_TO_CHAIN(i_pak);
+#ifdef __Panda__
+		/*-
+		 * app len indicates the datalen; dgsize for cases
+		 * of SCTP_EOF/ABORT will not have the right len
+		 */
+		sndlen = SCTP_APP_DATA_LEN(i_pak);
+		/*-
+		 * Set the particle len also to zero to match
+		 * up with app len. We only have one particle
+		 * if app len is zero for Panda. This is ensured
+		 * in the socket lib
+		 */
+		if (sndlen == 0) {
+			SCTP_BUF_LEN(top)  = 0;
+		}
+		/*-
+		 * We delink the chain from header, but keep
+		 * the header around as we will need it in
+		 * EAGAIN case
+		 */
+		SCTP_DETACH_HEADER_FROM_CHAIN(i_pak);
+#else
+		sndlen = SCTP_HEADER_LEN(i_pak);
+#endif
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "Send called addr:%p send length %d\n",
+		(void *)addr,
+	        sndlen);
+#ifdef __Panda__
+	if (i_control) {
+		control = SCTP_HEADER_TO_CHAIN(i_control);
+	}
+#endif
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+	    (inp->sctp_socket->so_qlimit)) {
+		/* The listener can NOT send */
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+		error = ENOTCONN;
+		goto out_unlocked;
+	}
+	/**
+	 * Pre-screen the address; if one is given, the sin-len
+	 * must be set correctly!
+	 */
+	if (addr) {
+		union sctp_sockstore *raddr = (union sctp_sockstore *)addr;
+		switch (raddr->sa.sa_family) {
+#ifdef INET
+		case AF_INET:
+#ifdef HAVE_SIN_LEN
+			if (raddr->sin.sin_len != sizeof(struct sockaddr_in)) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+				error = EINVAL;
+				goto out_unlocked;
+			}
+#endif
+			port = raddr->sin.sin_port;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+#ifdef HAVE_SIN6_LEN
+			if (raddr->sin6.sin6_len != sizeof(struct sockaddr_in6)) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+				error = EINVAL;
+				goto out_unlocked;
+			}
+#endif
+			port = raddr->sin6.sin6_port;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+#ifdef HAVE_SCONN_LEN
+			if (raddr->sconn.sconn_len != sizeof(struct sockaddr_conn)) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+				error = EINVAL;
+				goto out_unlocked;
+			}
+#endif
+			port = raddr->sconn.sconn_port;
+			break;
+#endif
+		default:
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EAFNOSUPPORT);
+			error = EAFNOSUPPORT;
+			goto out_unlocked;
+		}
+	} else
+		port = 0;
+
+	if (srcv) {
+		sinfo_flags = srcv->sinfo_flags;
+		sinfo_assoc_id = srcv->sinfo_assoc_id;
+		if (INVALID_SINFO_FLAG(sinfo_flags) ||
+		    PR_SCTP_INVALID_POLICY(sinfo_flags)) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			error = EINVAL;
+			goto out_unlocked;
+		}
+		if (srcv->sinfo_flags)
+			SCTP_STAT_INCR(sctps_sends_with_flags);
+	} else {
+		sinfo_flags = inp->def_send.sinfo_flags;
+		sinfo_assoc_id = inp->def_send.sinfo_assoc_id;
+	}
+	if (sinfo_flags & SCTP_SENDALL) {
+		/* it's a sendall */
+		error = sctp_sendall(inp, uio, top, srcv);
+		top = NULL;
+		goto out_unlocked;
+	}
+	if ((sinfo_flags & SCTP_ADDR_OVER) && (addr == NULL)) {
+		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		error = EINVAL;
+		goto out_unlocked;
+	}
+	/* now we must find the assoc */
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		SCTP_INP_RLOCK(inp);
+		stcb = LIST_FIRST(&inp->sctp_asoc_list);
+		if (stcb) {
+			SCTP_TCB_LOCK(stcb);
+			hold_tcblock = 1;
+		}
+		SCTP_INP_RUNLOCK(inp);
+	} else if (sinfo_assoc_id) {
+		stcb = sctp_findassociation_ep_asocid(inp, sinfo_assoc_id, 1);
+		if (stcb != NULL) {
+			hold_tcblock = 1;
+		}
+	} else if (addr) {
+		/*-
+		 * Since we did not use findep we must
+		 * increment it, and if we don't find a tcb
+		 * decrement it.
+		 */
+		SCTP_INP_WLOCK(inp);
+		SCTP_INP_INCR_REF(inp);
+		SCTP_INP_WUNLOCK(inp);
+		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+		if (stcb == NULL) {
+			SCTP_INP_WLOCK(inp);
+			SCTP_INP_DECR_REF(inp);
+			SCTP_INP_WUNLOCK(inp);
+		} else {
+			hold_tcblock = 1;
+		}
+	}
+	if ((stcb == NULL) && (addr)) {
+		/* Possible implicit send? */
+		SCTP_ASOC_CREATE_LOCK(inp);
+		create_lock_applied = 1;
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+			/* Should I really unlock ? */
+			SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			error = EINVAL;
+			goto out_unlocked;
+
+		}
+		if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+		    (addr->sa_family == AF_INET6)) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			error = EINVAL;
+			goto out_unlocked;
+		}
+		SCTP_INP_WLOCK(inp);
+		SCTP_INP_INCR_REF(inp);
+		SCTP_INP_WUNLOCK(inp);
+		/* With the lock applied look again */
+		stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
+		if ((stcb == NULL) && (control != NULL) && (port > 0)) {
+			stcb = sctp_findassociation_cmsgs(&t_inp, port, control, &net, &error);
+		}
+		if (stcb == NULL) {
+			SCTP_INP_WLOCK(inp);
+			SCTP_INP_DECR_REF(inp);
+			SCTP_INP_WUNLOCK(inp);
+		} else {
+			hold_tcblock = 1;
+		}
+		if (error) {
+			goto out_unlocked;
+		}
+		if (t_inp != inp) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOTCONN);
+			error = ENOTCONN;
+			goto out_unlocked;
+		}
+	}
+	if (stcb == NULL) {
+		if (addr == NULL) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
+			error = ENOENT;
+			goto out_unlocked;
+		} else {
+			/* We must go ahead and start the INIT process */
+			uint32_t vrf_id;
+
+			if ((sinfo_flags & SCTP_ABORT) ||
+			    ((sinfo_flags & SCTP_EOF) && (sndlen == 0))) {
+				/*-
+				 * User asks to abort a non-existent assoc,
+				 * or EOF a non-existent assoc with no data
+				 */
+				SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOENT);
+				error = ENOENT;
+				goto out_unlocked;
+			}
+			/* get an asoc/stcb struct */
+			vrf_id = inp->def_vrf_id;
+#ifdef INVARIANTS
+			if (create_lock_applied == 0) {
+				panic("Error, should hold create lock and I don't?");
+			}
+#endif
+			stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+			                       inp->sctp_ep.pre_open_stream_count,
+			                       inp->sctp_ep.port,
+#if !(defined( __Panda__) || defined(__Userspace__))
+			                       p);
+#else
+			                       (struct proc *)NULL);
+#endif
+			if (stcb == NULL) {
+				/* Error is setup for us in the call */
+				goto out_unlocked;
+			}
+			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+				stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+				/* Set the connected flag so we can queue data */
+				soisconnecting(so);
+			}
+			hold_tcblock = 1;
+			if (create_lock_applied) {
+				SCTP_ASOC_CREATE_UNLOCK(inp);
+				create_lock_applied = 0;
+			} else {
+				SCTP_PRINTF("Huh-3? create lock should have been on??\n");
+			}
+			/* Turn on queue only flag to prevent data from being sent */
+			queue_only = 1;
+			asoc = &stcb->asoc;
+			SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
+			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
+
+			/* initialize authentication params for the assoc */
+			sctp_initialize_auth_params(inp, stcb);
+
+			if (control) {
+				if (sctp_process_cmsgs_for_init(stcb, control, &error)) {
+					sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
+					                SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
+					hold_tcblock = 0;
+					stcb = NULL;
+					goto out_unlocked;
+				}
+			}
+			/* out with the INIT */
+			queue_only_for_init = 1;
+			/*-
+			 * we may want to dig in after this call and adjust the MTU
+			 * value. It defaulted to 1500 (constant) but the ro
+			 * structure may now have an update and thus we may need to
+			 * change it BEFORE we append the message.
+			 */
+		}
+	} else
+		asoc = &stcb->asoc;
+	if (srcv == NULL)
+		srcv = (struct sctp_sndrcvinfo *)&asoc->def_send;
+	if (srcv->sinfo_flags & SCTP_ADDR_OVER) {
+		if (addr)
+			net = sctp_findnet(stcb, addr);
+		else
+			net = NULL;
+		if ((net == NULL) ||
+		    ((port != 0) && (port != stcb->rport))) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			error = EINVAL;
+			goto out_unlocked;
+		}
+	} else {
+		if (stcb->asoc.alternate) {
+			net = stcb->asoc.alternate;
+		} else {
+			net = stcb->asoc.primary_destination;
+		}
+	}
+	atomic_add_int(&stcb->total_sends, 1);
+	/* Keep the stcb from being freed under our feet */
+	atomic_add_int(&asoc->refcnt, 1);
+	free_cnt_applied = 1;
+
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT)) {
+		if (sndlen > asoc->smallest_mtu) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+			error = EMSGSIZE;
+			goto out_unlocked;
+		}
+	}
+#if defined(__Userspace__)
+	if (inp->recv_callback) {
+		non_blocking = 1;
+	}
+#endif
+	if (SCTP_SO_IS_NBIO(so)
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+	     || (flags & MSG_NBIO)
+#endif
+	    ) {
+		non_blocking = 1;
+	}
+	/* would we block? */
+	if (non_blocking) {
+		if (hold_tcblock == 0) {
+			SCTP_TCB_LOCK(stcb);
+			hold_tcblock = 1;
+		}
+		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+		if ((SCTP_SB_LIMIT_SND(so) <  (sndlen + inqueue_bytes + stcb->asoc.sb_send_resv)) ||
+		    (stcb->asoc.chunks_on_out_queue >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EWOULDBLOCK);
+			if (sndlen > SCTP_SB_LIMIT_SND(so))
+				error = EMSGSIZE;
+			else
+				error = EWOULDBLOCK;
+			goto out_unlocked;
+		}
+		stcb->asoc.sb_send_resv += sndlen;
+		SCTP_TCB_UNLOCK(stcb);
+		hold_tcblock = 0;
+	} else {
+		atomic_add_int(&stcb->asoc.sb_send_resv, sndlen);
+	}
+	local_soresv = sndlen;
+	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+		error = ECONNRESET;
+		goto out_unlocked;
+	}
+	if (create_lock_applied) {
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+		create_lock_applied = 0;
+	}
+	/* Is the stream no. valid? */
+	if (srcv->sinfo_stream >= asoc->streamoutcnt) {
+		/* Invalid stream number */
+		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		error = EINVAL;
+		goto out_unlocked;
+	}
+	if ((asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPEN) &&
+	    (asoc->strmout[srcv->sinfo_stream].state != SCTP_STREAM_OPENING)) {
+		/*
+		 * Can't queue any data while stream reset is underway.
+		 */
+		if (asoc->strmout[srcv->sinfo_stream].state > SCTP_STREAM_OPEN) {
+			error = EAGAIN;
+		} else {
+			error = EINVAL;
+		}
+		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, error);
+		goto out_unlocked;
+	}
+	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+		queue_only = 1;
+	}
+	/* we are now done with all control */
+	if (control) {
+		sctp_m_freem(control);
+		control = NULL;
+	}
+	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
+	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
+	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
+		if (srcv->sinfo_flags & SCTP_ABORT) {
+			;
+		} else {
+			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+			error = ECONNRESET;
+			goto out_unlocked;
+		}
+	}
+	/* Ok, we will attempt a msgsnd :> */
+#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
+	if (p) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 603000
+		p->td_ru.ru_msgsnd++;
+#elif defined(__FreeBSD__) && __FreeBSD_version >= 500000
+		p->td_proc->p_stats->p_ru.ru_msgsnd++;
+#else
+		p->p_stats->p_ru.ru_msgsnd++;
+#endif
+	}
+#endif
+	/* Are we aborting? */
+	if (srcv->sinfo_flags & SCTP_ABORT) {
+		struct mbuf *mm;
+		int tot_demand, tot_out = 0, max_out;
+
+		SCTP_STAT_INCR(sctps_sends_with_abort);
+		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
+		    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+			/* It has to be up before we abort */
+			/* how big is the user initiated abort? */
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			error = EINVAL;
+			goto out;
+		}
+		if (hold_tcblock) {
+			SCTP_TCB_UNLOCK(stcb);
+			hold_tcblock = 0;
+		}
+		if (top) {
+			struct mbuf *cntm = NULL;
+
+			mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_WAITOK, 1, MT_DATA);
+			if (sndlen != 0) {
+				for (cntm = top; cntm; cntm = SCTP_BUF_NEXT(cntm)) {
+					tot_out += SCTP_BUF_LEN(cntm);
+				}
+			}
+		} else {
+			/* Must fit in a MTU */
+			tot_out = sndlen;
+			tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
+			if (tot_demand > SCTP_DEFAULT_ADD_MORE) {
+				/* Too big */
+				SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+				error = EMSGSIZE;
+				goto out;
+			}
+			mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAITOK, 1, MT_DATA);
+		}
+		if (mm == NULL) {
+			SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, ENOMEM);
+			error = ENOMEM;
+			goto out;
+		}
+		max_out = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
+		max_out -= sizeof(struct sctp_abort_msg);
+		if (tot_out > max_out) {
+			tot_out = max_out;
+		}
+		if (mm) {
+			struct sctp_paramhdr *ph;
+
+			/* now move forward the data pointer */
+			ph = mtod(mm, struct sctp_paramhdr *);
+			ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+			ph->param_length = htons((uint16_t)(sizeof(struct sctp_paramhdr) + tot_out));
+			ph++;
+			SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
+			if (top == NULL) {
+#if defined(__APPLE__)
+				SCTP_SOCKET_UNLOCK(so, 0);
+#endif
+				error = uiomove((caddr_t)ph, (int)tot_out, uio);
+#if defined(__APPLE__)
+				SCTP_SOCKET_LOCK(so, 0);
+#endif
+				if (error) {
+					/*-
+					 * Here, if we can't get the user's data we
+					 * still abort; we just don't get to
+					 * send the user's note :-0
+					 */
+					sctp_m_freem(mm);
+					mm = NULL;
+				}
+			} else {
+				if (sndlen != 0) {
+					SCTP_BUF_NEXT(mm) = top;
+				}
+			}
+		}
+		if (hold_tcblock == 0) {
+			SCTP_TCB_LOCK(stcb);
+		}
+		atomic_add_int(&stcb->asoc.refcnt, -1);
+		free_cnt_applied = 0;
+		/* release this lock, otherwise we hang on ourselves */
+		sctp_abort_an_association(stcb->sctp_ep, stcb, mm, SCTP_SO_LOCKED);
+		/* now relock the stcb so everything is sane */
+		hold_tcblock = 0;
+		stcb = NULL;
+		/* In this case top is already chained to mm,
+		 * so avoid a double free: we free it below if
+		 * top != NULL and the driver would free it after sending
+		 * the packet out.
+		 */
+		if (sndlen != 0) {
+			top = NULL;
+		}
+		goto out_unlocked;
+	}
+	/* Calculate the maximum we can send */
+	inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+	if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
+		if (non_blocking) {
+			/* we already checked for non-blocking above. */
+			max_len = sndlen;
+		} else {
+			max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+		}
+	} else {
+		max_len = 0;
+	}
+	if (hold_tcblock) {
+		SCTP_TCB_UNLOCK(stcb);
+		hold_tcblock = 0;
+	}
+	if (asoc->strmout == NULL) {
+		/* huh? software error */
+		SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EFAULT);
+		error = EFAULT;
+		goto out_unlocked;
+	}
+
+	/* Unless E_EOR mode is on, we must make a send FIT in one call. */
+	if ((user_marks_eor == 0) &&
+	    (sndlen > SCTP_SB_LIMIT_SND(stcb->sctp_socket))) {
+		/* It will NEVER fit */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EMSGSIZE);
+		error = EMSGSIZE;
+		goto out_unlocked;
+	}
+	if ((uio == NULL) && user_marks_eor) {
+		/*-
+		 * We do not support eeor mode for
+		 * sending with mbuf chains (like sendfile).
+		 */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+		error = EINVAL;
+		goto out_unlocked;
+	}
+
+	if (user_marks_eor) {
+		local_add_more = min(SCTP_SB_LIMIT_SND(so), SCTP_BASE_SYSCTL(sctp_add_more_threshold));
+	} else {
+		/*-
+		 * For non-eeor the whole message must fit in
+		 * the socket send buffer.
+		 */
+		local_add_more = sndlen;
+	}
+	len = 0;
+	if (non_blocking) {
+		goto skip_preblock;
+	}
+	if (((max_len <= local_add_more) &&
+	     (SCTP_SB_LIMIT_SND(so) >= local_add_more)) ||
+	    (max_len == 0) ||
+	    ((stcb->asoc.chunks_on_out_queue+stcb->asoc.stream_queue_cnt) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+		/* No room right now ! */
+		SOCKBUF_LOCK(&so->so_snd);
+		inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+		while ((SCTP_SB_LIMIT_SND(so) < (inqueue_bytes + local_add_more)) ||
+		       ((stcb->asoc.stream_queue_cnt+stcb->asoc.chunks_on_out_queue) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue))) {
+			SCTPDBG(SCTP_DEBUG_OUTPUT1,"pre_block limit:%u <(inq:%d + %d) || (%d+%d > %d)\n",
+			        (unsigned int)SCTP_SB_LIMIT_SND(so),
+			        inqueue_bytes,
+			        local_add_more,
+			        stcb->asoc.stream_queue_cnt,
+			        stcb->asoc.chunks_on_out_queue,
+			        SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue));
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+				sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA, asoc, sndlen);
+			}
+			be.error = 0;
+#if !defined(__Panda__) && !defined(__Windows__)
+			stcb->block_entry = &be;
+#endif
+			error = sbwait(&so->so_snd);
+			stcb->block_entry = NULL;
+			if (error || so->so_error || be.error) {
+				if (error == 0) {
+					if (so->so_error)
+						error = so->so_error;
+					if (be.error) {
+						error = be.error;
+					}
+				}
+				SOCKBUF_UNLOCK(&so->so_snd);
+				goto out_unlocked;
+			}
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+				sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+				               asoc, stcb->asoc.total_output_queue_size);
+			}
+			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				SOCKBUF_UNLOCK(&so->so_snd);
+				goto out_unlocked;
+			}
+			inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+		}
+		if (SCTP_SB_LIMIT_SND(so) > inqueue_bytes) {
+			max_len = SCTP_SB_LIMIT_SND(so) -  inqueue_bytes;
+		} else {
+			max_len = 0;
+		}
+		SOCKBUF_UNLOCK(&so->so_snd);
+	}
+
+skip_preblock:
+	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+		goto out_unlocked;
+	}
+#if defined(__APPLE__)
+	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+#endif
+	/* sndlen covers for mbuf case
+	 * uio_resid covers for the non-mbuf case
+	 * NOTE: uio will be null when top/mbuf is passed
+	 */
+	if (sndlen == 0) {
+		if (srcv->sinfo_flags & SCTP_EOF) {
+			got_all_of_the_send = 1;
+			goto dataless_eof;
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			error = EINVAL;
+			goto out;
+		}
+	}
+	if (top == NULL) {
+		struct sctp_stream_queue_pending *sp;
+		struct sctp_stream_out *strm;
+		uint32_t sndout;
+
+		SCTP_TCB_SEND_LOCK(stcb);
+		if ((asoc->stream_locked) &&
+		    (asoc->stream_locked_on  != srcv->sinfo_stream)) {
+			SCTP_TCB_SEND_UNLOCK(stcb);
+			SCTP_LTRACE_ERR_RET(inp, stcb, net, SCTP_FROM_SCTP_OUTPUT, EINVAL);
+			error = EINVAL;
+			goto out;
+		}
+		SCTP_TCB_SEND_UNLOCK(stcb);
+
+		strm = &stcb->asoc.strmout[srcv->sinfo_stream];
+		if (strm->last_msg_incomplete == 0) {
+		do_a_copy_in:
+			sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error);
+			if ((sp == NULL) || (error)) {
+				goto out;
+			}
+			SCTP_TCB_SEND_LOCK(stcb);
+			if (sp->msg_is_complete) {
+				strm->last_msg_incomplete = 0;
+				asoc->stream_locked = 0;
+			} else {
+				/* Just got locked to this guy in
+				 * case of an interrupt.
+				 */
+				strm->last_msg_incomplete = 1;
+				if (stcb->asoc.idata_supported == 0) {
+					asoc->stream_locked = 1;
+					asoc->stream_locked_on  = srcv->sinfo_stream;
+				}
+				sp->sender_all_done = 0;
+			}
+			sctp_snd_sb_alloc(stcb, sp->length);
+			atomic_add_int(&asoc->stream_queue_cnt, 1);
+			if (srcv->sinfo_flags & SCTP_UNORDERED) {
+				SCTP_STAT_INCR(sctps_sends_with_unord);
+			}
+			TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
+			stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, asoc, strm, sp, 1);
+			SCTP_TCB_SEND_UNLOCK(stcb);
+		} else {
+			SCTP_TCB_SEND_LOCK(stcb);
+			sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
+			SCTP_TCB_SEND_UNLOCK(stcb);
+			if (sp == NULL) {
+				/* ???? Huh ??? last msg is gone */
+#ifdef INVARIANTS
+				panic("Warning: Last msg marked incomplete, yet nothing left?");
+#else
+				SCTP_PRINTF("Warning: Last msg marked incomplete, yet nothing left?\n");
+				strm->last_msg_incomplete = 0;
+#endif
+				goto do_a_copy_in;
+
+			}
+		}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		while (uio->uio_resid > 0) {
+#else
+		while (uio_resid(uio) > 0) {
+#endif
+#else
+		while (uio->uio_resid > 0) {
+#endif
+			/* How much room do we have? */
+			struct mbuf *new_tail, *mm;
+
+			if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
+				max_len = SCTP_SB_LIMIT_SND(so) - stcb->asoc.total_output_queue_size;
+			else
+				max_len = 0;
+
+			if ((max_len > SCTP_BASE_SYSCTL(sctp_add_more_threshold)) ||
+			    (max_len && (SCTP_SB_LIMIT_SND(so) < SCTP_BASE_SYSCTL(sctp_add_more_threshold))) ||
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+			    (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
+#else
+			    (uio_resid(uio) && (uio_resid(uio) <= (int)max_len))) {
+#endif
+#else
+			    (uio->uio_resid && (uio->uio_resid <= (int)max_len))) {
+#endif
+				sndout = 0;
+				new_tail = NULL;
+				if (hold_tcblock) {
+					SCTP_TCB_UNLOCK(stcb);
+					hold_tcblock = 0;
+				}
+#if defined(__APPLE__)
+				SCTP_SOCKET_UNLOCK(so, 0);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version > 602000
+				    mm = sctp_copy_resume(uio, max_len, user_marks_eor, &error, &sndout, &new_tail);
+#else
+				    mm = sctp_copy_resume(uio, max_len, &error, &sndout, &new_tail);
+#endif
+#if defined(__APPLE__)
+				SCTP_SOCKET_LOCK(so, 0);
+#endif
+				if ((mm == NULL) || error) {
+					if (mm) {
+						sctp_m_freem(mm);
+					}
+					goto out;
+				}
+				/* Update the mbuf and count */
+				SCTP_TCB_SEND_LOCK(stcb);
+				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+					/* we need to get out.
+					 * Peer probably aborted.
+					 */
+					sctp_m_freem(mm);
+					if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED) {
+						SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_OUTPUT, ECONNRESET);
+						error = ECONNRESET;
+					}
+					SCTP_TCB_SEND_UNLOCK(stcb);
+					goto out;
+				}
+				if (sp->tail_mbuf) {
+					/* tack it to the end */
+					SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
+					sp->tail_mbuf = new_tail;
+				} else {
+					/* A stolen mbuf */
+					sp->data = mm;
+					sp->tail_mbuf = new_tail;
+				}
+				sctp_snd_sb_alloc(stcb, sndout);
+				atomic_add_int(&sp->length, sndout);
+				len += sndout;
+				if (srcv->sinfo_flags & SCTP_SACK_IMMEDIATELY) {
+					sp->sinfo_flags |= SCTP_SACK_IMMEDIATELY;
+				}
+
+				/* Did we reach EOR? */
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+				if ((uio->uio_resid == 0) &&
+#else
+				if ((uio_resid(uio) == 0) &&
+#endif
+#else
+				if ((uio->uio_resid == 0) &&
+#endif
+				    ((user_marks_eor == 0) ||
+				     (srcv->sinfo_flags & SCTP_EOF) ||
+				     (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))) {
+					sp->msg_is_complete = 1;
+				} else {
+					sp->msg_is_complete = 0;
+				}
+				SCTP_TCB_SEND_UNLOCK(stcb);
+			}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+			if (uio->uio_resid == 0) {
+#else
+			if (uio_resid(uio) == 0) {
+#endif
+#else
+			if (uio->uio_resid == 0) {
+#endif
+				/* got it all? */
+				continue;
+			}
+			/* PR-SCTP? */
+			if ((asoc->prsctp_supported) && (asoc->sent_queue_cnt_removeable > 0)) {
+				/* This is ugly but we must ensure locking order */
+				if (hold_tcblock == 0) {
+					SCTP_TCB_LOCK(stcb);
+					hold_tcblock = 1;
+				}
+				sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
+				inqueue_bytes = stcb->asoc.total_output_queue_size - (stcb->asoc.chunks_on_out_queue * sizeof(struct sctp_data_chunk));
+				if (SCTP_SB_LIMIT_SND(so) > stcb->asoc.total_output_queue_size)
+					max_len = SCTP_SB_LIMIT_SND(so) - inqueue_bytes;
+				else
+					max_len = 0;
+				if (max_len > 0) {
+					continue;
+				}
+				SCTP_TCB_UNLOCK(stcb);
+				hold_tcblock = 0;
+			}
+			/* wait for space now */
+			if (non_blocking) {
+				/* Non-blocking io in place out */
+				goto skip_out_eof;
+			}
+			/* What about the INIT, send it maybe */
+			if (queue_only_for_init) {
+				if (hold_tcblock == 0) {
+					SCTP_TCB_LOCK(stcb);
+					hold_tcblock = 1;
+				}
+				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+					/* a collision took us forward? */
+					queue_only = 0;
+				} else {
+					sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+					SCTP_SET_STATE(asoc, SCTP_STATE_COOKIE_WAIT);
+					queue_only = 1;
+				}
+			}
+			if ((net->flight_size > net->cwnd) &&
+			    (asoc->sctp_cmt_on_off == 0)) {
+				SCTP_STAT_INCR(sctps_send_cwnd_avoid);
+				queue_only = 1;
+			} else if (asoc->ifp_had_enobuf) {
+				SCTP_STAT_INCR(sctps_ifnomemqueued);
+				if (net->flight_size > (2 * net->mtu)) {
+					queue_only = 1;
+				}
+				asoc->ifp_had_enobuf = 0;
+			}
+			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+			           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+			if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+			    (stcb->asoc.total_flight > 0) &&
+			    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
+			    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+
+				/*-
+				 * Ok, Nagle is set on and we have data outstanding.
+				 * Don't send anything and let SACKs drive out the
+				 * data unless we have a "full" segment to send.
+				 */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+					sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+				}
+				SCTP_STAT_INCR(sctps_naglequeued);
+				nagle_applies = 1;
+			} else {
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+					if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+						sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+				}
+				SCTP_STAT_INCR(sctps_naglesent);
+				nagle_applies = 0;
+			}
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+
+				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
+					       nagle_applies, un_sent);
+				sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
+					       stcb->asoc.total_flight,
+					       stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
+			}
+			if (queue_only_for_init)
+				queue_only_for_init = 0;
+			if ((queue_only == 0) && (nagle_applies == 0)) {
+				/*-
+				 * need to start chunk output
+				 * before blocking.. note that if
+				 * a lock is already applied, then
+				 * the input via the net is happening
+				 * and I don't need to start output :-D
+				 */
+				if (hold_tcblock == 0) {
+					if (SCTP_TCB_TRYLOCK(stcb)) {
+						hold_tcblock = 1;
+						sctp_chunk_output(inp,
+								  stcb,
+								  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+					}
+				} else {
+					sctp_chunk_output(inp,
+							  stcb,
+							  SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+				}
+				if (hold_tcblock == 1) {
+					SCTP_TCB_UNLOCK(stcb);
+					hold_tcblock = 0;
+				}
+			}
+			SOCKBUF_LOCK(&so->so_snd);
+			/*-
+			 * This is a bit strange, but I think it will
+			 * work. The total_output_queue_size is locked and
+			 * protected by the TCB_LOCK, which we just released.
+			 * There is a race that can occur between releasing it
+			 * above, and me getting the socket lock, where sacks
+			 * come in but we have not put the SB_WAIT on the
+			 * so_snd buffer to get the wakeup. After the LOCK
+			 * is applied the sack_processing will also need to
+			 * LOCK the so->so_snd to do the actual sowwakeup(). So
+			 * once we have the socket buffer lock if we recheck the
+			 * size we KNOW we will get to sleep safely with the
+			 * wakeup flag in place.
+			 */
+			if (SCTP_SB_LIMIT_SND(so) <= (stcb->asoc.total_output_queue_size +
+						      min(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTP_SB_LIMIT_SND(so)))) {
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+						       asoc, uio->uio_resid);
+#else
+					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+						       asoc, uio_resid(uio));
+#endif
+#else
+					sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
+						       asoc, (size_t)uio->uio_resid);
+#endif
+				}
+				be.error = 0;
+#if !defined(__Panda__) && !defined(__Windows__)
+				stcb->block_entry = &be;
+#endif
+#if defined(__APPLE__)
+				sbunlock(&so->so_snd, 1);
+#endif
+				error = sbwait(&so->so_snd);
+				stcb->block_entry = NULL;
+
+				if (error || so->so_error || be.error) {
+					if (error == 0) {
+						if (so->so_error)
+							error = so->so_error;
+						if (be.error) {
+							error = be.error;
+						}
+					}
+					SOCKBUF_UNLOCK(&so->so_snd);
+					goto out_unlocked;
+				}
+
+#if defined(__APPLE__)
+				error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+#endif
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+					sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
+						       asoc, stcb->asoc.total_output_queue_size);
+				}
+			}
+			SOCKBUF_UNLOCK(&so->so_snd);
+			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				goto out_unlocked;
+			}
+		}
+		SCTP_TCB_SEND_LOCK(stcb);
+		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			SCTP_TCB_SEND_UNLOCK(stcb);
+			goto out_unlocked;
+		}
+		if (sp) {
+			if (sp->msg_is_complete == 0) {
+				strm->last_msg_incomplete = 1;
+				if (stcb->asoc.idata_supported == 0) {
+					asoc->stream_locked = 1;
+					asoc->stream_locked_on  = srcv->sinfo_stream;
+				}
+			} else {
+				sp->sender_all_done = 1;
+				strm->last_msg_incomplete = 0;
+				asoc->stream_locked = 0;
+			}
+		} else {
+			SCTP_PRINTF("Huh no sp TSNH?\n");
+			strm->last_msg_incomplete = 0;
+			asoc->stream_locked = 0;
+		}
+		SCTP_TCB_SEND_UNLOCK(stcb);
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		if (uio->uio_resid == 0) {
+#else
+		if (uio_resid(uio) == 0) {
+#endif
+#else
+		if (uio->uio_resid == 0) {
+#endif
+			got_all_of_the_send = 1;
+		}
+	} else {
+		/* We send in a 0, since we do NOT have any locks */
+		error = sctp_msg_append(stcb, net, top, srcv, 0);
+		top = NULL;
+		if (srcv->sinfo_flags & SCTP_EOF) {
+			/*
+			 * This should only happen for Panda for the mbuf
+			 * send case, which does NOT yet support EEOR mode.
+			 * Thus, we can just set this flag to do the proper
+			 * EOF handling.
+			 */
+			got_all_of_the_send = 1;
+		}
+	}
+	if (error) {
+		goto out;
+	}
+dataless_eof:
+	/* EOF thing ? */
+	if ((srcv->sinfo_flags & SCTP_EOF) &&
+	    (got_all_of_the_send == 1)) {
+		SCTP_STAT_INCR(sctps_sends_with_eof);
+		error = 0;
+		if (hold_tcblock == 0) {
+			SCTP_TCB_LOCK(stcb);
+			hold_tcblock = 1;
+		}
+		if (TAILQ_EMPTY(&asoc->send_queue) &&
+		    TAILQ_EMPTY(&asoc->sent_queue) &&
+		    sctp_is_there_unsent_data(stcb, SCTP_SO_LOCKED) == 0) {
+			if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+				goto abort_anyway;
+			}
+			/* there is nothing queued to send, so I'm done... */
+			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+				struct sctp_nets *netp;
+
+				/* only send SHUTDOWN the first time through */
+				if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
+					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+				}
+				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+				sctp_stop_timers_for_shutdown(stcb);
+				if (stcb->asoc.alternate) {
+					netp = stcb->asoc.alternate;
+				} else {
+					netp = stcb->asoc.primary_destination;
+				}
+				sctp_send_shutdown(stcb, netp);
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
+				                 netp);
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+				                 asoc->primary_destination);
+			}
+		} else {
+			/*-
+			 * we still got (or just got) data to send, so set
+			 * SHUTDOWN_PENDING
+			 */
+			/*-
+			 * XXX sockets draft says that SCTP_EOF should be
+			 * sent with no data.  currently, we will allow user
+			 * data to be sent first and move to
+			 * SHUTDOWN-PENDING
+			 */
+			if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
+			    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+				if (hold_tcblock == 0) {
+					SCTP_TCB_LOCK(stcb);
+					hold_tcblock = 1;
+				}
+				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+				}
+				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+				if (TAILQ_EMPTY(&asoc->send_queue) &&
+				    TAILQ_EMPTY(&asoc->sent_queue) &&
+				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+					struct mbuf *op_err;
+					char msg[SCTP_DIAG_INFO_LEN];
+
+				abort_anyway:
+					if (free_cnt_applied) {
+						atomic_add_int(&stcb->asoc.refcnt, -1);
+						free_cnt_applied = 0;
+					}
+					snprintf(msg, sizeof(msg),
+					         "%s:%d at %s", __FILE__, __LINE__, __func__);
+					op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+					                             msg);
+					sctp_abort_an_association(stcb->sctp_ep, stcb,
+					                          op_err, SCTP_SO_LOCKED);
+					/* now relock the stcb so everything is sane */
+					hold_tcblock = 0;
+					stcb = NULL;
+					goto out;
+				}
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+				                 asoc->primary_destination);
+				sctp_feature_off(inp, SCTP_PCB_FLAGS_NODELAY);
+			}
+		}
+	}
+skip_out_eof:
+	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
+		some_on_control = 1;
+	}
+	if (queue_only_for_init) {
+		if (hold_tcblock == 0) {
+			SCTP_TCB_LOCK(stcb);
+			hold_tcblock = 1;
+		}
+		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
+			/* a collision took us forward? */
+			queue_only = 0;
+		} else {
+			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+			SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
+			queue_only = 1;
+		}
+	}
+	if ((net->flight_size > net->cwnd) &&
+	    (stcb->asoc.sctp_cmt_on_off == 0)) {
+		SCTP_STAT_INCR(sctps_send_cwnd_avoid);
+		queue_only = 1;
+	} else if (asoc->ifp_had_enobuf) {
+		SCTP_STAT_INCR(sctps_ifnomemqueued);
+		if (net->flight_size > (2 * net->mtu)) {
+			queue_only = 1;
+		}
+		asoc->ifp_had_enobuf = 0;
+	}
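+	/* Unsent payload still queued (not yet in flight) plus one data chunk
+	 * header per queued stream message; input to the Nagle check below. */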
+	un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
+	           (stcb->asoc.stream_queue_cnt * sizeof(struct sctp_data_chunk)));
+	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
+	    (stcb->asoc.total_flight > 0) &&
+	    (stcb->asoc.stream_queue_cnt < SCTP_MAX_DATA_BUNDLING) &&
+	    (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))) {
+		/*-
+		 * Ok, Nagle is set on and we have data outstanding.
+		 * Don't send anything and let SACKs drive out the
+		 * data unless we have a "full" segment to send.
+		 */
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+			sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
+		}
+		SCTP_STAT_INCR(sctps_naglequeued);
+		nagle_applies = 1;
+	} else {
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_NAGLE_LOGGING_ENABLE) {
+			if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
+				sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
+		}
+		SCTP_STAT_INCR(sctps_naglesent);
+		nagle_applies = 0;
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_BLK_LOGGING_ENABLE) {
+		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only,
+		               nagle_applies, un_sent);
+		sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size,
+		               stcb->asoc.total_flight,
+		               stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
+	}
+	if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
+		/* we can attempt to send too. */
+		if (hold_tcblock == 0) {
+			/* If there is activity recv'ing sacks no need to send */
+			if (SCTP_TCB_TRYLOCK(stcb)) {
+				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+				hold_tcblock = 1;
+			}
+		} else {
+			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+		}
+	} else if ((queue_only == 0) &&
+	           (stcb->asoc.peers_rwnd == 0) &&
+	           (stcb->asoc.total_flight == 0)) {
+		/* We get to have a probe outstanding */
+		if (hold_tcblock == 0) {
+			hold_tcblock = 1;
+			SCTP_TCB_LOCK(stcb);
+		}
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND, SCTP_SO_LOCKED);
+	} else if (some_on_control) {
+		int num_out, reason, frag_point;
+
+		/* Here we do control only */
+		if (hold_tcblock == 0) {
+			hold_tcblock = 1;
+			SCTP_TCB_LOCK(stcb);
+		}
+		frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
+		(void)sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
+		                            &reason, 1, 1, &now, &now_filled, frag_point, SCTP_SO_LOCKED);
+	}
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d err:%d\n",
+	        queue_only, stcb->asoc.peers_rwnd, un_sent,
+	        stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
+	        stcb->asoc.total_output_queue_size, error);
+
+out:
+#if defined(__APPLE__)
+	sbunlock(&so->so_snd, 1);
+#endif
+out_unlocked:
+
+	if (local_soresv && stcb) {
+		atomic_subtract_int(&stcb->asoc.sb_send_resv, sndlen);
+	}
+	if (create_lock_applied) {
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+	}
+	if ((stcb) && hold_tcblock) {
+		SCTP_TCB_UNLOCK(stcb);
+	}
+	if (stcb && free_cnt_applied) {
+		atomic_add_int(&stcb->asoc.refcnt, -1);
+	}
+#ifdef INVARIANTS
+#if defined(__FreeBSD__)
+	if (stcb) {
+		if (mtx_owned(&stcb->tcb_mtx)) {
+			panic("Leaving with tcb mtx owned?");
+		}
+		if (mtx_owned(&stcb->tcb_send_mtx)) {
+			panic("Leaving with tcb send mtx owned?");
+		}
+	}
+#endif
+#endif
+#ifdef __Panda__
+	/*
+	 * Handle the EAGAIN/ENOMEM cases to reattach the pak header
+	 * to particle when pak is passed in, so that caller
+	 * can try again with this pak
+	 *
+	 * NOTE: For other cases, including success case,
+	 * we simply want to return the header back to free
+	 * pool
+	 */
+	if (top) {
+		if ((error == EAGAIN) || (error == ENOMEM)) {
+			SCTP_ATTACH_CHAIN(i_pak, top, sndlen);
+			top = NULL;
+		} else {
+			(void)SCTP_RELEASE_HEADER(i_pak);
+		}
+	} else {
+		/* This is to handle cases when top has
+		 * been reset to NULL but pak might not
+		 * be freed
+		 */
+		if (i_pak) {
+			(void)SCTP_RELEASE_HEADER(i_pak);
+		}
+	}
+#endif
+	if (top) {
+		sctp_m_freem(top);
+	}
+	if (control) {
+		sctp_m_freem(control);
+	}
+	return (error);
+}
+
+
+/*
+ * generate an AUTHentication chunk, if required
+ */
+struct mbuf *
+sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
+    struct sctp_auth_chunk **auth_ret, uint32_t * offset,
+    struct sctp_tcb *stcb, uint8_t chunk)
+{
+	struct mbuf *m_auth;
+	struct sctp_auth_chunk *auth;
+	int chunk_len;
+	struct mbuf *cn;
+
+	if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
+	    (stcb == NULL))
+		return (m);
+
+	if (stcb->asoc.auth_supported == 0) {
+		return (m);
+	}
+	/* does the requested chunk require auth? */
+	if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
+		return (m);
+	}
+	m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_NOWAIT, 1, MT_HEADER);
+	if (m_auth == NULL) {
+		/* no mbuf's */
+		return (m);
+	}
+	/* reserve some space if this will be the first mbuf */
+	if (m == NULL)
+		SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
+	/* fill in the AUTH chunk details */
+	auth = mtod(m_auth, struct sctp_auth_chunk *);
+	bzero(auth, sizeof(*auth));
+	auth->ch.chunk_type = SCTP_AUTHENTICATION;
+	auth->ch.chunk_flags = 0;
+	chunk_len = sizeof(*auth) +
+	    sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
+	auth->ch.chunk_length = htons(chunk_len);
+	auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
+	/* key id and hmac digest will be computed and filled in upon send */
+
+	/* save the offset where the auth was inserted into the chain */
+	*offset = 0;
+	for (cn = m; cn; cn = SCTP_BUF_NEXT(cn)) {
+		*offset += SCTP_BUF_LEN(cn);
+	}
+
+	/* update length and return pointer to the auth chunk */
+	SCTP_BUF_LEN(m_auth) = chunk_len;
+	m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
+	if (auth_ret != NULL)
+		*auth_ret = auth;
+
+	return (m);
+}
+
+#if defined(__FreeBSD__)  || defined(__APPLE__)
+#ifdef INET6
+int
+sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
+{
+	struct nd_prefix *pfx = NULL;
+	struct nd_pfxrouter *pfxrtr = NULL;
+	struct sockaddr_in6 gw6;
+
+	if (ro == NULL || ro->ro_rt == NULL || src6->sin6_family != AF_INET6)
+		return (0);
+
+	/* get prefix entry of address */
+#if defined(__FreeBSD__)
+	ND6_RLOCK();
+#endif
+	LIST_FOREACH(pfx, &MODULE_GLOBAL(nd_prefix), ndpr_entry) {
+		if (pfx->ndpr_stateflags & NDPRF_DETACHED)
+			continue;
+		if (IN6_ARE_MASKED_ADDR_EQUAL(&pfx->ndpr_prefix.sin6_addr,
+		    &src6->sin6_addr, &pfx->ndpr_mask))
+			break;
+	}
+	/* no prefix entry in the prefix list */
+	if (pfx == NULL) {
+#if defined(__FreeBSD__)
+		ND6_RUNLOCK();
+#endif
+		SCTPDBG(SCTP_DEBUG_OUTPUT2, "No prefix entry for ");
+		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
+		return (0);
+	}
+
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "v6src_match_nexthop(), Prefix entry is ");
+	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)src6);
+
+	/* search installed gateway from prefix entry */
+	LIST_FOREACH(pfxrtr, &pfx->ndpr_advrtrs, pfr_entry) {
+		memset(&gw6, 0, sizeof(struct sockaddr_in6));
+		gw6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		gw6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		memcpy(&gw6.sin6_addr, &pfxrtr->router->rtaddr,
+		    sizeof(struct in6_addr));
+		SCTPDBG(SCTP_DEBUG_OUTPUT2, "prefix router is ");
+		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, (struct sockaddr *)&gw6);
+		SCTPDBG(SCTP_DEBUG_OUTPUT2, "installed router is ");
+		SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
+		if (sctp_cmpaddr((struct sockaddr *)&gw6, ro->ro_rt->rt_gateway)) {
+#if defined(__FreeBSD__)
+			ND6_RUNLOCK();
+#endif
+			SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is installed\n");
+			return (1);
+		}
+	}
+#if defined(__FreeBSD__)
+	ND6_RUNLOCK();
+#endif
+	SCTPDBG(SCTP_DEBUG_OUTPUT2, "pfxrouter is not installed\n");
+	return (0);
+}
+#endif
+
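+/*
+ * Return 1 if the IPv4 source address and the route's gateway fall in the
+ * same subnet (i.e. the gateway is a usable next hop for this source),
+ * 0 otherwise.
+ */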
+int
+sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
+{
+#ifdef INET
+	struct sockaddr_in *sin, *mask;
+	struct ifaddr *ifa;
+	struct in_addr srcnetaddr, gwnetaddr;
+
+	if (ro == NULL || ro->ro_rt == NULL ||
+	    sifa->address.sa.sa_family != AF_INET) {
+		return (0);
+	}
+	ifa = (struct ifaddr *)sifa->ifa;
+	mask = (struct sockaddr_in *)(ifa->ifa_netmask);
+	sin = &sifa->address.sin;
+	srcnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: src address is ");
+	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, &sifa->address.sa);
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", srcnetaddr.s_addr);
+
+	sin = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
+	gwnetaddr.s_addr = (sin->sin_addr.s_addr & mask->sin_addr.s_addr);
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "match_nexthop4: nexthop is ");
+	SCTPDBG_ADDR(SCTP_DEBUG_OUTPUT2, ro->ro_rt->rt_gateway);
+	SCTPDBG(SCTP_DEBUG_OUTPUT1, "network address is %x\n", gwnetaddr.s_addr);
+	if (srcnetaddr.s_addr == gwnetaddr.s_addr) {
+		return (1);
+	}
+#endif
+	return (0);
+}
+#elif defined(__Userspace__)
+/* TODO __Userspace__ versions of sctp_vXsrc_match_nexthop(). */
+int
+sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro)
+{
+    return (0);
+}
+int
+sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro)
+{
+    return (0);
+}
+
+#endif
diff --git a/usrsctplib/netinet/sctp_output.h b/usrsctplib/netinet/sctp_output.h
new file mode 100755
index 0000000..fd628ba
--- /dev/null
+++ b/usrsctplib/netinet/sctp_output.h
@@ -0,0 +1,266 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_OUTPUT_H_
+#define _NETINET_SCTP_OUTPUT_H_
+
+#include <netinet/sctp_header.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+
+struct mbuf *
+sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp,
+                           struct sctp_tcb *stcb,
+			   struct sctp_scoping *scope,
+			   struct mbuf *m_at,
+			   int cnt_inits_to,
+			   uint16_t *padding_len, uint16_t *chunk_len);
+
+
+int sctp_is_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+
+
+int
+sctp_is_address_in_scope(struct sctp_ifa *ifa,
+                         struct sctp_scoping *scope,
+			 int do_update);
+
+int
+sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa);
+
+struct sctp_ifa *
+sctp_source_address_selection(struct sctp_inpcb *inp,
+			      struct sctp_tcb *stcb,
+			      sctp_route_t *ro, struct sctp_nets *net,
+			      int non_asoc_addr_ok, uint32_t vrf_id);
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+int
+sctp_v6src_match_nexthop(struct sockaddr_in6 *src6, sctp_route_t *ro);
+int
+sctp_v4src_match_nexthop(struct sctp_ifa *sifa, sctp_route_t *ro);
+#endif
+
+void sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    );
+
+void
+sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *,
+                       struct sctp_nets *, struct mbuf *,
+                       int, int,
+                       struct sockaddr *, struct sockaddr *,
+                       struct sctphdr *, struct sctp_init_chunk *,
+#if defined(__FreeBSD__)
+                       uint8_t, uint32_t,
+#endif
+                       uint32_t, uint16_t, int);
+
+struct mbuf *
+sctp_arethere_unrecognized_parameters(struct mbuf *, int, int *,
+				      struct sctp_chunkhdr *, int *);
+void sctp_queue_op_err(struct sctp_tcb *, struct mbuf *);
+
+int
+sctp_send_cookie_echo(struct mbuf *, int, struct sctp_tcb *,
+    struct sctp_nets *);
+
+void sctp_send_cookie_ack(struct sctp_tcb *);
+
+void
+sctp_send_heartbeat_ack(struct sctp_tcb *, struct mbuf *, int, int,
+    struct sctp_nets *);
+
+void
+sctp_remove_from_wheel(struct sctp_tcb *stcb,
+					   struct sctp_association *asoc,
+					   struct sctp_stream_out *strq, int holds_lock);
+
+
+void sctp_send_shutdown(struct sctp_tcb *, struct sctp_nets *);
+
+void sctp_send_shutdown_ack(struct sctp_tcb *, struct sctp_nets *);
+
+void sctp_send_shutdown_complete(struct sctp_tcb *, struct sctp_nets *, int);
+
+void sctp_send_shutdown_complete2(struct sockaddr *, struct sockaddr *,
+                                  struct sctphdr *,
+#if defined(__FreeBSD__)
+                                  uint8_t, uint32_t, uint16_t,
+#endif
+                                  uint32_t, uint16_t);
+
+void sctp_send_asconf(struct sctp_tcb *, struct sctp_nets *, int addr_locked);
+
+void sctp_send_asconf_ack(struct sctp_tcb *);
+
+int sctp_get_frag_point(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_cookies(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_toss_old_asconf(struct sctp_tcb *);
+
+void sctp_fix_ecn_echo(struct sctp_association *);
+
+void sctp_move_chunks_from_net(struct sctp_tcb *stcb, struct sctp_nets *net);
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+int
+sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+    struct mbuf *, struct thread *, int);
+#elif defined(__Windows__)
+int
+sctp_output(struct sctp_inpcb *, struct mbuf *, struct sockaddr *,
+    struct mbuf *, PKTHREAD, int);
+#else
+#if defined(__Userspace__)
+/* sctp_output is called by sctp_sendm. Not using sctp_sendm for __Userspace__ */
+#endif
+int
+sctp_output(struct sctp_inpcb *,
+#if defined(__Panda__)
+    pakhandle_type,
+#else
+    struct mbuf *,
+#endif
+    struct sockaddr *,
+#if defined(__Panda__)
+    pakhandle_type,
+#else
+    struct mbuf *,
+#endif
+    struct proc *, int);
+#endif
+
+void sctp_chunk_output(struct sctp_inpcb *, struct sctp_tcb *, int, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    );
+void sctp_send_abort_tcb(struct sctp_tcb *, struct mbuf *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    );
+
+void send_forward_tsn(struct sctp_tcb *, struct sctp_association *);
+
+void sctp_send_sack(struct sctp_tcb *, int);
+
+void sctp_send_hb(struct sctp_tcb *, struct sctp_nets *, int);
+
+void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t);
+
+
+void
+sctp_send_packet_dropped(struct sctp_tcb *, struct sctp_nets *, struct mbuf *,
+    int, int, int);
+
+
+
+void sctp_send_cwr(struct sctp_tcb *, struct sctp_nets *, uint32_t, uint8_t);
+
+
+void
+sctp_add_stream_reset_result(struct sctp_tmit_chunk *, uint32_t, uint32_t);
+
+void
+sctp_send_deferred_reset_response(struct sctp_tcb *,
+				  struct sctp_stream_reset_list *,
+				  int);
+
+void
+sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *,
+                                 uint32_t, uint32_t, uint32_t, uint32_t);
+int
+sctp_send_stream_reset_out_if_possible(struct sctp_tcb *, int);
+
+int
+sctp_send_str_reset_req(struct sctp_tcb *, uint16_t , uint16_t *,
+                        uint8_t, uint8_t, uint8_t, uint16_t, uint16_t, uint8_t);
+
+void
+sctp_send_abort(struct mbuf *, int, struct sockaddr *, struct sockaddr *,
+                struct sctphdr *, uint32_t, struct mbuf *,
+#if defined(__FreeBSD__)
+                uint8_t, uint32_t, uint16_t,
+#endif
+                uint32_t, uint16_t);
+
+void sctp_send_operr_to(struct sockaddr *, struct sockaddr *,
+                        struct sctphdr *, uint32_t, struct mbuf *,
+#if defined(__FreeBSD__)
+                        uint8_t, uint32_t, uint16_t,
+#endif
+                        uint32_t, uint16_t);
+
+#endif /* _KERNEL || __Userspace__ */
+
+#if defined(_KERNEL) || defined(__Userspace__)
+int
+sctp_sosend(struct socket *so,
+    struct sockaddr *addr,
+    struct uio *uio,
+#ifdef __Panda__
+    pakhandle_type top,
+    pakhandle_type control,
+#else
+    struct mbuf *top,
+    struct mbuf *control,
+#endif
+#if defined(__APPLE__) || defined(__Panda__)
+    int flags
+#else
+    int flags,
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+    struct thread *p
+#elif defined(__Windows__)
+    PKTHREAD p
+#else
+#if defined(__Userspace__)
+    /* proc is a dummy in __Userspace__ and will not be passed to sctp_lower_sosend */
+#endif
+    struct proc *p
+#endif
+#endif
+);
+
+#endif
+#endif
+
diff --git a/usrsctplib/netinet/sctp_pcb.c b/usrsctplib/netinet/sctp_pcb.c
new file mode 100755
index 0000000..ec3ff6d
--- /dev/null
+++ b/usrsctplib/netinet/sctp_pcb.c
@@ -0,0 +1,8182 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#ifdef __FreeBSD__
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_bsd_addr.h>
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
+#include <netinet/sctp_dtrace_define.h>
+#endif
+#if defined(INET) || defined(INET6)
+#if !defined(__Userspace_os_Windows)
+#include <netinet/udp.h>
+#endif
+#endif
+#ifdef INET6
+#if defined(__Userspace__)
+#include "user_ip6_var.h"
+#else
+#include <netinet6/ip6_var.h>
+#endif
+#endif
+#if defined(__FreeBSD__)
+#include <sys/sched.h>
+#include <sys/smp.h>
+#include <sys/unistd.h>
+#endif
+#if defined(__Userspace__)
+#include <user_socketvar.h>
+#if !defined(__Userspace_os_Windows)
+#include <netdb.h>
+#endif
+#endif
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 4
+#endif
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+VNET_DEFINE(struct sctp_base_info, system_base_info);
+#else
+struct sctp_base_info system_base_info;
+#endif
+
+/* FIX: we don't handle multiple link local scopes */
+/* "scopeless" replacement for IN6_ARE_ADDR_EQUAL */
+#ifdef INET6
+int
+SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
+{
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+#if defined(__APPLE__)
+	struct in6_addr tmp_a, tmp_b;
+
+	tmp_a = a->sin6_addr;
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+	if (in6_embedscope(&tmp_a, a, NULL, NULL) != 0) {
+#else
+	if (in6_embedscope(&tmp_a, a, NULL, NULL, NULL) != 0) {
+#endif
+		return (0);
+	}
+	tmp_b = b->sin6_addr;
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+	if (in6_embedscope(&tmp_b, b, NULL, NULL) != 0) {
+#else
+	if (in6_embedscope(&tmp_b, b, NULL, NULL, NULL) != 0) {
+#endif
+		return (0);
+	}
+	return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
+#elif defined(SCTP_KAME)
+	struct sockaddr_in6 tmp_a, tmp_b;
+
+	memcpy(&tmp_a, a, sizeof(struct sockaddr_in6));
+	if (sa6_embedscope(&tmp_a, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+		return (0);
+	}
+	memcpy(&tmp_b, b, sizeof(struct sockaddr_in6));
+	if (sa6_embedscope(&tmp_b, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+		return (0);
+	}
+	return (IN6_ARE_ADDR_EQUAL(&tmp_a.sin6_addr, &tmp_b.sin6_addr));
+#else
+	struct in6_addr tmp_a, tmp_b;
+
+	tmp_a = a->sin6_addr;
+	if (in6_embedscope(&tmp_a, a) != 0) {
+		return (0);
+	}
+	tmp_b = b->sin6_addr;
+	if (in6_embedscope(&tmp_b, b) != 0) {
+		return (0);
+	}
+	return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
+#endif
+#else
+	return (IN6_ARE_ADDR_EQUAL(&(a->sin6_addr), &(b->sin6_addr)));
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+}
+#endif
+
+void
+sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
+{
+	/*
+	 * We really don't need to lock this, but I will just because it
+	 * does not hurt.
+	 */
+	SCTP_INP_INFO_RLOCK();
+	spcb->ep_count = SCTP_BASE_INFO(ipi_count_ep);
+	spcb->asoc_count = SCTP_BASE_INFO(ipi_count_asoc);
+	spcb->laddr_count = SCTP_BASE_INFO(ipi_count_laddr);
+	spcb->raddr_count = SCTP_BASE_INFO(ipi_count_raddr);
+	spcb->chk_count = SCTP_BASE_INFO(ipi_count_chunk);
+	spcb->readq_count = SCTP_BASE_INFO(ipi_count_readq);
+	spcb->stream_oque = SCTP_BASE_INFO(ipi_count_strmoq);
+	spcb->free_chunks = SCTP_BASE_INFO(ipi_free_chunks);
+	SCTP_INP_INFO_RUNLOCK();
+}
+
+/*-
+ * Addresses are added to VRFs (Virtual Routers). For BSD we
+ * have only the default VRF 0. We maintain a hash list of
+ * VRF's. Each VRF has its own list of sctp_ifn's. Each of
+ * these has a list of addresses. When we add a new address
+ * to a VRF we lookup the ifn/ifn_index, if the ifn does
+ * not exist we create it and add it to the list of IFN's
+ * within the VRF. Once we have the sctp_ifn, we add the
+ * address to the list. So we look something like:
+ *
+ * hash-vrf-table
+ *   vrf-> ifn-> ifn -> ifn
+ *   vrf    |
+ *    ...   +--ifa-> ifa -> ifa
+ *   vrf
+ *
+ * We keep these separate lists since the SCTP subsystem will
+ * point to these from its source address selection nets structure.
+ * When an address is deleted it does not happen right away on
+ * the SCTP side, it gets scheduled. What we do when a
+ * delete happens is immediately remove the address from
+ * the master list and decrement the refcount. As our
+ * addip iterator works through and frees the src address
+ * selection pointing to the sctp_ifa, eventually the refcount
+ * will reach 0 and we will delete it. Note that it is assumed
+ * that any locking on system level ifn/ifa is done at the
+ * caller of these functions and these routines will only
+ * lock the SCTP structures as they add or delete things.
+ *
+ * Other notes on VRF concepts.
+ *  - An endpoint can be in multiple VRF's
+ *  - An association lives within a VRF and only one VRF.
+ *  - Any incoming packet we can deduce the VRF for by
+ *    looking at the mbuf/pak inbound (for BSD it's VRF=0 :D)
+ *  - Any downward send call or connect call must supply the
+ *    VRF via ancillary data or via some sort of set default
+ *    VRF socket option call (again for BSD no brainer since
+ *    the VRF is always 0).
+ *  - An endpoint may add multiple VRF's to it.
+ *  - Listening sockets can accept associations in any
+ *    of the VRF's they are in but the assoc will end up
+ *    in only one VRF (gotten from the packet or connect/send).
+ *
+ */
+
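+/*
+ * Illustrative sketch (not part of the original sources): how a platform
+ * address hook might hand a freshly discovered address to the default VRF
+ * described above.  The interface handle, type and flag values used here
+ * are placeholders.
+ */
+#if 0
+static void
+example_register_addr(void *ifn, uint32_t ifn_index, const char *if_name,
+                      struct sockaddr *addr)
+{
+	struct sctp_ifa *ifa;
+
+	/* dynamic_add != 0 defers use of the address until the ADDR_WQ timer
+	 * fires, as arranged by sctp_add_addr_to_vrf() below. */
+	ifa = sctp_add_addr_to_vrf(SCTP_DEFAULT_VRFID, ifn, ifn_index,
+	                           0, if_name, NULL, addr, 0, 1);
+	if (ifa == NULL) {
+		/* allocation failed; nothing was registered */
+	}
+}
+#endif
+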
+struct sctp_vrf *
+sctp_allocate_vrf(int vrf_id)
+{
+	struct sctp_vrf *vrf = NULL;
+	struct sctp_vrflist *bucket;
+
+	/* First allocate the VRF structure */
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf) {
+		/* Already allocated */
+		return (vrf);
+	}
+	SCTP_MALLOC(vrf, struct sctp_vrf *, sizeof(struct sctp_vrf),
+		    SCTP_M_VRF);
+	if (vrf == NULL) {
+		/* No memory */
+#ifdef INVARIANTS
+		panic("No memory for VRF:%d", vrf_id);
+#endif
+		return (NULL);
+	}
+	/* setup the VRF */
+	memset(vrf, 0, sizeof(struct sctp_vrf));
+	vrf->vrf_id = vrf_id;
+	LIST_INIT(&vrf->ifnlist);
+	vrf->total_ifa_count = 0;
+	vrf->refcount = 0;
+	/* now also setup table ids */
+	SCTP_INIT_VRF_TABLEID(vrf);
+	/* Init the HASH of addresses */
+	vrf->vrf_addr_hash = SCTP_HASH_INIT(SCTP_VRF_ADDR_HASH_SIZE,
+					    &vrf->vrf_addr_hashmark);
+	if (vrf->vrf_addr_hash == NULL) {
+		/* No memory */
+#ifdef INVARIANTS
+		panic("No memory for VRF:%d", vrf_id);
+#endif
+		SCTP_FREE(vrf, SCTP_M_VRF);
+		return (NULL);
+	}
+
+	/* Add it to the hash table */
+	bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
+	LIST_INSERT_HEAD(bucket, vrf, next_vrf);
+	atomic_add_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
+	return (vrf);
+}
+
+
+struct sctp_ifn *
+sctp_find_ifn(void *ifn, uint32_t ifn_index)
+{
+	struct sctp_ifn *sctp_ifnp;
+	struct sctp_ifnlist *hash_ifn_head;
+
+	/* We assume the address lock is held;
+	 * if that's wrong, problems could occur :-)
+	 */
+	hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
+	LIST_FOREACH(sctp_ifnp, hash_ifn_head, next_bucket) {
+		if (sctp_ifnp->ifn_index == ifn_index) {
+			return (sctp_ifnp);
+		}
+		if (sctp_ifnp->ifn_p && ifn && (sctp_ifnp->ifn_p == ifn)) {
+			return (sctp_ifnp);
+		}
+	}
+	return (NULL);
+}
+
+
+struct sctp_vrf *
+sctp_find_vrf(uint32_t vrf_id)
+{
+	struct sctp_vrflist *bucket;
+	struct sctp_vrf *liste;
+
+	bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(vrf_id & SCTP_BASE_INFO(hashvrfmark))];
+	LIST_FOREACH(liste, bucket, next_vrf) {
+		if (vrf_id == liste->vrf_id) {
+			return (liste);
+		}
+	}
+	return (NULL);
+}
+
+
+void
+sctp_free_vrf(struct sctp_vrf *vrf)
+{
+	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&vrf->refcount)) {
+		if (vrf->vrf_addr_hash) {
+			SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
+			vrf->vrf_addr_hash = NULL;
+		}
+		/* We zero'd the count */
+		LIST_REMOVE(vrf, next_vrf);
+		SCTP_FREE(vrf, SCTP_M_VRF);
+		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_vrfs), 1);
+	}
+}
+
+
+void
+sctp_free_ifn(struct sctp_ifn *sctp_ifnp)
+{
+	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifnp->refcount)) {
+		/* We zero'd the count */
+		if (sctp_ifnp->vrf) {
+			sctp_free_vrf(sctp_ifnp->vrf);
+		}
+		SCTP_FREE(sctp_ifnp, SCTP_M_IFN);
+		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
+	}
+}
+
+
+void
+sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu)
+{
+	struct sctp_ifn *sctp_ifnp;
+
+	sctp_ifnp = sctp_find_ifn((void *)NULL, ifn_index);
+	if (sctp_ifnp != NULL) {
+		sctp_ifnp->ifn_mtu = mtu;
+	}
+}
+
+
+void
+sctp_free_ifa(struct sctp_ifa *sctp_ifap)
+{
+	if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&sctp_ifap->refcount)) {
+		/* We zero'd the count */
+		if (sctp_ifap->ifn_p) {
+			sctp_free_ifn(sctp_ifap->ifn_p);
+		}
+		SCTP_FREE(sctp_ifap, SCTP_M_IFA);
+		atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
+	}
+}
+
+
+static void
+sctp_delete_ifn(struct sctp_ifn *sctp_ifnp, int hold_addr_lock)
+{
+	struct sctp_ifn *found;
+
+	found = sctp_find_ifn(sctp_ifnp->ifn_p, sctp_ifnp->ifn_index);
+	if (found == NULL) {
+		/* Not in the list.. sorry */
+		return;
+	}
+	if (hold_addr_lock == 0)
+		SCTP_IPI_ADDR_WLOCK();
+	LIST_REMOVE(sctp_ifnp, next_bucket);
+	LIST_REMOVE(sctp_ifnp, next_ifn);
+	SCTP_DEREGISTER_INTERFACE(sctp_ifnp->ifn_index,
+				  sctp_ifnp->registered_af);
+	if (hold_addr_lock == 0)
+		SCTP_IPI_ADDR_WUNLOCK();
+	/* Take away the reference, and possibly free it */
+	sctp_free_ifn(sctp_ifnp);
+}
+
+
+void
+sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr,
+			const char *if_name, uint32_t ifn_index)
+{
+	struct sctp_vrf *vrf;
+	struct sctp_ifa *sctp_ifap;
+
+	SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+		goto out;
+
+	}
+	sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+	if (sctp_ifap == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n");
+		goto out;
+	}
+	if (sctp_ifap->ifn_p == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark unusable\n");
+		goto out;
+	}
+	if (if_name) {
+		if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) {
+			SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n",
+				sctp_ifap->ifn_p->ifn_name, if_name);
+			goto out;
+		}
+	} else {
+		if (sctp_ifap->ifn_p->ifn_index != ifn_index) {
+			SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d down command for ifn_index:%d - ignored\n",
+				sctp_ifap->ifn_p->ifn_index, ifn_index);
+			goto out;
+		}
+	}
+
+	sctp_ifap->localifa_flags &= (~SCTP_ADDR_VALID);
+	sctp_ifap->localifa_flags |= SCTP_ADDR_IFA_UNUSEABLE;
+ out:
+	SCTP_IPI_ADDR_RUNLOCK();
+}
+
+
+void
+sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr,
+		      const char *if_name, uint32_t ifn_index)
+{
+	struct sctp_vrf *vrf;
+	struct sctp_ifa *sctp_ifap;
+
+	SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+		goto out;
+
+	}
+	sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+	if (sctp_ifap == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "Can't find sctp_ifap for address\n");
+		goto out;
+	}
+	if (sctp_ifap->ifn_p == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "IFA has no IFN - can't mark usable\n");
+		goto out;
+	}
+	if (if_name) {
+		if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) != 0) {
+			SCTPDBG(SCTP_DEBUG_PCB4, "IFN %s of IFA not the same as %s\n",
+				sctp_ifap->ifn_p->ifn_name, if_name);
+			goto out;
+		}
+	} else {
+		if (sctp_ifap->ifn_p->ifn_index != ifn_index) {
+			SCTPDBG(SCTP_DEBUG_PCB4, "IFA owned by ifn_index:%d up command for ifn_index:%d - ignored\n",
+				sctp_ifap->ifn_p->ifn_index, ifn_index);
+			goto out;
+		}
+	}
+
+	sctp_ifap->localifa_flags &= (~SCTP_ADDR_IFA_UNUSEABLE);
+	sctp_ifap->localifa_flags |= SCTP_ADDR_VALID;
+ out:
+	SCTP_IPI_ADDR_RUNLOCK();
+}
+
+
+/*-
+ * Add an ifa to an ifn.
+ * Register the interface as necessary.
+ * NOTE: ADDR write lock MUST be held.
+ */
+static void
+sctp_add_ifa_to_ifn(struct sctp_ifn *sctp_ifnp, struct sctp_ifa *sctp_ifap)
+{
+	int ifa_af;
+
+	LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
+	sctp_ifap->ifn_p = sctp_ifnp;
+	atomic_add_int(&sctp_ifap->ifn_p->refcount, 1);
+	/* update address counts */
+	sctp_ifnp->ifa_count++;
+	ifa_af = sctp_ifap->address.sa.sa_family;
+	switch (ifa_af) {
+#ifdef INET
+	case AF_INET:
+		sctp_ifnp->num_v4++;
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		sctp_ifnp->num_v6++;
+		break;
+#endif
+	default:
+		break;
+	}
+	if (sctp_ifnp->ifa_count == 1) {
+		/* register the new interface */
+		SCTP_REGISTER_INTERFACE(sctp_ifnp->ifn_index, ifa_af);
+		sctp_ifnp->registered_af = ifa_af;
+	}
+}
+
+
+/*-
+ * Remove an ifa from its ifn.
+ * If no more addresses exist, remove the ifn too. Otherwise, re-register
+ * the interface based on the remaining address families left.
+ * NOTE: ADDR write lock MUST be held.
+ */
+static void
+sctp_remove_ifa_from_ifn(struct sctp_ifa *sctp_ifap)
+{
+	LIST_REMOVE(sctp_ifap, next_ifa);
+	if (sctp_ifap->ifn_p) {
+		/* update address counts */
+		sctp_ifap->ifn_p->ifa_count--;
+		switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+		case AF_INET:
+			sctp_ifap->ifn_p->num_v4--;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			sctp_ifap->ifn_p->num_v6--;
+			break;
+#endif
+		default:
+			break;
+		}
+
+		if (LIST_EMPTY(&sctp_ifap->ifn_p->ifalist)) {
+			/* remove the ifn, possibly freeing it */
+			sctp_delete_ifn(sctp_ifap->ifn_p, SCTP_ADDR_LOCKED);
+		} else {
+			/* re-register address family type, if needed */
+			if ((sctp_ifap->ifn_p->num_v6 == 0) &&
+			    (sctp_ifap->ifn_p->registered_af == AF_INET6)) {
+				SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6);
+				SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET);
+				sctp_ifap->ifn_p->registered_af = AF_INET;
+			} else if ((sctp_ifap->ifn_p->num_v4 == 0) &&
+				   (sctp_ifap->ifn_p->registered_af == AF_INET)) {
+				SCTP_DEREGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET);
+				SCTP_REGISTER_INTERFACE(sctp_ifap->ifn_p->ifn_index, AF_INET6);
+				sctp_ifap->ifn_p->registered_af = AF_INET6;
+			}
+			/* free the ifn refcount */
+			sctp_free_ifn(sctp_ifap->ifn_p);
+		}
+		sctp_ifap->ifn_p = NULL;
+	}
+}
+
+
+struct sctp_ifa *
+sctp_add_addr_to_vrf(uint32_t vrf_id, void *ifn, uint32_t ifn_index,
+		     uint32_t ifn_type, const char *if_name, void *ifa,
+		     struct sockaddr *addr, uint32_t ifa_flags,
+		     int dynamic_add)
+{
+	struct sctp_vrf *vrf;
+	struct sctp_ifn *sctp_ifnp = NULL;
+	struct sctp_ifa *sctp_ifap = NULL;
+	struct sctp_ifalist *hash_addr_head;
+	struct sctp_ifnlist *hash_ifn_head;
+	uint32_t hash_of_addr;
+	int new_ifn_af = 0;
+
+#ifdef SCTP_DEBUG
+	SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: adding address: ", vrf_id);
+	SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
+#endif
+	SCTP_IPI_ADDR_WLOCK();
+	sctp_ifnp = sctp_find_ifn(ifn, ifn_index);
+	if (sctp_ifnp) {
+		vrf = sctp_ifnp->vrf;
+	} else {
+		vrf = sctp_find_vrf(vrf_id);
+		if (vrf == NULL) {
+			vrf = sctp_allocate_vrf(vrf_id);
+			if (vrf == NULL) {
+				SCTP_IPI_ADDR_WUNLOCK();
+				return (NULL);
+			}
+		}
+	}
+	if (sctp_ifnp == NULL) {
+		/* build one and add it, can't hold lock
+		 * until after malloc done though.
+		 */
+		SCTP_IPI_ADDR_WUNLOCK();
+		SCTP_MALLOC(sctp_ifnp, struct sctp_ifn *,
+			    sizeof(struct sctp_ifn), SCTP_M_IFN);
+		if (sctp_ifnp == NULL) {
+#ifdef INVARIANTS
+			panic("No memory for IFN");
+#endif
+			return (NULL);
+		}
+		memset(sctp_ifnp, 0, sizeof(struct sctp_ifn));
+		sctp_ifnp->ifn_index = ifn_index;
+		sctp_ifnp->ifn_p = ifn;
+		sctp_ifnp->ifn_type = ifn_type;
+		sctp_ifnp->refcount = 0;
+		sctp_ifnp->vrf = vrf;
+		atomic_add_int(&vrf->refcount, 1);
+		sctp_ifnp->ifn_mtu = SCTP_GATHER_MTU_FROM_IFN_INFO(ifn, ifn_index, addr->sa_family);
+		if (if_name != NULL) {
+			snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", if_name);
+		} else {
+			snprintf(sctp_ifnp->ifn_name, SCTP_IFNAMSIZ, "%s", "unknown");
+		}
+		hash_ifn_head = &SCTP_BASE_INFO(vrf_ifn_hash)[(ifn_index & SCTP_BASE_INFO(vrf_ifn_hashmark))];
+		LIST_INIT(&sctp_ifnp->ifalist);
+		SCTP_IPI_ADDR_WLOCK();
+		LIST_INSERT_HEAD(hash_ifn_head, sctp_ifnp, next_bucket);
+		LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn);
+		atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifns), 1);
+		new_ifn_af = 1;
+	}
+	sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+	if (sctp_ifap) {
+		/* Hmm, it already exists? */
+		if ((sctp_ifap->ifn_p) &&
+		    (sctp_ifap->ifn_p->ifn_index == ifn_index)) {
+			SCTPDBG(SCTP_DEBUG_PCB4, "Using existing ifn %s (0x%x) for ifa %p\n",
+				sctp_ifap->ifn_p->ifn_name, ifn_index,
+				(void *)sctp_ifap);
+			if (new_ifn_af) {
+				/* Remove the created one that we don't want */
+				sctp_delete_ifn(sctp_ifnp, SCTP_ADDR_LOCKED);
+			}
+			if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED) {
+				/* easy to solve, just switch back to active */
+				SCTPDBG(SCTP_DEBUG_PCB4, "Clearing deleted ifa flag\n");
+				sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
+				sctp_ifap->ifn_p = sctp_ifnp;
+				atomic_add_int(&sctp_ifap->ifn_p->refcount, 1);
+			}
+		exit_stage_left:
+			SCTP_IPI_ADDR_WUNLOCK();
+			return (sctp_ifap);
+		} else {
+			if (sctp_ifap->ifn_p) {
+				/*
+				 * The last IFN gets the address, remove the
+				 * old one
+				 */
+				SCTPDBG(SCTP_DEBUG_PCB4, "Moving ifa %p from %s (0x%x) to %s (0x%x)\n",
+					(void *)sctp_ifap, sctp_ifap->ifn_p->ifn_name,
+					sctp_ifap->ifn_p->ifn_index, if_name,
+					ifn_index);
+				/* remove the address from the old ifn */
+				sctp_remove_ifa_from_ifn(sctp_ifap);
+				/* move the address over to the new ifn */
+				sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
+				goto exit_stage_left;
+			} else {
+				/* repair ifnp which was NULL ? */
+				sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
+				SCTPDBG(SCTP_DEBUG_PCB4, "Repairing ifn %p for ifa %p\n",
+					(void *)sctp_ifnp, (void *)sctp_ifap);
+				sctp_add_ifa_to_ifn(sctp_ifnp, sctp_ifap);
+			}
+			goto exit_stage_left;
+		}
+	}
+	SCTP_IPI_ADDR_WUNLOCK();
+	SCTP_MALLOC(sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), SCTP_M_IFA);
+	if (sctp_ifap == NULL) {
+#ifdef INVARIANTS
+		panic("No memory for IFA");
+#endif
+		return (NULL);
+	}
+	memset(sctp_ifap, 0, sizeof(struct sctp_ifa));
+	sctp_ifap->ifn_p = sctp_ifnp;
+	atomic_add_int(&sctp_ifnp->refcount, 1);
+	sctp_ifap->vrf_id = vrf_id;
+	sctp_ifap->ifa = ifa;
+#ifdef HAVE_SA_LEN
+	memcpy(&sctp_ifap->address, addr, addr->sa_len);
+#else
+	switch (addr->sa_family) {
+#ifdef INET
+	case AF_INET:
+		memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in));
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_in6));
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		memcpy(&sctp_ifap->address, addr, sizeof(struct sockaddr_conn));
+		break;
+#endif
+	default:
+		/* TSNH */
+		break;
+	}
+#endif
+	sctp_ifap->localifa_flags = SCTP_ADDR_VALID | SCTP_ADDR_DEFER_USE;
+	sctp_ifap->flags = ifa_flags;
+	/* Set scope */
+	switch (sctp_ifap->address.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+		sin = &sctp_ifap->address.sin;
+		if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
+		    (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
+			sctp_ifap->src_is_loop = 1;
+		}
+		if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+			sctp_ifap->src_is_priv = 1;
+		}
+		sctp_ifnp->num_v4++;
+		if (new_ifn_af)
+			new_ifn_af = AF_INET;
+		break;
+	}
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* ok to use deprecated addresses? */
+		struct sockaddr_in6 *sin6;
+
+		sin6 = &sctp_ifap->address.sin6;
+		if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
+		    (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
+			sctp_ifap->src_is_loop = 1;
+		}
+		if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+			sctp_ifap->src_is_priv = 1;
+		}
+		sctp_ifnp->num_v6++;
+		if (new_ifn_af)
+			new_ifn_af = AF_INET6;
+		break;
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		if (new_ifn_af)
+			new_ifn_af = AF_CONN;
+		break;
+#endif
+	default:
+		new_ifn_af = 0;
+		break;
+	}
+	hash_of_addr = sctp_get_ifa_hash_val(&sctp_ifap->address.sa);
+
+	if ((sctp_ifap->src_is_priv == 0) &&
+	    (sctp_ifap->src_is_loop == 0)) {
+		sctp_ifap->src_is_glob = 1;
+	}
+	SCTP_IPI_ADDR_WLOCK();
+	hash_addr_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
+	LIST_INSERT_HEAD(hash_addr_head, sctp_ifap, next_bucket);
+	sctp_ifap->refcount = 1;
+	LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
+	sctp_ifnp->ifa_count++;
+	vrf->total_ifa_count++;
+	atomic_add_int(&SCTP_BASE_INFO(ipi_count_ifas), 1);
+	if (new_ifn_af) {
+		SCTP_REGISTER_INTERFACE(ifn_index, new_ifn_af);
+		sctp_ifnp->registered_af = new_ifn_af;
+	}
+	SCTP_IPI_ADDR_WUNLOCK();
+	if (dynamic_add) {
+		/* Bump up the refcount so that when the timer
+		 * completes it will drop back down.
+		 */
+		struct sctp_laddr *wi;
+
+		atomic_add_int(&sctp_ifap->refcount, 1);
+		wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+		if (wi == NULL) {
+			/*
+			 * Gak, what can we do? We have lost an address
+			 * change can you say HOSED?
+			 * change; can you say HOSED?
+			SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");
+			/* Oops, must decrement the count */
+			sctp_del_addr_from_vrf(vrf_id, addr, ifn_index,
+					       if_name);
+			return (NULL);
+		}
+		SCTP_INCR_LADDR_COUNT();
+		bzero(wi, sizeof(*wi));
+		(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+		wi->ifa = sctp_ifap;
+		wi->action = SCTP_ADD_IP_ADDRESS;
+
+		SCTP_WQ_ADDR_LOCK();
+		LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+		SCTP_WQ_ADDR_UNLOCK();
+
+		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+				 (struct sctp_inpcb *)NULL,
+				 (struct sctp_tcb *)NULL,
+				 (struct sctp_nets *)NULL);
+	} else {
+		/* it's ready for use */
+		sctp_ifap->localifa_flags &= ~SCTP_ADDR_DEFER_USE;
+	}
+	return (sctp_ifap);
+}
+
+void
+sctp_del_addr_from_vrf(uint32_t vrf_id, struct sockaddr *addr,
+		       uint32_t ifn_index, const char *if_name)
+{
+	struct sctp_vrf *vrf;
+	struct sctp_ifa *sctp_ifap = NULL;
+
+	SCTP_IPI_ADDR_WLOCK();
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		SCTPDBG(SCTP_DEBUG_PCB4, "Can't find vrf_id 0x%x\n", vrf_id);
+		goto out_now;
+	}
+
+#ifdef SCTP_DEBUG
+	SCTPDBG(SCTP_DEBUG_PCB4, "vrf_id 0x%x: deleting address:", vrf_id);
+	SCTPDBG_ADDR(SCTP_DEBUG_PCB4, addr);
+#endif
+	sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, SCTP_ADDR_LOCKED);
+	if (sctp_ifap) {
+		/* Validate the delete */
+		if (sctp_ifap->ifn_p) {
+			int valid = 0;
+			/*-
+			 * The name has priority over the ifn_index
+			 * if its given. We do this especially for
+			 * if it's given. We do this especially for
+			 * Panda, which might recycle indexes fast.
+			if (if_name) {
+				if (strncmp(if_name, sctp_ifap->ifn_p->ifn_name, SCTP_IFNAMSIZ) == 0) {
+					/* They match, it's a correct delete */
+					valid = 1;
+				}
+			}
+			if (!valid) {
+				/* last ditch check ifn_index */
+				if (ifn_index == sctp_ifap->ifn_p->ifn_index) {
+					valid = 1;
+				}
+			}
+			if (!valid) {
+				SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s does not match addresses\n",
+					ifn_index, ((if_name == NULL) ? "NULL" : if_name));
+				SCTPDBG(SCTP_DEBUG_PCB4, "ifn:%d ifname:%s - ignoring delete\n",
+					sctp_ifap->ifn_p->ifn_index, sctp_ifap->ifn_p->ifn_name);
+				SCTP_IPI_ADDR_WUNLOCK();
+				return;
+			}
+		}
+		SCTPDBG(SCTP_DEBUG_PCB4, "Deleting ifa %p\n", (void *)sctp_ifap);
+		sctp_ifap->localifa_flags &= SCTP_ADDR_VALID;
+		/*
+		 * We don't set the flag. This means that the structure will
+		 * hang around in EP's that have bound specific to it until
+		 * they close. This gives us TCP like behavior if someone
+		 * removes an address (or for that matter adds it right back).
+		 */
+		/* sctp_ifap->localifa_flags |= SCTP_BEING_DELETED; */
+		vrf->total_ifa_count--;
+		LIST_REMOVE(sctp_ifap, next_bucket);
+		sctp_remove_ifa_from_ifn(sctp_ifap);
+	}
+#ifdef SCTP_DEBUG
+	else {
+		SCTPDBG(SCTP_DEBUG_PCB4, "Del Addr-ifn:%d Could not find address:",
+			ifn_index);
+		SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr);
+	}
+#endif
+
+ out_now:
+	SCTP_IPI_ADDR_WUNLOCK();
+	if (sctp_ifap) {
+		struct sctp_laddr *wi;
+
+		wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+		if (wi == NULL) {
+			/*
+			 * Gak, what can we do? We have lost an address
+			 * change; can you say HOSED?
+			 */
+			SCTPDBG(SCTP_DEBUG_PCB4, "Lost an address change?\n");
+
+			/* Oops, must decrement the count */
+			sctp_free_ifa(sctp_ifap);
+			return;
+		}
+		SCTP_INCR_LADDR_COUNT();
+		bzero(wi, sizeof(*wi));
+		(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+		wi->ifa = sctp_ifap;
+		wi->action = SCTP_DEL_IP_ADDRESS;
+		SCTP_WQ_ADDR_LOCK();
+		/*
+		 * Should this really be a tailq? As it is we will process the
+		 * newest first :-0
+		 */
+		LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+		SCTP_WQ_ADDR_UNLOCK();
+
+		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+				 (struct sctp_inpcb *)NULL,
+				 (struct sctp_tcb *)NULL,
+				 (struct sctp_nets *)NULL);
+	}
+	return;
+}
+
+
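+/*
+ * Return 1 if the given address is one of this association's usable local
+ * addresses (honoring the association's address scopes), 0 otherwise.
+ */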
+static int
+sctp_does_stcb_own_this_addr(struct sctp_tcb *stcb, struct sockaddr *to)
+{
+	int loopback_scope;
+#if defined(INET)
+	int ipv4_local_scope, ipv4_addr_legal;
+#endif
+#if defined(INET6)
+	int local_scope, site_scope, ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+	int conn_addr_legal;
+#endif
+	struct sctp_vrf *vrf;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+
+	loopback_scope = stcb->asoc.scope.loopback_scope;
+#if defined(INET)
+	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+#endif
+#if defined(INET6)
+	local_scope = stcb->asoc.scope.local_scope;
+	site_scope = stcb->asoc.scope.site_scope;
+	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+	conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+
+	SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+	if (vrf == NULL) {
+		/* no vrf, no addresses */
+		SCTP_IPI_ADDR_RUNLOCK();
+		return (0);
+	}
+
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+			if ((loopback_scope == 0) &&
+			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+				continue;
+			}
+			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+				if (sctp_is_addr_restricted(stcb, sctp_ifa) &&
+				    (!sctp_is_addr_pending(stcb, sctp_ifa))) {
+					/* We allow pending addresses, where we
+					 * have sent an asconf-add to be considered
+					 * valid.
+					 */
+					continue;
+				}
+				if (sctp_ifa->address.sa.sa_family != to->sa_family) {
+					continue;
+				}
+				switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					if (ipv4_addr_legal) {
+						struct sockaddr_in *sin, *rsin;
+
+						sin = &sctp_ifa->address.sin;
+						rsin = (struct sockaddr_in *)to;
+						if ((ipv4_local_scope == 0) &&
+						    IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+							continue;
+						}
+#if defined(__FreeBSD__)
+						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+						                     &sin->sin_addr) != 0) {
+							continue;
+						}
+#endif
+						if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
+							SCTP_IPI_ADDR_RUNLOCK();
+							return (1);
+						}
+					}
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					if (ipv6_addr_legal) {
+						struct sockaddr_in6 *sin6, *rsin6;
+#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
+						struct sockaddr_in6 lsa6;
+#endif
+						sin6 = &sctp_ifa->address.sin6;
+						rsin6 = (struct sockaddr_in6 *)to;
+#if defined(__FreeBSD__)
+						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+						                     &sin6->sin6_addr) != 0) {
+							continue;
+						}
+#endif
+						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+							if (local_scope == 0)
+								continue;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+							if (sin6->sin6_scope_id == 0) {
+#ifdef SCTP_KAME
+								if (sa6_recoverscope(sin6) != 0)
+									continue;
+#else
+								lsa6 = *sin6;
+								if (in6_recoverscope(&lsa6,
+								                     &lsa6.sin6_addr,
+								                     NULL))
+									continue;
+								sin6 = &lsa6;
+#endif /* SCTP_KAME */
+							}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+						}
+						if ((site_scope == 0) &&
+						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+							continue;
+						}
+						if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
+							SCTP_IPI_ADDR_RUNLOCK();
+							return (1);
+						}
+					}
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					if (conn_addr_legal) {
+						struct sockaddr_conn *sconn, *rsconn;
+
+						sconn = &sctp_ifa->address.sconn;
+						rsconn = (struct sockaddr_conn *)to;
+						if (sconn->sconn_addr == rsconn->sconn_addr) {
+							SCTP_IPI_ADDR_RUNLOCK();
+							return (1);
+						}
+					}
+					break;
+#endif
+				default:
+					/* TSNH */
+					break;
+				}
+			}
+		}
+	} else {
+		struct sctp_laddr *laddr;
+
+		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list, sctp_nxt_addr) {
+			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+				SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n");
+				continue;
+			}
+			if (sctp_is_addr_restricted(stcb, laddr->ifa) &&
+			    (!sctp_is_addr_pending(stcb, laddr->ifa))) {
+				/* We allow pending addresses, where we
+				 * have sent an asconf-add to be considered
+				 * valid.
+				 */
+				continue;
+			}
+			if (laddr->ifa->address.sa.sa_family != to->sa_family) {
+				continue;
+			}
+			switch (to->sa_family) {
+#ifdef INET
+			case AF_INET:
+			{
+				struct sockaddr_in *sin, *rsin;
+
+				sin = &laddr->ifa->address.sin;
+				rsin = (struct sockaddr_in *)to;
+				if (sin->sin_addr.s_addr == rsin->sin_addr.s_addr) {
+					SCTP_IPI_ADDR_RUNLOCK();
+					return (1);
+				}
+				break;
+			}
+#endif
+#ifdef INET6
+			case AF_INET6:
+			{
+				struct sockaddr_in6 *sin6, *rsin6;
+
+				sin6 = &laddr->ifa->address.sin6;
+				rsin6 = (struct sockaddr_in6 *)to;
+				if (SCTP6_ARE_ADDR_EQUAL(sin6, rsin6)) {
+					SCTP_IPI_ADDR_RUNLOCK();
+					return (1);
+				}
+				break;
+			}
+
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+			{
+				struct sockaddr_conn *sconn, *rsconn;
+
+				sconn = &laddr->ifa->address.sconn;
+				rsconn = (struct sockaddr_conn *)to;
+				if (sconn->sconn_addr == rsconn->sconn_addr) {
+					SCTP_IPI_ADDR_RUNLOCK();
+					return (1);
+				}
+				break;
+			}
+#endif
+			default:
+				/* TSNH */
+				break;
+			}
+
+		}
+	}
+	SCTP_IPI_ADDR_RUNLOCK();
+	return (0);
+}
+
+
+static struct sctp_tcb *
+sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
+    struct sockaddr *to, struct sctp_nets **netp, uint32_t vrf_id)
+{
+	/**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
+	/*
+	 * If we support the TCP model, then we must now dig through to see
+	 * if we can find our endpoint in the list of tcp ep's.
+	 */
+	uint16_t lport, rport;
+	struct sctppcbhead *ephead;
+	struct sctp_inpcb *inp;
+	struct sctp_laddr *laddr;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+#ifdef SCTP_MVRF
+	int fnd, i;
+#endif
+
+	if ((to == NULL) || (from == NULL)) {
+		return (NULL);
+	}
+
+	switch (to->sa_family) {
+#ifdef INET
+	case AF_INET:
+		if (from->sa_family == AF_INET) {
+			lport = ((struct sockaddr_in *)to)->sin_port;
+			rport = ((struct sockaddr_in *)from)->sin_port;
+		} else {
+			return (NULL);
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		if (from->sa_family == AF_INET6) {
+			lport = ((struct sockaddr_in6 *)to)->sin6_port;
+			rport = ((struct sockaddr_in6 *)from)->sin6_port;
+		} else {
+			return (NULL);
+		}
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		if (from->sa_family == AF_CONN) {
+			lport = ((struct sockaddr_conn *)to)->sconn_port;
+			rport = ((struct sockaddr_conn *)from)->sconn_port;
+		} else {
+			return (NULL);
+		}
+		break;
+#endif
+	default:
+		return (NULL);
+	}
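+	/* TCP-model endpoints are hashed on the combined ports (lport | rport). */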
+	ephead = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))];
+	/*
+	 * Ok now for each of the guys in this bucket we must look and see:
+	 * OK, now for each of the endpoints in this bucket we must check:
+	 * - Does the remote port match?
+	 * - Do its single association's addresses match this address (to)?
+	 * If so we update inp_p to point
+	 */
+	LIST_FOREACH(inp, ephead, sctp_hash) {
+		SCTP_INP_RLOCK(inp);
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		if (lport != inp->sctp_lport) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+#if defined(__FreeBSD__)
+		switch (to->sa_family) {
+#ifdef INET
+		case AF_INET:
+		{
+			struct sockaddr_in *sin;
+
+			sin = (struct sockaddr_in *)to;
+			if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+			                     &sin->sin_addr) != 0) {
+				SCTP_INP_RUNLOCK(inp);
+				continue;
+			}
+			break;
+		}
+#endif
+#ifdef INET6
+		case AF_INET6:
+		{
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)to;
+			if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+			                     &sin6->sin6_addr) != 0) {
+				SCTP_INP_RUNLOCK(inp);
+				continue;
+			}
+			break;
+		}
+#endif
+		default:
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+#endif
+#ifdef SCTP_MVRF
+		fnd = 0;
+		for (i = 0; i < inp->num_vrfs; i++) {
+			if (inp->m_vrf_ids[i] == vrf_id) {
+				fnd = 1;
+				break;
+			}
+		}
+		if (fnd == 0) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+#else
+		if (inp->def_vrf_id != vrf_id) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+#endif
+		/* check to see if the ep has one of the addresses */
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+			/* We are NOT bound all, so look further */
+			int match = 0;
+
+			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+
+				if (laddr->ifa == NULL) {
+					SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n", __func__);
+					continue;
+				}
+				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+					SCTPDBG(SCTP_DEBUG_PCB1, "ifa being deleted\n");
+					continue;
+				}
+				if (laddr->ifa->address.sa.sa_family ==
+				    to->sa_family) {
+					/* see if it matches */
+#ifdef INET
+					if (from->sa_family == AF_INET) {
+						struct sockaddr_in *intf_addr, *sin;
+
+						intf_addr = &laddr->ifa->address.sin;
+						sin = (struct sockaddr_in *)to;
+						if (sin->sin_addr.s_addr ==
+						    intf_addr->sin_addr.s_addr) {
+							match = 1;
+							break;
+						}
+					}
+#endif
+#ifdef INET6
+					if (from->sa_family == AF_INET6) {
+						struct sockaddr_in6 *intf_addr6;
+						struct sockaddr_in6 *sin6;
+
+						sin6 = (struct sockaddr_in6 *)
+						    to;
+						intf_addr6 = &laddr->ifa->address.sin6;
+
+						if (SCTP6_ARE_ADDR_EQUAL(sin6,
+						    intf_addr6)) {
+							match = 1;
+							break;
+						}
+					}
+#endif
+#if defined(__Userspace__)
+					if (from->sa_family == AF_CONN) {
+						struct sockaddr_conn *intf_addr, *sconn;
+
+						intf_addr = &laddr->ifa->address.sconn;
+						sconn = (struct sockaddr_conn *)to;
+						if (sconn->sconn_addr ==
+						    intf_addr->sconn_addr) {
+							match = 1;
+							break;
+						}
+					}
+#endif
+				}
+			}
+			if (match == 0) {
+				/* This endpoint does not have this address */
+				SCTP_INP_RUNLOCK(inp);
+				continue;
+			}
+		}
+		/*
+		 * Ok if we hit here the ep has the address, does it hold
+		 * the tcb?
+		 */
+		/* XXX: Why don't we TAILQ_FOREACH through sctp_asoc_list? */
+		stcb = LIST_FIRST(&inp->sctp_asoc_list);
+		if (stcb == NULL) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		SCTP_TCB_LOCK(stcb);
+		if (stcb->rport != rport) {
+			/* remote port does not match. */
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		if (!sctp_does_stcb_own_this_addr(stcb, to)) {
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		/* Does this TCB have a matching address? */
+		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+
+			if (net->ro._l_addr.sa.sa_family != from->sa_family) {
+				/* not the same family, can't be a match */
+				continue;
+			}
+			switch (from->sa_family) {
+#ifdef INET
+			case AF_INET:
+			{
+				struct sockaddr_in *sin, *rsin;
+
+				sin = (struct sockaddr_in *)&net->ro._l_addr;
+				rsin = (struct sockaddr_in *)from;
+				if (sin->sin_addr.s_addr ==
+				    rsin->sin_addr.s_addr) {
+					/* found it */
+					if (netp != NULL) {
+						*netp = net;
+					}
+					/* Update the endpoint pointer */
+					*inp_p = inp;
+					SCTP_INP_RUNLOCK(inp);
+					return (stcb);
+				}
+				break;
+			}
+#endif
+#ifdef INET6
+			case AF_INET6:
+			{
+				struct sockaddr_in6 *sin6, *rsin6;
+
+				sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+				rsin6 = (struct sockaddr_in6 *)from;
+				if (SCTP6_ARE_ADDR_EQUAL(sin6,
+				    rsin6)) {
+					/* found it */
+					if (netp != NULL) {
+						*netp = net;
+					}
+					/* Update the endpoint pointer */
+					*inp_p = inp;
+					SCTP_INP_RUNLOCK(inp);
+					return (stcb);
+				}
+				break;
+			}
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+			{
+				struct sockaddr_conn *sconn, *rsconn;
+
+				sconn = (struct sockaddr_conn *)&net->ro._l_addr;
+				rsconn = (struct sockaddr_conn *)from;
+				if (sconn->sconn_addr == rsconn->sconn_addr) {
+					/* found it */
+					if (netp != NULL) {
+						*netp = net;
+					}
+					/* Update the endpoint pointer */
+					*inp_p = inp;
+					SCTP_INP_RUNLOCK(inp);
+					return (stcb);
+				}
+				break;
+			}
+#endif
+			default:
+				/* TSNH */
+				break;
+			}
+		}
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_INP_RUNLOCK(inp);
+	}
+	return (NULL);
+}
+
+
+/*
+ * Rules for use:
+ *
+ * 1) If I return a NULL you must decrement any INP ref cnt.
+ * 2) If I find an stcb, both will be locked (locked_tcb and stcb), but the
+ *    decrement will be done here (only if locked_tcb == NULL).
+ * 3) The decrement happens on return ONLY if locked_tcb == NULL.
+ */
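+
+/*
+ * Illustrative caller sketch (editorial, not part of the upstream source):
+ * under rule 1 above, a hypothetical caller that passes locked_tcb == NULL
+ * still owns an inp reference when NULL comes back and must drop it itself:
+ *
+ *	stcb = sctp_findassociation_ep_addr(&inp, remote, &net, local, NULL);
+ *	if (stcb == NULL) {
+ *		SCTP_INP_DECR_REF(inp);		rule 1: decrement on NULL
+ *	} else {
+ *		... stcb comes back locked; use it, then ...
+ *		SCTP_TCB_UNLOCK(stcb);
+ *	}
+ */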
+
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
+    struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
+{
+	struct sctpasochead *head;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb = NULL;
+	struct sctp_nets *net;
+	uint16_t rport;
+
+	inp = *inp_p;
+	switch (remote->sa_family) {
+#ifdef INET
+	case AF_INET:
+		rport = (((struct sockaddr_in *)remote)->sin_port);
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		rport = (((struct sockaddr_in6 *)remote)->sin6_port);
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		rport = (((struct sockaddr_conn *)remote)->sconn_port);
+		break;
+#endif
+	default:
+		return (NULL);
+	}
+	if (locked_tcb) {
+		/*
+		 * Unlock so we can do proper locking here; this occurs when
+		 * called from load_addresses_from_init.
+		 */
+		atomic_add_int(&locked_tcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(locked_tcb);
+	}
+	SCTP_INP_INFO_RLOCK();
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/*-
+		 * Now either this guy is our listener or it's the
+		 * connector. If it is the one that issued the connect, then
+		 * its only chance is to be the first TCB in the list. If
+		 * it is the acceptor, then do the special_lookup to hash
+		 * and find the real inp.
+		 */
+		if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) {
+			/* to is peer addr, from is my addr */
+#ifndef SCTP_MVRF
+			stcb = sctp_tcb_special_locate(inp_p, remote, local,
+			    netp, inp->def_vrf_id);
+			if ((stcb != NULL) && (locked_tcb == NULL)) {
+				/* no pre-locked tcb was given, drop the inp refcount */
+				SCTP_INP_DECR_REF(inp);
+			}
+			if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
+				SCTP_INP_RLOCK(locked_tcb->sctp_ep);
+				SCTP_TCB_LOCK(locked_tcb);
+				atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+				SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
+			}
+#else
+			/*-
+			 * MVRF is tricky, we must look in every VRF
+			 * the endpoint has.
+			 */
+			int i;
+
+			for (i = 0; i < inp->num_vrfs; i++) {
+				stcb = sctp_tcb_special_locate(inp_p, remote, local,
+				                               netp, inp->m_vrf_ids[i]);
+				if ((stcb != NULL) && (locked_tcb == NULL)) {
+					/* no pre-locked tcb was given, drop the inp refcount */
+					SCTP_INP_DECR_REF(inp);
+					break;
+				}
+				if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
+					SCTP_INP_RLOCK(locked_tcb->sctp_ep);
+					SCTP_TCB_LOCK(locked_tcb);
+					atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+					SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
+					break;
+				}
+			}
+#endif
+			SCTP_INP_INFO_RUNLOCK();
+			return (stcb);
+		} else {
+			SCTP_INP_WLOCK(inp);
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+				goto null_return;
+			}
+			stcb = LIST_FIRST(&inp->sctp_asoc_list);
+			if (stcb == NULL) {
+				goto null_return;
+			}
+			SCTP_TCB_LOCK(stcb);
+
+			if (stcb->rport != rport) {
+				/* remote port does not match. */
+				SCTP_TCB_UNLOCK(stcb);
+				goto null_return;
+			}
+			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				SCTP_TCB_UNLOCK(stcb);
+				goto null_return;
+			}
+			if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
+				SCTP_TCB_UNLOCK(stcb);
+				goto null_return;
+			}
+			/* now look at the list of remote addresses */
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+				if (net == (TAILQ_NEXT(net, sctp_next))) {
+					panic("Corrupt net list");
+				}
+#endif
+				if (net->ro._l_addr.sa.sa_family !=
+				    remote->sa_family) {
+					/* not the same family */
+					continue;
+				}
+				switch (remote->sa_family) {
+#ifdef INET
+				case AF_INET:
+				{
+					struct sockaddr_in *sin, *rsin;
+
+					sin = (struct sockaddr_in *)
+					    &net->ro._l_addr;
+					rsin = (struct sockaddr_in *)remote;
+					if (sin->sin_addr.s_addr ==
+					    rsin->sin_addr.s_addr) {
+						/* found it */
+						if (netp != NULL) {
+							*netp = net;
+						}
+						if (locked_tcb == NULL) {
+							SCTP_INP_DECR_REF(inp);
+						} else if (locked_tcb != stcb) {
+							SCTP_TCB_LOCK(locked_tcb);
+						}
+						if (locked_tcb) {
+							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+						}
+
+						SCTP_INP_WUNLOCK(inp);
+						SCTP_INP_INFO_RUNLOCK();
+						return (stcb);
+					}
+					break;
+				}
+#endif
+#ifdef INET6
+				case AF_INET6:
+				{
+					struct sockaddr_in6 *sin6, *rsin6;
+
+					sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+					rsin6 = (struct sockaddr_in6 *)remote;
+					if (SCTP6_ARE_ADDR_EQUAL(sin6,
+					    rsin6)) {
+						/* found it */
+						if (netp != NULL) {
+							*netp = net;
+						}
+						if (locked_tcb == NULL) {
+							SCTP_INP_DECR_REF(inp);
+						} else if (locked_tcb != stcb) {
+							SCTP_TCB_LOCK(locked_tcb);
+						}
+						if (locked_tcb) {
+							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+						}
+						SCTP_INP_WUNLOCK(inp);
+						SCTP_INP_INFO_RUNLOCK();
+						return (stcb);
+					}
+					break;
+				}
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+				{
+					struct sockaddr_conn *sconn, *rsconn;
+
+					sconn = (struct sockaddr_conn *)&net->ro._l_addr;
+					rsconn = (struct sockaddr_conn *)remote;
+					if (sconn->sconn_addr == rsconn->sconn_addr) {
+						/* found it */
+						if (netp != NULL) {
+							*netp = net;
+						}
+						if (locked_tcb == NULL) {
+							SCTP_INP_DECR_REF(inp);
+						} else if (locked_tcb != stcb) {
+							SCTP_TCB_LOCK(locked_tcb);
+						}
+						if (locked_tcb) {
+							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+						}
+						SCTP_INP_WUNLOCK(inp);
+						SCTP_INP_INFO_RUNLOCK();
+						return (stcb);
+					}
+					break;
+				}
+#endif
+				default:
+					/* TSNH */
+					break;
+				}
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		}
+	} else {
+		SCTP_INP_WLOCK(inp);
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			goto null_return;
+		}
+		head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
+		                                               inp->sctp_hashmark)];
+		LIST_FOREACH(stcb, head, sctp_tcbhash) {
+			if (stcb->rport != rport) {
+				/* remote port does not match */
+				continue;
+			}
+			SCTP_TCB_LOCK(stcb);
+			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				SCTP_TCB_UNLOCK(stcb);
+				continue;
+			}
+			if (local && !sctp_does_stcb_own_this_addr(stcb, local)) {
+				SCTP_TCB_UNLOCK(stcb);
+				continue;
+			}
+			/* now look at the list of remote addresses */
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+#ifdef INVARIANTS
+				if (net == (TAILQ_NEXT(net, sctp_next))) {
+					panic("Corrupt net list");
+				}
+#endif
+				if (net->ro._l_addr.sa.sa_family !=
+				    remote->sa_family) {
+					/* not the same family */
+					continue;
+				}
+				switch (remote->sa_family) {
+#ifdef INET
+				case AF_INET:
+				{
+					struct sockaddr_in *sin, *rsin;
+
+					sin = (struct sockaddr_in *)
+					    &net->ro._l_addr;
+					rsin = (struct sockaddr_in *)remote;
+					if (sin->sin_addr.s_addr ==
+					    rsin->sin_addr.s_addr) {
+						/* found it */
+						if (netp != NULL) {
+							*netp = net;
+						}
+						if (locked_tcb == NULL) {
+							SCTP_INP_DECR_REF(inp);
+						} else if (locked_tcb != stcb) {
+							SCTP_TCB_LOCK(locked_tcb);
+						}
+						if (locked_tcb) {
+							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+						}
+						SCTP_INP_WUNLOCK(inp);
+						SCTP_INP_INFO_RUNLOCK();
+						return (stcb);
+					}
+					break;
+				}
+#endif
+#ifdef INET6
+				case AF_INET6:
+				{
+					struct sockaddr_in6 *sin6, *rsin6;
+
+					sin6 = (struct sockaddr_in6 *)
+					    &net->ro._l_addr;
+					rsin6 = (struct sockaddr_in6 *)remote;
+					if (SCTP6_ARE_ADDR_EQUAL(sin6,
+					    rsin6)) {
+						/* found it */
+						if (netp != NULL) {
+							*netp = net;
+						}
+						if (locked_tcb == NULL) {
+							SCTP_INP_DECR_REF(inp);
+						} else if (locked_tcb != stcb) {
+							SCTP_TCB_LOCK(locked_tcb);
+						}
+						if (locked_tcb) {
+							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+						}
+						SCTP_INP_WUNLOCK(inp);
+						SCTP_INP_INFO_RUNLOCK();
+						return (stcb);
+					}
+					break;
+				}
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+				{
+					struct sockaddr_conn *sconn, *rsconn;
+
+					sconn = (struct sockaddr_conn *)&net->ro._l_addr;
+					rsconn = (struct sockaddr_conn *)remote;
+					if (sconn->sconn_addr == rsconn->sconn_addr) {
+						/* found it */
+						if (netp != NULL) {
+							*netp = net;
+						}
+						if (locked_tcb == NULL) {
+							SCTP_INP_DECR_REF(inp);
+						} else if (locked_tcb != stcb) {
+							SCTP_TCB_LOCK(locked_tcb);
+						}
+						if (locked_tcb) {
+							atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+						}
+						SCTP_INP_WUNLOCK(inp);
+						SCTP_INP_INFO_RUNLOCK();
+						return (stcb);
+					}
+					break;
+				}
+#endif
+				default:
+					/* TSNH */
+					break;
+				}
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		}
+	}
+null_return:
+	/* clean up for returning null */
+	if (locked_tcb) {
+		SCTP_TCB_LOCK(locked_tcb);
+		atomic_subtract_int(&locked_tcb->asoc.refcnt, 1);
+	}
+	SCTP_INP_WUNLOCK(inp);
+	SCTP_INP_INFO_RUNLOCK();
+	/* not found */
+	return (NULL);
+}
+
+
+/*
+ * Find an association for a specific endpoint using the association id given
+ * out in the COMM_UP notification
+ */
+struct sctp_tcb *
+sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+	/*
+	 * Use the assoc_id to find an endpoint.
+	 */
+	struct sctpasochead *head;
+	struct sctp_tcb *stcb;
+	uint32_t id;
+
+	if (inp == NULL) {
+		SCTP_PRINTF("TSNH ep_associd\n");
+		return (NULL);
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+		SCTP_PRINTF("TSNH ep_associd0\n");
+		return (NULL);
+	}
+	id = (uint32_t)asoc_id;
+	head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
+	if (head == NULL) {
+		/* invalid id TSNH */
+		SCTP_PRINTF("TSNH ep_associd1\n");
+		return (NULL);
+	}
+	LIST_FOREACH(stcb, head, sctp_tcbasocidhash) {
+		if (stcb->asoc.assoc_id == id) {
+			if (inp != stcb->sctp_ep) {
+				/*
+				 * some other guy has the same id active (id
+				 * collision ??).
+				 */
+				SCTP_PRINTF("TSNH ep_associd2\n");
+				continue;
+			}
+			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				continue;
+			}
+			if (want_lock) {
+				SCTP_TCB_LOCK(stcb);
+			}
+			return (stcb);
+		}
+	}
+	return (NULL);
+}
+
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
+{
+	struct sctp_tcb *stcb;
+
+	SCTP_INP_RLOCK(inp);
+	stcb = sctp_findasoc_ep_asocid_locked(inp, asoc_id, want_lock);
+	SCTP_INP_RUNLOCK(inp);
+	return (stcb);
+}
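+
+/*
+ * Illustrative sketch (editorial, not part of the upstream source): a
+ * hypothetical lookup using the association id delivered in a COMM_UP
+ * notification; with want_lock set the returned tcb, if any, is locked:
+ *
+ *	stcb = sctp_findassociation_ep_asocid(inp, notif_assoc_id, 1);
+ *	if (stcb != NULL) {
+ *		... inspect stcb->asoc ...
+ *		SCTP_TCB_UNLOCK(stcb);
+ *	}
+ */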
+
+
+/*
+ * Endpoint probe expects that the INP_INFO is locked.
+ */
+static struct sctp_inpcb *
+sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
+		    uint16_t lport, uint32_t vrf_id)
+{
+	struct sctp_inpcb *inp;
+	struct sctp_laddr *laddr;
+#ifdef INET
+	struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+	struct sockaddr_in6 *intf_addr6;
+#endif
+#if defined(__Userspace__)
+	struct sockaddr_conn *sconn;
+#endif
+#ifdef SCTP_MVRF
+	int i;
+#endif
+	int  fnd;
+
+#ifdef INET
+	sin = NULL;
+#endif
+#ifdef INET6
+	sin6 = NULL;
+#endif
+#if defined(__Userspace__)
+	sconn = NULL;
+#endif
+	switch (nam->sa_family) {
+#ifdef INET
+	case AF_INET:
+		sin = (struct sockaddr_in *)nam;
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		sin6 = (struct sockaddr_in6 *)nam;
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		sconn = (struct sockaddr_conn *)nam;
+		break;
+#endif
+	default:
+		/* unsupported family */
+		return (NULL);
+	}
+
+	if (head == NULL)
+		return (NULL);
+
+	LIST_FOREACH(inp, head, sctp_hash) {
+		SCTP_INP_RLOCK(inp);
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
+		    (inp->sctp_lport == lport)) {
+			/* got it */
+			switch (nam->sa_family) {
+#ifdef INET
+			case AF_INET:
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+				    SCTP_IPV6_V6ONLY(inp)) {
+					/* IPv4 on an IPv6 socket with ONLY IPv6 set */
+					SCTP_INP_RUNLOCK(inp);
+					continue;
+				}
+#if defined(__FreeBSD__)
+				if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+				                     &sin->sin_addr) != 0) {
+					SCTP_INP_RUNLOCK(inp);
+					continue;
+				}
+#endif
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				/* A V6 address and the endpoint is NOT bound V6 */
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+					SCTP_INP_RUNLOCK(inp);
+					continue;
+				}
+#if defined(__FreeBSD__)
+				if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+				                     &sin6->sin6_addr) != 0) {
+					SCTP_INP_RUNLOCK(inp);
+					continue;
+				}
+#endif
+				break;
+#endif
+			default:
+				break;
+			}
+			/* does a VRF id match? */
+			fnd = 0;
+#ifdef SCTP_MVRF
+			for (i = 0; i < inp->num_vrfs; i++) {
+				if (inp->m_vrf_ids[i] == vrf_id) {
+					fnd = 1;
+					break;
+				}
+			}
+#else
+			if (inp->def_vrf_id == vrf_id)
+				fnd = 1;
+#endif
+
+			SCTP_INP_RUNLOCK(inp);
+			if (!fnd)
+				continue;
+			return (inp);
+		}
+		SCTP_INP_RUNLOCK(inp);
+	}
+	switch (nam->sa_family) {
+#ifdef INET
+	case AF_INET:
+		if (sin->sin_addr.s_addr == INADDR_ANY) {
+			/* Can't hunt for one that has no address specified */
+			return (NULL);
+		}
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+			/* Can't hunt for one that has no address specified */
+			return (NULL);
+		}
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		if (sconn->sconn_addr == NULL) {
+			return (NULL);
+		}
+		break;
+#endif
+	default:
+		break;
+	}
+	/*
+	 * Ok, not bound to all, so see if we can find an EP bound to this
+	 * address.
+	 */
+	LIST_FOREACH(inp, head, sctp_hash) {
+		SCTP_INP_RLOCK(inp);
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		/*
+		 * Ok this could be a likely candidate, look at all of its
+		 * addresses
+		 */
+		if (inp->sctp_lport != lport) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		/* does a VRF id match? */
+		fnd = 0;
+#ifdef SCTP_MVRF
+		for (i = 0; i < inp->num_vrfs; i++) {
+			if (inp->m_vrf_ids[i] == vrf_id) {
+				fnd = 1;
+				break;
+			}
+		}
+#else
+		if (inp->def_vrf_id == vrf_id)
+			fnd = 1;
+
+#endif
+		if (!fnd) {
+			SCTP_INP_RUNLOCK(inp);
+			continue;
+		}
+		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+			if (laddr->ifa == NULL) {
+				SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
+					__func__);
+				continue;
+			}
+			SCTPDBG(SCTP_DEBUG_PCB1, "Ok laddr->ifa:%p is possible, ",
+				(void *)laddr->ifa);
+			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+				SCTPDBG(SCTP_DEBUG_PCB1, "Huh IFA being deleted\n");
+				continue;
+			}
+			if (laddr->ifa->address.sa.sa_family == nam->sa_family) {
+				/* possible, see if it matches */
+				switch (nam->sa_family) {
+#ifdef INET
+				case AF_INET:
+#if defined(__APPLE__)
+					if (sin == NULL) {
+						/* TSNH */
+						break;
+					}
+#endif
+					if (sin->sin_addr.s_addr ==
+					    laddr->ifa->address.sin.sin_addr.s_addr) {
+						SCTP_INP_RUNLOCK(inp);
+						return (inp);
+					}
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					intf_addr6 = &laddr->ifa->address.sin6;
+					if (SCTP6_ARE_ADDR_EQUAL(sin6,
+					    intf_addr6)) {
+						SCTP_INP_RUNLOCK(inp);
+						return (inp);
+					}
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					if (sconn->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
+						SCTP_INP_RUNLOCK(inp);
+						return (inp);
+					}
+					break;
+#endif
+				}
+			}
+		}
+		SCTP_INP_RUNLOCK(inp);
+	}
+	return (NULL);
+}
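+
+/*
+ * Editorial summary of the probe above: the first pass only accepts a
+ * bound-all endpoint on a matching port; only when none exists does the
+ * second pass walk the specific-address lists, so a wildcard bind always
+ * wins this lookup.
+ */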
+
+
+static struct sctp_inpcb *
+sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id)
+{
+	struct sctppcbhead *head;
+	struct sctp_inpcb *t_inp;
+#ifdef SCTP_MVRF
+	int i;
+#endif
+	int fnd;
+
+	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
+	    SCTP_BASE_INFO(hashmark))];
+	LIST_FOREACH(t_inp, head, sctp_hash) {
+		if (t_inp->sctp_lport != lport) {
+			continue;
+		}
+		/* is it in the VRF in question */
+		fnd = 0;
+#ifdef SCTP_MVRF
+		for (i = 0; i < inp->num_vrfs; i++) {
+			if (t_inp->m_vrf_ids[i] == vrf_id) {
+				fnd = 1;
+				break;
+			}
+		}
+#else
+		if (t_inp->def_vrf_id == vrf_id)
+			fnd = 1;
+#endif
+		if (!fnd)
+			continue;
+
+		/* This one is in use. */
+		/* check the v6/v4 binding issue */
+		if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+		    SCTP_IPV6_V6ONLY(t_inp)) {
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+				/* collision in V6 space */
+				return (t_inp);
+			} else {
+				/* inp is BOUND_V4 no conflict */
+				continue;
+			}
+		} else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+			/* t_inp is bound v4 and v6, conflict always */
+			return (t_inp);
+		} else {
+			/* t_inp is bound only V4 */
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp)) {
+				/* no conflict */
+				continue;
+			}
+			/* else fall through to conflict */
+		}
+		return (t_inp);
+	}
+	return (NULL);
+}
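+
+/*
+ * Summary of the v4/v6 conflict rules above (editorial aid, derived from
+ * the code):
+ *
+ *	t_inp is bound		inp is bound		result
+ *	v6 only			v6 (any flavor)		conflict
+ *	v6 only			v4 only			no conflict
+ *	v4 and v6		anything		conflict
+ *	v4 only			v6 only			no conflict
+ *	v4 only			v4 (or v4 and v6)	conflict
+ */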
+
+
+int
+sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp)
+{
+	/* For 1-2-1 with port reuse */
+	struct sctppcbhead *head;
+	struct sctp_inpcb *tinp, *ninp;
+
+	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
+		/* only works with port reuse on */
+		return (-1);
+	}
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
+		return (0);
+	}
+	SCTP_INP_RUNLOCK(inp);
+	SCTP_INP_INFO_WLOCK();
+	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport,
+	                                    SCTP_BASE_INFO(hashmark))];
+	/* Kick out all non-listeners to the TCP hash */
+	LIST_FOREACH_SAFE(tinp, head, sctp_hash, ninp) {
+		if (tinp->sctp_lport != inp->sctp_lport) {
+			continue;
+		}
+		if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			continue;
+		}
+		if (tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+			continue;
+		}
+		if (tinp->sctp_socket->so_qlimit) {
+			continue;
+		}
+		SCTP_INP_WLOCK(tinp);
+		LIST_REMOVE(tinp, sctp_hash);
+		head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(tinp->sctp_lport, SCTP_BASE_INFO(hashtcpmark))];
+		tinp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL;
+		LIST_INSERT_HEAD(head, tinp, sctp_hash);
+		SCTP_INP_WUNLOCK(tinp);
+	}
+	SCTP_INP_WLOCK(inp);
+	/* Pull from where he was */
+	LIST_REMOVE(inp, sctp_hash);
+	inp->sctp_flags &= ~SCTP_PCB_FLAGS_IN_TCPPOOL;
+	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(inp->sctp_lport, SCTP_BASE_INFO(hashmark))];
+	LIST_INSERT_HEAD(head, inp, sctp_hash);
+	SCTP_INP_WUNLOCK(inp);
+	SCTP_INP_RLOCK(inp);
+	SCTP_INP_INFO_WUNLOCK();
+	return (0);
+}
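+
+/*
+ * Illustrative sketch (editorial, not part of the upstream source): a
+ * hypothetical listen() path using the swap above, entered with the inp
+ * read lock held (the routine drops and re-takes it internally):
+ *
+ *	SCTP_INP_RLOCK(inp);
+ *	if (sctp_swap_inpcb_for_listen(inp) != 0) {
+ *		... SCTP_PCB_FLAGS_PORTREUSE is off: refuse the listen ...
+ *	}
+ *	SCTP_INP_RUNLOCK(inp);
+ */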
+
+
+struct sctp_inpcb *
+sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock,
+		uint32_t vrf_id)
+{
+	/*
+	 * First we check the hash table to see if someone has this port
+	 * bound with just the port.
+	 */
+	struct sctp_inpcb *inp;
+	struct sctppcbhead *head;
+	int lport;
+	unsigned int i;
+#ifdef INET
+	struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+#endif
+#if defined(__Userspace__)
+	struct sockaddr_conn *sconn;
+#endif
+
+	switch (nam->sa_family) {
+#ifdef INET
+	case AF_INET:
+		sin = (struct sockaddr_in *)nam;
+		lport = sin->sin_port;
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		sin6 = (struct sockaddr_in6 *)nam;
+		lport = sin6->sin6_port;
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		sconn = (struct sockaddr_conn *)nam;
+		lport = sconn->sconn_port;
+		break;
+#endif
+	default:
+		return (NULL);
+	}
+	/*
+	 * I could cheat here and just cast to one of the types, but we will
+	 * do it right. It also provides a check against an unsupported
+	 * type.
+	 */
+	/* Find the head of the ALLADDR chain */
+	if (have_lock == 0) {
+		SCTP_INP_INFO_RLOCK();
+	}
+	head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport,
+	    SCTP_BASE_INFO(hashmark))];
+	inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
+
+	/*
+	 * If the TCP model exists it could be that the main listening
+	 * endpoint is gone but there still exists a connected socket for this
+	 * guy. If so we can return the first one that we find. This may NOT
+	 * be the correct one, so the caller should be wary of the returned INP.
+	 * Currently the only caller that sets find_tcp_pool is in bindx where
+	 * we are verifying that a user CAN bind the address. He either
+	 * has bound it already, or someone else has, or it's open to bind,
+	 * so this is good enough.
+	 */
+	if (inp == NULL && find_tcp_pool) {
+		for (i = 0; i < SCTP_BASE_INFO(hashtcpmark) + 1; i++) {
+			head = &SCTP_BASE_INFO(sctp_tcpephash)[i];
+			inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
+			if (inp) {
+				break;
+			}
+		}
+	}
+	if (inp) {
+		SCTP_INP_INCR_REF(inp);
+	}
+	if (have_lock == 0) {
+		SCTP_INP_INFO_RUNLOCK();
+	}
+	return (inp);
+}
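+
+/*
+ * Illustrative sketch (editorial, not part of the upstream source): a
+ * hypothetical endpoint lookup; on success the inp is returned with its
+ * refcount bumped, so the caller must drop that reference when done:
+ *
+ *	inp = sctp_pcb_findep(addr, 0, 0, vrf_id);
+ *	if (inp != NULL) {
+ *		... examine the endpoint ...
+ *		SCTP_INP_DECR_REF(inp);
+ *	}
+ */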
+
+
+/*
+ * Find an association for an endpoint given a pointer to the peer you want
+ * to send to and the endpoint pointer. The address can be IPv4 or IPv6. We
+ * may need to change the *to to some other struct like an mbuf...
+ */
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *from, struct sockaddr *to,
+    struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool,
+    uint32_t vrf_id)
+{
+	struct sctp_inpcb *inp = NULL;
+	struct sctp_tcb *stcb;
+
+	SCTP_INP_INFO_RLOCK();
+	if (find_tcp_pool) {
+		if (inp_p != NULL) {
+			stcb = sctp_tcb_special_locate(inp_p, from, to, netp,
+			                               vrf_id);
+		} else {
+			stcb = sctp_tcb_special_locate(&inp, from, to, netp,
+			                               vrf_id);
+		}
+		if (stcb != NULL) {
+			SCTP_INP_INFO_RUNLOCK();
+			return (stcb);
+		}
+	}
+	inp = sctp_pcb_findep(to, 0, 1, vrf_id);
+	if (inp_p != NULL) {
+		*inp_p = inp;
+	}
+	SCTP_INP_INFO_RUNLOCK();
+	if (inp == NULL) {
+		return (NULL);
+	}
+	/*
+	 * ok, we have an endpoint, now let's find the assoc for it (if any).
+	 * We now place the source address (from) in the "to" of the
+	 * find-endpoint call, since in reality this chain is used from the
+	 * inbound packet side.
+	 */
+	if (inp_p != NULL) {
+		stcb = sctp_findassociation_ep_addr(inp_p, from, netp, to,
+		                                    NULL);
+	} else {
+		stcb = sctp_findassociation_ep_addr(&inp, from, netp, to,
+		                                    NULL);
+	}
+	return (stcb);
+}
+
+
+/*
+ * This routine will grub through the mbuf that is a INIT or INIT-ACK and
+ * find all addresses that the sender has specified in any address list. Each
+ * address will be used to lookup the TCB and see if one exits.
+ */
+static struct sctp_tcb *
+sctp_findassociation_special_addr(struct mbuf *m, int offset,
+    struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
+    struct sockaddr *dst)
+{
+	struct sctp_paramhdr *phdr, parm_buf;
+#if defined(INET) || defined(INET6)
+	struct sctp_tcb *stcb;
+	uint16_t ptype;
+#endif
+	uint16_t plen;
+#ifdef INET
+	struct sockaddr_in sin4;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 sin6;
+#endif
+
+#ifdef INET
+	memset(&sin4, 0, sizeof(sin4));
+#ifdef HAVE_SIN_LEN
+	sin4.sin_len = sizeof(sin4);
+#endif
+	sin4.sin_family = AF_INET;
+	sin4.sin_port = sh->src_port;
+#endif
+#ifdef INET6
+	memset(&sin6, 0, sizeof(sin6));
+#ifdef HAVE_SIN6_LEN
+	sin6.sin6_len = sizeof(sin6);
+#endif
+	sin6.sin6_family = AF_INET6;
+	sin6.sin6_port = sh->src_port;
+#endif
+
+	offset += sizeof(struct sctp_init_chunk);
+
+	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+	while (phdr != NULL) {
+		/* now we must see if we want the parameter */
+#if defined(INET) || defined(INET6)
+		ptype = ntohs(phdr->param_type);
+#endif
+		plen = ntohs(phdr->param_length);
+		if (plen == 0) {
+			break;
+		}
+#ifdef INET
+		if (ptype == SCTP_IPV4_ADDRESS &&
+		    plen == sizeof(struct sctp_ipv4addr_param)) {
+			/* Get the rest of the address */
+			struct sctp_ipv4addr_param ip4_parm, *p4;
+
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)&ip4_parm, min(plen, sizeof(ip4_parm)));
+			if (phdr == NULL) {
+				return (NULL);
+			}
+			p4 = (struct sctp_ipv4addr_param *)phdr;
+			memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
+			/* look it up */
+			stcb = sctp_findassociation_ep_addr(inp_p,
+			    (struct sockaddr *)&sin4, netp, dst, NULL);
+			if (stcb != NULL) {
+				return (stcb);
+			}
+		}
+#endif
+#ifdef INET6
+		if (ptype == SCTP_IPV6_ADDRESS &&
+		    plen == sizeof(struct sctp_ipv6addr_param)) {
+			/* Get the rest of the address */
+			struct sctp_ipv6addr_param ip6_parm, *p6;
+
+			phdr = sctp_get_next_param(m, offset,
+			    (struct sctp_paramhdr *)&ip6_parm, min(plen,sizeof(ip6_parm)));
+			if (phdr == NULL) {
+				return (NULL);
+			}
+			p6 = (struct sctp_ipv6addr_param *)phdr;
+			memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
+			/* look it up */
+			stcb = sctp_findassociation_ep_addr(inp_p,
+			    (struct sockaddr *)&sin6, netp, dst, NULL);
+			if (stcb != NULL) {
+				return (stcb);
+			}
+		}
+#endif
+		offset += SCTP_SIZE32(plen);
+		phdr = sctp_get_next_param(m, offset, &parm_buf,
+					   sizeof(parm_buf));
+	}
+	return (NULL);
+}
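+
+/*
+ * Editorial aid for the walk above (per RFC 4960): the INIT/INIT-ACK chunk
+ * is followed by TLV parameters, each padded to a multiple of 4 bytes,
+ * which is why the cursor advances by SCTP_SIZE32(plen) each iteration:
+ *
+ *	offset -> [ sctp_init_chunk ][ paramhdr | value ][ paramhdr | value ] ...
+ */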
+
+static struct sctp_tcb *
+sctp_findassoc_by_vtag(struct sockaddr *from, struct sockaddr *to, uint32_t vtag,
+		       struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
+		       uint16_t lport, int skip_src_check, uint32_t vrf_id, uint32_t remote_tag)
+{
+	/*
+	 * Use my vtag to hash. If we find it we then verify the source addr
+	 * is in the assoc. If all goes well we save a bit on receipt of a
+	 * packet.
+	 */
+	struct sctpasochead *head;
+	struct sctp_nets *net;
+	struct sctp_tcb *stcb;
+#ifdef SCTP_MVRF
+	unsigned int i;
+#endif
+
+	SCTP_INP_INFO_RLOCK();
+	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(vtag,
+	                                                        SCTP_BASE_INFO(hashasocmark))];
+	LIST_FOREACH(stcb, head, sctp_asocs) {
+		SCTP_INP_RLOCK(stcb->sctp_ep);
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			SCTP_INP_RUNLOCK(stcb->sctp_ep);
+			continue;
+		}
+#ifdef SCTP_MVRF
+		for (i = 0; i < stcb->sctp_ep->num_vrfs; i++) {
+			if (stcb->sctp_ep->m_vrf_ids[i] == vrf_id) {
+				break;
+			}
+		}
+		if (i == stcb->sctp_ep->num_vrfs) {
+			SCTP_INP_RUNLOCK(stcb->sctp_ep);
+			continue;
+		}
+#else
+		if (stcb->sctp_ep->def_vrf_id != vrf_id) {
+			SCTP_INP_RUNLOCK(stcb->sctp_ep);
+			continue;
+		}
+#endif
+		SCTP_TCB_LOCK(stcb);
+		SCTP_INP_RUNLOCK(stcb->sctp_ep);
+		if (stcb->asoc.my_vtag == vtag) {
+			/* candidate */
+			if (stcb->rport != rport) {
+				SCTP_TCB_UNLOCK(stcb);
+				continue;
+			}
+			if (stcb->sctp_ep->sctp_lport != lport) {
+				SCTP_TCB_UNLOCK(stcb);
+				continue;
+			}
+			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				SCTP_TCB_UNLOCK(stcb);
+				continue;
+			}
+			/* RRS:Need toaddr check here */
+			if (sctp_does_stcb_own_this_addr(stcb, to) == 0) {
+			        /* Endpoint does not own this address */
+				SCTP_TCB_UNLOCK(stcb);
+				continue;
+			}
+			if (remote_tag) {
+				/* If we have both vtags that's all we match on */
+				if (stcb->asoc.peer_vtag == remote_tag) {
+					/* If both tags match we consider it conclusive
+					 * and check NO source/destination addresses
+					 */
+					goto conclusive;
+				}
+			}
+			if (skip_src_check) {
+			conclusive:
+			        if (from) {
+					*netp = sctp_findnet(stcb, from);
+				} else {
+					*netp = NULL;	/* unknown */
+				}
+				if (inp_p)
+					*inp_p = stcb->sctp_ep;
+				SCTP_INP_INFO_RUNLOCK();
+				return (stcb);
+			}
+			net = sctp_findnet(stcb, from);
+			if (net) {
+				/* yep, it's him. */
+				*netp = net;
+				SCTP_STAT_INCR(sctps_vtagexpress);
+				*inp_p = stcb->sctp_ep;
+				SCTP_INP_INFO_RUNLOCK();
+				return (stcb);
+			} else {
+				/*
+				 * not him, this should only happen in rare
+				 * cases so I peg it.
+				 */
+				SCTP_STAT_INCR(sctps_vtagbogus);
+			}
+		}
+		SCTP_TCB_UNLOCK(stcb);
+	}
+	SCTP_INP_INFO_RUNLOCK();
+	return (NULL);
+}
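+
+/*
+ * Illustrative sketch (editorial, not part of the upstream source): how the
+ * vtag fast path above is driven from input processing, matching on the
+ * packet's verification tag plus both ports (mirrors the call in
+ * sctp_findassociation_addr() below):
+ *
+ *	stcb = sctp_findassoc_by_vtag(src, dst, ntohl(sh->v_tag), &inp, &net,
+ *	                              sh->src_port, sh->dest_port, 0, vrf_id, 0);
+ */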
+
+
+/*
+ * Find an association with the pointer to the inbound IP packet. This can be
+ * an IPv4 or IPv6 packet.
+ */
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *m, int offset,
+    struct sockaddr *src, struct sockaddr *dst,
+    struct sctphdr *sh, struct sctp_chunkhdr *ch,
+    struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
+{
+	struct sctp_tcb *stcb;
+	struct sctp_inpcb *inp;
+
+	if (sh->v_tag) {
+		/* we only go down this path if vtag is non-zero */
+		stcb = sctp_findassoc_by_vtag(src, dst, ntohl(sh->v_tag),
+		                              inp_p, netp, sh->src_port, sh->dest_port, 0, vrf_id, 0);
+		if (stcb) {
+			return (stcb);
+		}
+	}
+
+	if (inp_p) {
+		stcb = sctp_findassociation_addr_sa(src, dst, inp_p, netp,
+		                                    1, vrf_id);
+		inp = *inp_p;
+	} else {
+		stcb = sctp_findassociation_addr_sa(src, dst, &inp, netp,
+		                                    1, vrf_id);
+	}
+	SCTPDBG(SCTP_DEBUG_PCB1, "stcb:%p inp:%p\n", (void *)stcb, (void *)inp);
+	if (stcb == NULL && inp) {
+		/* Found an EP but not this address */
+		if ((ch->chunk_type == SCTP_INITIATION) ||
+		    (ch->chunk_type == SCTP_INITIATION_ACK)) {
+			/*-
+			 * special hook, we do NOT return inp or an
+			 * association that is linked to an existing
+			 * association that is under the TCP pool (i.e. no
+			 * listener exists). The endpoint finding routine
+			 * will always find a listener before examining the
+			 * TCP pool.
+			 */
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
+				if (inp_p) {
+					*inp_p = NULL;
+				}
+				return (NULL);
+			}
+			stcb = sctp_findassociation_special_addr(m,
+			    offset, sh, &inp, netp, dst);
+			if (inp_p != NULL) {
+				*inp_p = inp;
+			}
+		}
+	}
+	SCTPDBG(SCTP_DEBUG_PCB1, "stcb is %p\n", (void *)stcb);
+	return (stcb);
+}
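+
+/*
+ * Editorial summary of the lookup order above: 1) fast path by vtag hash,
+ * 2) full address lookup via sctp_findassociation_addr_sa(), and 3) for
+ * INIT/INIT-ACK only, the address-list walk in
+ * sctp_findassociation_special_addr().
+ */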
+
+/*
+ * Look up an association by an ASCONF lookup address.
+ * If the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup.
+ */
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *m, int offset,
+			       struct sockaddr *dst, struct sctphdr *sh,
+                               struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint32_t vrf_id)
+{
+	struct sctp_tcb *stcb;
+	union sctp_sockstore remote_store;
+	struct sctp_paramhdr parm_buf, *phdr;
+	int ptype;
+	int zero_address = 0;
+#ifdef INET
+	struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+#endif
+
+	memset(&remote_store, 0, sizeof(remote_store));
+	phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
+				   &parm_buf, sizeof(struct sctp_paramhdr));
+	if (phdr == NULL) {
+		SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf lookup addr\n",
+			__func__);
+		return NULL;
+	}
+	ptype = (int)((uint32_t) ntohs(phdr->param_type));
+	/* get the correlation address */
+	switch (ptype) {
+#ifdef INET6
+	case SCTP_IPV6_ADDRESS:
+	{
+		/* ipv6 address param */
+		struct sctp_ipv6addr_param *p6, p6_buf;
+
+		if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
+			return NULL;
+		}
+		p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
+								       offset + sizeof(struct sctp_asconf_chunk),
+								       &p6_buf.ph, sizeof(*p6));
+		if (p6 == NULL) {
+			SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v6 lookup addr\n",
+				__func__);
+			return (NULL);
+		}
+		sin6 = &remote_store.sin6;
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(*sin6);
+#endif
+		sin6->sin6_port = sh->src_port;
+		memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			zero_address = 1;
+		break;
+	}
+#endif
+#ifdef INET
+	case SCTP_IPV4_ADDRESS:
+	{
+		/* ipv4 address param */
+		struct sctp_ipv4addr_param *p4, p4_buf;
+
+		if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
+			return NULL;
+		}
+		p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
+								       offset + sizeof(struct sctp_asconf_chunk),
+								       &p4_buf.ph, sizeof(*p4));
+		if (p4 == NULL) {
+			SCTPDBG(SCTP_DEBUG_INPUT3, "%s: failed to get asconf v4 lookup addr\n",
+				__func__);
+			return (NULL);
+		}
+		sin = &remote_store.sin;
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(*sin);
+#endif
+		sin->sin_port = sh->src_port;
+		memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
+		if (sin->sin_addr.s_addr == INADDR_ANY)
+			zero_address = 1;
+		break;
+	}
+#endif
+	default:
+		/* invalid address param type */
+		return NULL;
+	}
+
+	if (zero_address) {
+	        stcb = sctp_findassoc_by_vtag(NULL, dst, ntohl(sh->v_tag), inp_p,
+					      netp, sh->src_port, sh->dest_port, 1, vrf_id, 0);
+		if (stcb != NULL) {
+			SCTP_INP_DECR_REF(*inp_p);
+		}
+	} else {
+		stcb = sctp_findassociation_ep_addr(inp_p,
+		    &remote_store.sa, netp,
+		    dst, NULL);
+	}
+	return (stcb);
+}
+
+
+/*
+ * Allocate a sctp_inpcb and set up a temporary binding to a port/all
+ * addresses. This way, if we don't get a bind, we by default pick an
+ * ephemeral port with all addresses bound.
+ */
+int
+sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id)
+{
+	/*
+	 * We get called when a new endpoint starts up. We need to allocate
+	 * the sctp_inpcb structure from the zone and init it. Mark it as
+	 * unbound and find a port that we can use as an ephemeral with
+	 * INADDR_ANY. If the user binds later, no problem: we can then add
+	 * in the specific addresses. And set up the default parameters for
+	 * the EP.
+	 */
+	int i, error;
+	struct sctp_inpcb *inp;
+	struct sctp_pcb *m;
+	struct timeval time;
+	sctp_sharedkey_t *null_key;
+
+	error = 0;
+
+	SCTP_INP_INFO_WLOCK();
+	inp = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_ep), struct sctp_inpcb);
+	if (inp == NULL) {
+		SCTP_PRINTF("Out of SCTP-INPCB structures - no resources\n");
+		SCTP_INP_INFO_WUNLOCK();
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+		return (ENOBUFS);
+	}
+	/* zap it */
+	bzero(inp, sizeof(*inp));
+
+	/* bump generations */
+#if defined(__APPLE__)
+	inp->ip_inp.inp.inp_state = INPCB_STATE_INUSE;
+#endif
+	/* setup socket pointers */
+	inp->sctp_socket = so;
+	inp->ip_inp.inp.inp_socket = so;
+#if defined(__FreeBSD__)
+	inp->ip_inp.inp.inp_cred = crhold(so->so_cred);
+#endif
+#ifdef INET6
+#if !defined(__Userspace__) && !defined(__Windows__)
+	if (INP_SOCKAF(so) == AF_INET6) {
+		if (MODULE_GLOBAL(ip6_auto_flowlabel)) {
+			inp->ip_inp.inp.inp_flags |= IN6P_AUTOFLOWLABEL;
+		}
+		if (MODULE_GLOBAL(ip6_v6only)) {
+			inp->ip_inp.inp.inp_flags |= IN6P_IPV6_V6ONLY;
+		}
+	}
+#endif
+#endif
+	inp->sctp_associd_counter = 1;
+	inp->partial_delivery_point = SCTP_SB_LIMIT_RCV(so) >> SCTP_PARTIAL_DELIVERY_SHIFT;
+	inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+	inp->max_cwnd = 0;
+	inp->sctp_cmt_on_off = SCTP_BASE_SYSCTL(sctp_cmt_on_off);
+	inp->ecn_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_ecn_enable);
+	inp->prsctp_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pr_enable);
+	inp->auth_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_auth_enable);
+	inp->asconf_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_asconf_enable);
+	inp->reconfig_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_reconfig_enable);
+	inp->nrsack_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_nrsack_enable);
+	inp->pktdrop_supported = (uint8_t)SCTP_BASE_SYSCTL(sctp_pktdrop_enable);
+	inp->idata_supported = 0;
+
+#if defined(__FreeBSD__)
+	inp->fibnum = so->so_fibnum;
+#else
+	inp->fibnum = 0;
+#endif
+#if defined(__Userspace__)
+	inp->ulp_info = NULL;
+	inp->recv_callback = NULL;
+	inp->send_callback = NULL;
+	inp->send_sb_threshold = 0;
+#endif
+	/* init the small hash table we use to track asocid <-> tcb */
+	inp->sctp_asocidhash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE, &inp->hashasocidmark);
+	if (inp->sctp_asocidhash == NULL) {
+#if defined(__FreeBSD__)
+		crfree(inp->ip_inp.inp.inp_cred);
+#endif
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+		SCTP_INP_INFO_WUNLOCK();
+		return (ENOBUFS);
+	}
+#ifdef IPSEC
+#if !(defined(__APPLE__))
+	error = ipsec_init_policy(so, &inp->ip_inp.inp.inp_sp);
+#else
+	error = 0;
+#endif
+	if (error != 0) {
+#if defined(__FreeBSD__)
+		crfree(inp->ip_inp.inp.inp_cred);
+#endif
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+		SCTP_INP_INFO_WUNLOCK();
+		return error;
+	}
+#endif				/* IPSEC */
+	SCTP_INCR_EP_COUNT();
+	inp->ip_inp.inp.inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+	SCTP_INP_INFO_WUNLOCK();
+
+	so->so_pcb = (caddr_t)inp;
+
+#if defined(__FreeBSD__) && __FreeBSD_version < 803000
+	if ((SCTP_SO_TYPE(so) == SOCK_DGRAM) ||
+	    (SCTP_SO_TYPE(so) == SOCK_SEQPACKET)) {
+#else
+	if (SCTP_SO_TYPE(so) == SOCK_SEQPACKET) {
+#endif
+		/* UDP style socket */
+		inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+		    SCTP_PCB_FLAGS_UNBOUND);
+		/* Be sure it is NON-BLOCKING IO for UDP */
+		/* SCTP_SET_SO_NBIO(so); */
+	} else if (SCTP_SO_TYPE(so) == SOCK_STREAM) {
+		/* TCP style socket */
+		inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+		    SCTP_PCB_FLAGS_UNBOUND);
+		/* Be sure we have blocking IO by default */
+		SCTP_CLEAR_SO_NBIO(so);
+#if defined(__Panda__)
+	} else if (SCTP_SO_TYPE(so) == SOCK_FASTSEQPACKET) {
+		inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+		    SCTP_PCB_FLAGS_UNBOUND);
+		sctp_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE);
+	} else if (SCTP_SO_TYPE(so) == SOCK_FASTSTREAM) {
+		inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
+		    SCTP_PCB_FLAGS_UNBOUND);
+		sctp_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE);
+#endif
+	} else {
+		/*
+		 * unsupported socket type (RAW, etc.) - in case we missed
+		 * it in protosw
+		 */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EOPNOTSUPP);
+		so->so_pcb = NULL;
+#if defined(__FreeBSD__)
+		crfree(inp->ip_inp.inp.inp_cred);
+#ifdef IPSEC
+		ipsec_delete_pcbpolicy(&inp->ip_inp.inp);
+#endif
+#endif
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+		return (EOPNOTSUPP);
+	}
+	if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_1) {
+		sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+		sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+	} else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_2) {
+		sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+		sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+	} else if (SCTP_BASE_SYSCTL(sctp_default_frag_interleave) == SCTP_FRAG_LEVEL_0) {
+		sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+		sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+	}
+	inp->sctp_tcbhash = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_pcbtblsize),
+					   &inp->sctp_hashmark);
+	if (inp->sctp_tcbhash == NULL) {
+		SCTP_PRINTF("Out of SCTP-INPCB->hashinit - no resources\n");
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+		so->so_pcb = NULL;
+#if defined(__FreeBSD__)
+		crfree(inp->ip_inp.inp.inp_cred);
+#ifdef IPSEC
+		ipsec_delete_pcbpolicy(&inp->ip_inp.inp);
+#endif
+#endif
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+		return (ENOBUFS);
+	}
+#ifdef SCTP_MVRF
+	inp->vrf_size = SCTP_DEFAULT_VRF_SIZE;
+	SCTP_MALLOC(inp->m_vrf_ids, uint32_t *,
+		    (sizeof(uint32_t) * inp->vrf_size), SCTP_M_MVRF);
+	if (inp->m_vrf_ids == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+		so->so_pcb = NULL;
+		SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
+#if defined(__FreeBSD__)
+		crfree(inp->ip_inp.inp.inp_cred);
+#ifdef IPSEC
+		ipsec_delete_pcbpolicy(&inp->ip_inp.inp);
+#endif
+#endif
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+		return (ENOBUFS);
+	}
+	inp->m_vrf_ids[0] = vrf_id;
+	inp->num_vrfs = 1;
+#endif
+	inp->def_vrf_id = vrf_id;
+
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+	inp->ip_inp.inp.inpcb_mtx = lck_mtx_alloc_init(SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr);
+	if (inp->ip_inp.inp.inpcb_mtx == NULL) {
+		SCTP_PRINTF("in_pcballoc: can't alloc mutex! so=%p\n", (void *)so);
+#ifdef SCTP_MVRF
+		SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF);
+#endif
+		SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
+		so->so_pcb = NULL;
+#if defined(__FreeBSD__)
+		crfree(inp->ip_inp.inp.inp_cred);
+#ifdef IPSEC
+		ipsec_delete_pcbpolicy(&inp->ip_inp.inp);
+#endif
+#endif
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+		SCTP_UNLOCK_EXC(SCTP_BASE_INFO(sctbinfo).ipi_lock);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+		return (ENOMEM);
+	}
+#elif defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+	lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).mtx_grp, SCTP_BASE_INFO(sctbinfo).mtx_attr);
+#else
+	lck_mtx_init(&inp->ip_inp.inp.inpcb_mtx, SCTP_BASE_INFO(sctbinfo).ipi_lock_grp, SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
+#endif
+#endif
+	SCTP_INP_INFO_WLOCK();
+	SCTP_INP_LOCK_INIT(inp);
+#if defined(__FreeBSD__)
+	INP_LOCK_INIT(&inp->ip_inp.inp, "inp", "sctpinp");
+#endif
+	SCTP_INP_READ_INIT(inp);
+	SCTP_ASOC_CREATE_LOCK_INIT(inp);
+	/* lock the new ep */
+	SCTP_INP_WLOCK(inp);
+
+	/* add it to the info area */
+	LIST_INSERT_HEAD(&SCTP_BASE_INFO(listhead), inp, sctp_list);
+#if defined(__APPLE__)
+	inp->ip_inp.inp.inp_pcbinfo = &SCTP_BASE_INFO(sctbinfo);
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+	LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).listhead, &inp->ip_inp.inp, inp_list);
+#else
+	LIST_INSERT_HEAD(SCTP_BASE_INFO(sctbinfo).ipi_listhead, &inp->ip_inp.inp, inp_list);
+#endif
+#endif
+	SCTP_INP_INFO_WUNLOCK();
+
+	TAILQ_INIT(&inp->read_queue);
+	LIST_INIT(&inp->sctp_addr_list);
+
+	LIST_INIT(&inp->sctp_asoc_list);
+
+#ifdef SCTP_TRACK_FREED_ASOCS
+	/* TEMP CODE */
+	LIST_INIT(&inp->sctp_asoc_free_list);
+#endif
+	/* Init the timer structure for signature change */
+	SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer);
+	inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
+
+	/* now init the actual endpoint default data */
+	m = &inp->sctp_ep;
+
+	/* setup the base timeout information */
+	m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC);	/* needed ? */
+	m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC);	/* needed ? */
+	m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default));
+	m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default));
+	m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default));
+	m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default));
+	m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default));
+	/* all max/min max are in ms */
+	m->sctp_maxrto = SCTP_BASE_SYSCTL(sctp_rto_max_default);
+	m->sctp_minrto = SCTP_BASE_SYSCTL(sctp_rto_min_default);
+	m->initial_rto = SCTP_BASE_SYSCTL(sctp_rto_initial_default);
+	m->initial_init_rto_max = SCTP_BASE_SYSCTL(sctp_init_rto_max_default);
+	m->sctp_sack_freq = SCTP_BASE_SYSCTL(sctp_sack_freq_default);
+	m->max_init_times = SCTP_BASE_SYSCTL(sctp_init_rtx_max_default);
+	m->max_send_times = SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default);
+	m->def_net_failure = SCTP_BASE_SYSCTL(sctp_path_rtx_max_default);
+	m->def_net_pf_threshold = SCTP_BASE_SYSCTL(sctp_path_pf_threshold);
+	m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
+	m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
+	m->max_burst = SCTP_BASE_SYSCTL(sctp_max_burst_default);
+	m->fr_max_burst = SCTP_BASE_SYSCTL(sctp_fr_max_burst_default);
+
+	m->sctp_default_cc_module = SCTP_BASE_SYSCTL(sctp_default_cc_module);
+	m->sctp_default_ss_module = SCTP_BASE_SYSCTL(sctp_default_ss_module);
+	m->max_open_streams_intome = SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default);
+	/* number of streams to pre-open on an association */
+	m->pre_open_stream_count = SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default);
+
+	/* Add adaptation cookie */
+	m->adaptation_layer_indicator = 0;
+	m->adaptation_layer_indicator_provided = 0;
+
+	/* seed random number generator */
+	m->random_counter = 1;
+	m->store_at = SCTP_SIGNATURE_SIZE;
+	SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers));
+	sctp_fill_random_store(m);
+
+	/* Minimum cookie size */
+	m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
+	    sizeof(struct sctp_state_cookie);
+	m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
+
+	/* Setup the initial secret */
+	(void)SCTP_GETTIME_TIMEVAL(&time);
+	m->time_of_secret_change = time.tv_sec;
+
+	for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+		m->secret_key[0][i] = sctp_select_initial_TSN(m);
+	}
+	sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
+
+	/* How long is a cookie good for? */
+	m->def_cookie_life = MSEC_TO_TICKS(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default));
+	/*
+	 * Initialize authentication parameters
+	 */
+	m->local_hmacs = sctp_default_supported_hmaclist();
+	m->local_auth_chunks = sctp_alloc_chunklist();
+	if (inp->asconf_supported) {
+		sctp_auth_add_chunk(SCTP_ASCONF, m->local_auth_chunks);
+		sctp_auth_add_chunk(SCTP_ASCONF_ACK, m->local_auth_chunks);
+	}
+	m->default_dscp = 0;
+#ifdef INET6
+	m->default_flowlabel = 0;
+#endif
+	m->port = 0; /* encapsulation disabled by default */
+	LIST_INIT(&m->shared_keys);
+	/* add default NULL key as key id 0 */
+	null_key = sctp_alloc_sharedkey();
+	sctp_insert_sharedkey(&m->shared_keys, null_key);
+	SCTP_INP_WUNLOCK(inp);
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, NULL, 12);
+#endif
+	return (error);
+}
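+
+/*
+ * Illustrative sketch (editorial, not part of the upstream source): a
+ * hypothetical attach path, where sctp_inpcb_alloc() supplies the pcb for a
+ * freshly created socket; on success so->so_pcb points at the new inp:
+ *
+ *	error = sctp_inpcb_alloc(so, vrf_id);
+ *	if (error != 0) {
+ *		... ENOBUFS or EOPNOTSUPP; no pcb was attached ...
+ *	}
+ *	inp = (struct sctp_inpcb *)so->so_pcb;
+ */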
+
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
+    struct sctp_tcb *stcb)
+{
+	struct sctp_nets *net;
+	uint16_t lport, rport;
+	struct sctppcbhead *head;
+	struct sctp_laddr *laddr, *oladdr;
+
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	SCTP_TCB_UNLOCK(stcb);
+	SCTP_INP_INFO_WLOCK();
+	SCTP_INP_WLOCK(old_inp);
+	SCTP_INP_WLOCK(new_inp);
+	SCTP_TCB_LOCK(stcb);
+	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+
+	new_inp->sctp_ep.time_of_secret_change =
+	    old_inp->sctp_ep.time_of_secret_change;
+	memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
+	    sizeof(old_inp->sctp_ep.secret_key));
+	new_inp->sctp_ep.current_secret_number =
+	    old_inp->sctp_ep.current_secret_number;
+	new_inp->sctp_ep.last_secret_number =
+	    old_inp->sctp_ep.last_secret_number;
+	new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
+
+	/* make it so new data pours into the new socket */
+	stcb->sctp_socket = new_inp->sctp_socket;
+	stcb->sctp_ep = new_inp;
+
+	/* Copy the port across */
+	lport = new_inp->sctp_lport = old_inp->sctp_lport;
+	rport = stcb->rport;
+	/* Pull the tcb from the old endpoint's lists */
+	LIST_REMOVE(stcb, sctp_tcbhash);
+	LIST_REMOVE(stcb, sctp_tcblist);
+	if (stcb->asoc.in_asocid_hash) {
+		LIST_REMOVE(stcb, sctp_tcbasocidhash);
+	}
+	/* Now insert the new_inp into the TCP connected hash */
+	head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR((lport | rport), SCTP_BASE_INFO(hashtcpmark))];
+
+	LIST_INSERT_HEAD(head, new_inp, sctp_hash);
+	/* It's safe to access. */
+	new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
+
+	/* Now move the tcb into the endpoint list */
+	LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
+	/*
+	 * Question, do we even need to worry about the ep-hash since we
+	 * only have one connection? Probably not :> so let's get rid of it
+	 * and not suck up any kernel memory on it.
+	 */
+	if (stcb->asoc.in_asocid_hash) {
+		struct sctpasochead *lhd;
+		lhd = &new_inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
+			new_inp->hashasocidmark)];
+		LIST_INSERT_HEAD(lhd, stcb, sctp_tcbasocidhash);
+	}
+	/* Ok. Let's restart the timers. */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp,
+		    stcb, net);
+	}
+
+	SCTP_INP_INFO_WUNLOCK();
+	if (new_inp->sctp_tcbhash != NULL) {
+		SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark);
+		new_inp->sctp_tcbhash = NULL;
+	}
+	if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+		/* Subset bound, so copy in the laddr list from the old_inp */
+		LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
+			laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+			if (laddr == NULL) {
+				/*
+				 * Gak, what can we do? This assoc is really
+				 * HOSED. We probably should send an abort
+				 * here.
+				 */
+				SCTPDBG(SCTP_DEBUG_PCB1, "Association hosed in TCP model, out of laddr memory\n");
+				continue;
+			}
+			SCTP_INCR_LADDR_COUNT();
+			bzero(laddr, sizeof(*laddr));
+			(void)SCTP_GETTIME_TIMEVAL(&laddr->start_time);
+			laddr->ifa = oladdr->ifa;
+			atomic_add_int(&laddr->ifa->refcount, 1);
+			LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
+			    sctp_nxt_addr);
+			new_inp->laddr_count++;
+			if (oladdr == stcb->asoc.last_used_address) {
+				stcb->asoc.last_used_address = laddr;
+			}
+		}
+	}
+	/*
+	 * Now any running timers need to be adjusted. Since we really don't
+	 * care whether they are running or not, just blast the new_inp into
+	 * all of them.
+	 */
+
+	stcb->asoc.dack_timer.ep = (void *)new_inp;
+	stcb->asoc.asconf_timer.ep = (void *)new_inp;
+	stcb->asoc.strreset_timer.ep = (void *)new_inp;
+	stcb->asoc.shut_guard_timer.ep = (void *)new_inp;
+	stcb->asoc.autoclose_timer.ep = (void *)new_inp;
+	stcb->asoc.delayed_event_timer.ep = (void *)new_inp;
+	stcb->asoc.delete_prim_timer.ep = (void *)new_inp;
+	/* now what about the nets? */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		net->pmtu_timer.ep = (void *)new_inp;
+		net->hb_timer.ep = (void *)new_inp;
+		net->rxt_timer.ep = (void *)new_inp;
+	}
+	SCTP_INP_WUNLOCK(new_inp);
+	SCTP_INP_WUNLOCK(old_inp);
+}
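+
+/*
+ * Editorial note on the move above: every SCTP timer caches an ep pointer,
+ * so after the association migrates each timer's ep field is re-pointed at
+ * new_inp, regardless of whether that timer is currently running.
+ */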
+
+/*
+ * insert an laddr entry with the given ifa for the desired list
+ */
+static int
+sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act)
+{
+	struct sctp_laddr *laddr;
+
+	laddr = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+	if (laddr == NULL) {
+		/* out of memory? */
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+		return (EINVAL);
+	}
+	SCTP_INCR_LADDR_COUNT();
+	bzero(laddr, sizeof(*laddr));
+	(void)SCTP_GETTIME_TIMEVAL(&laddr->start_time);
+	laddr->ifa = ifa;
+	laddr->action = act;
+	atomic_add_int(&ifa->refcount, 1);
+	/* insert it */
+	LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
+
+	return (0);
+}
+
+/*
+ * Remove an laddr entry from the local address list (on an assoc)
+ */
+static void
+sctp_remove_laddr(struct sctp_laddr *laddr)
+{
+
+	/* remove from the list */
+	LIST_REMOVE(laddr, sctp_nxt_addr);
+	sctp_free_ifa(laddr->ifa);
+	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), laddr);
+	SCTP_DECR_LADDR_COUNT();
+}
+
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__))
+/*
+ * Don't know why, but without this there is an undefined reference when
+ * compiling NetBSD... hmm
+ */
+extern void in6_sin6_2_sin(struct sockaddr_in *, struct sockaddr_in6 *sin6);
+#endif
+
+
+/* sctp_ifap is used to bypass normal local address validation checks */
+int
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
+                struct sctp_ifa *sctp_ifap, struct thread *p)
+#elif defined(__Windows__)
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
+                struct sctp_ifa *sctp_ifap, PKTHREAD p)
+#else
+sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
+                struct sctp_ifa *sctp_ifap, struct proc *p)
+#endif
+{
+	/* bind an ep to a socket address */
+	struct sctppcbhead *head;
+	struct sctp_inpcb *inp, *inp_tmp;
+#if defined(INET) || (defined(INET6) && defined(__APPLE__)) || defined(__FreeBSD__) || defined(__APPLE__)
+	struct inpcb *ip_inp;
+#endif
+	int port_reuse_active = 0;
+	int bindall;
+#ifdef SCTP_MVRF
+	int i;
+#endif
+	uint16_t lport;
+	int error;
+	uint32_t vrf_id;
+
+	lport = 0;
+	bindall = 1;
+	inp = (struct sctp_inpcb *)so->so_pcb;
+#if defined(INET) || (defined(INET6) && defined(__APPLE__)) || defined(__FreeBSD__) || defined(__APPLE__)
+	ip_inp = (struct inpcb *)so->so_pcb;
+#endif
+#ifdef SCTP_DEBUG
+	if (addr) {
+		SCTPDBG(SCTP_DEBUG_PCB1, "Bind called port: %d\n",
+			ntohs(((struct sockaddr_in *)addr)->sin_port));
+		SCTPDBG(SCTP_DEBUG_PCB1, "Addr: ");
+		SCTPDBG_ADDR(SCTP_DEBUG_PCB1, addr);
+	}
+#endif
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+		/* already did a bind, subsequent binds NOT allowed! */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+		return (EINVAL);
+	}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+#ifdef INVARIANTS
+	if (p == NULL)
+		panic("null proc/thread");
+#endif
+#endif
+	if (addr != NULL) {
+		switch (addr->sa_family) {
+#ifdef INET
+		case AF_INET:
+		{
+			struct sockaddr_in *sin;
+
+			/* IPV6_V6ONLY socket? */
+			if (SCTP_IPV6_V6ONLY(ip_inp)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+				return (EINVAL);
+			}
+#ifdef HAVE_SA_LEN
+			if (addr->sa_len != sizeof(*sin)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+				return (EINVAL);
+			}
+#endif
+
+			sin = (struct sockaddr_in *)addr;
+			lport = sin->sin_port;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			/*
+			 * For LOOPBACK the prison_local_ip4() call will transmute the ip address
+			 * to the proper value.
+			 */
+			if (p && (error = prison_local_ip4(p->td_ucred, &sin->sin_addr)) != 0) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+				return (error);
+			}
+#endif
+			if (sin->sin_addr.s_addr != INADDR_ANY) {
+				bindall = 0;
+			}
+			break;
+		}
+#endif
+#ifdef INET6
+		case AF_INET6:
+		{
+			/* Only for pure IPv6 Address. (No IPv4 Mapped!) */
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)addr;
+
+#ifdef HAVE_SA_LEN
+			if (addr->sa_len != sizeof(*sin6)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+				return (EINVAL);
+			}
+#endif
+			lport = sin6->sin6_port;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			/*
+			 * For LOOPBACK the prison_local_ip6() call will transmute the ipv6 address
+			 * to the proper value.
+			 */
+			if (p && (error = prison_local_ip6(p->td_ucred, &sin6->sin6_addr,
+			    (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+				return (error);
+			}
+#endif
+			if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+				bindall = 0;
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+				/* KAME hack: embed scopeid */
+#if defined(SCTP_KAME)
+				if (sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)) != 0) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+					return (EINVAL);
+				}
+#elif defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+				if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL) != 0) {
+#else
+				if (in6_embedscope(&sin6->sin6_addr, sin6, ip_inp, NULL, NULL) != 0) {
+#endif
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+					return (EINVAL);
+				}
+#elif defined(__FreeBSD__)
+				error = scope6_check_id(sin6, MODULE_GLOBAL(ip6_use_defzone));
+				if (error != 0) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+					return (error);
+				}
+#else
+				if (in6_embedscope(&sin6->sin6_addr, sin6) != 0) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+					return (EINVAL);
+				}
+#endif
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+			}
+#ifndef SCOPEDROUTING
+			/* this must be cleared for ifa_ifwithaddr() */
+			sin6->sin6_scope_id = 0;
+#endif /* SCOPEDROUTING */
+			break;
+		}
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+		{
+			struct sockaddr_conn *sconn;
+
+#ifdef HAVE_SA_LEN
+			if (addr->sa_len != sizeof(struct sockaddr_conn)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+				return (EINVAL);
+			}
+#endif
+			sconn = (struct sockaddr_conn *)addr;
+			lport = sconn->sconn_port;
+			if (sconn->sconn_addr != NULL) {
+				bindall = 0;
+			}
+			break;
+		}
+#endif
+		default:
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EAFNOSUPPORT);
+			return (EAFNOSUPPORT);
+		}
+	}
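+	/*
+	 * At this point lport and bindall reflect the caller-supplied
+	 * address (or the defaults for a NULL address). Take the global
+	 * PCB info lock and the endpoint lock before touching the hash
+	 * tables.
+	 */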
+	SCTP_INP_INFO_WLOCK();
+	SCTP_INP_WLOCK(inp);
+	/* Setup a vrf_id to be the default for the non-bind-all case. */
+	vrf_id = inp->def_vrf_id;
+
+	/* increase our count due to the unlock we do */
+	SCTP_INP_INCR_REF(inp);
+	if (lport) {
+		/*
+		 * Did the caller specify a port? If so, we must see if an
+		 * endpoint already has this one bound.
+		 */
+		/* got to be root to get at low ports */
+#if !defined(__Windows__)
+		if (ntohs(lport) < IPPORT_RESERVED) {
+			if (p && (error =
+#ifdef __FreeBSD__
+#if __FreeBSD_version > 602000
+				  priv_check(p, PRIV_NETINET_RESERVEDPORT)
+#elif __FreeBSD_version >= 500000
+				  suser_cred(p->td_ucred, 0)
+#else
+				  suser(p)
+#endif
+#elif defined(__APPLE__)
+				  suser(p->p_ucred, &p->p_acflag)
+#elif defined(__Userspace__) /* must be true to use raw socket */
+				  1
+#else
+				  suser(p, 0)
+#endif
+				    )) {
+				SCTP_INP_DECR_REF(inp);
+				SCTP_INP_WUNLOCK(inp);
+				SCTP_INP_INFO_WUNLOCK();
+				return (error);
+			}
+#if defined(__Panda__)
+			if (!SCTP_IS_PRIVILEDGED(so)) {
+				SCTP_INP_DECR_REF(inp);
+				SCTP_INP_WUNLOCK(inp);
+				SCTP_INP_INFO_WUNLOCK();
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EACCES);
+				return (EACCES);
+			}
+#endif
+		}
+#endif /* __Windows__ */
+		SCTP_INP_WUNLOCK(inp);
+		if (bindall) {
+#ifdef SCTP_MVRF
+			for (i = 0; i < inp->num_vrfs; i++) {
+				vrf_id = inp->m_vrf_ids[i];
+#else
+				vrf_id = inp->def_vrf_id;
+#endif
+				inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
+				if (inp_tmp != NULL) {
+					/*
+					 * The lookup returned an endpoint and
+					 * bumped its reference count. Note
+					 * that we are not bound yet, so
+					 * inp_tmp should NEVER be inp. It is
+					 * inp_tmp that got the reference
+					 * bump, so we must lower it.
+					 */
+					SCTP_INP_DECR_REF(inp_tmp);
+					/* unlock info */
+					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+					    (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+						/* Ok, must be one-2-one and allowing port re-use */
+						port_reuse_active = 1;
+						goto continue_anyway;
+					}
+					SCTP_INP_DECR_REF(inp);
+					SCTP_INP_INFO_WUNLOCK();
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+					return (EADDRINUSE);
+				}
+#ifdef SCTP_MVRF
+			}
+#endif
+		} else {
+			inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
+			if (inp_tmp != NULL) {
+				/*
+				 * The lookup returned an endpoint and bumped
+				 * its reference count. Note that we are not
+				 * bound yet, so inp_tmp should NEVER be inp.
+				 * It is inp_tmp that got the reference bump,
+				 * so we must lower it.
+				 */
+				SCTP_INP_DECR_REF(inp_tmp);
+				/* unlock info */
+				if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+				    (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+					/* Ok, must be one-2-one and allowing port re-use */
+					port_reuse_active = 1;
+					goto continue_anyway;
+				}
+				SCTP_INP_DECR_REF(inp);
+				SCTP_INP_INFO_WUNLOCK();
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+				return (EADDRINUSE);
+			}
+		}
+	continue_anyway:
+		SCTP_INP_WLOCK(inp);
+		if (bindall) {
+			/* verify that the lport is not in use by a singleton */
+			if ((port_reuse_active == 0) &&
+			    (inp_tmp = sctp_isport_inuse(inp, lport, vrf_id))) {
+				/* Sorry someone already has this one bound */
+				if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+				    (sctp_is_feature_on(inp_tmp, SCTP_PCB_FLAGS_PORTREUSE))) {
+					port_reuse_active = 1;
+				} else {
+					SCTP_INP_DECR_REF(inp);
+					SCTP_INP_WUNLOCK(inp);
+					SCTP_INP_INFO_WUNLOCK();
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+					return (EADDRINUSE);
+				}
+			}
+		}
+	} else {
+		uint16_t first, last, candidate;
+		uint16_t count;
+		int done;
+
+#if defined(__Windows__)
+		first = 1;
+		last = 0xffff;
+#else
+#if defined(__Userspace__)
+		/* TODO ensure uid is 0, etc... */
+#elif defined(__FreeBSD__) || defined(__APPLE__)
+		if (ip_inp->inp_flags & INP_HIGHPORT) {
+			first = MODULE_GLOBAL(ipport_hifirstauto);
+			last = MODULE_GLOBAL(ipport_hilastauto);
+		} else if (ip_inp->inp_flags & INP_LOWPORT) {
+			if (p && (error =
+#ifdef __FreeBSD__
+#if __FreeBSD_version > 602000
+				  priv_check(p, PRIV_NETINET_RESERVEDPORT)
+#elif __FreeBSD_version >= 500000
+				  suser_cred(p->td_ucred, 0)
+#else
+				  suser(p)
+#endif
+#elif defined(__APPLE__)
+				  suser(p->p_ucred, &p->p_acflag)
+#else
+				  suser(p, 0)
+#endif
+				    )) {
+				SCTP_INP_DECR_REF(inp);
+				SCTP_INP_WUNLOCK(inp);
+				SCTP_INP_INFO_WUNLOCK();
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, error);
+				return (error);
+			}
+			first = MODULE_GLOBAL(ipport_lowfirstauto);
+			last = MODULE_GLOBAL(ipport_lowlastauto);
+		} else {
+#endif
+			first = MODULE_GLOBAL(ipport_firstauto);
+			last = MODULE_GLOBAL(ipport_lastauto);
+#if defined(__FreeBSD__) || defined(__APPLE__)
+		}
+#endif
+#endif /* __Windows__ */
+		if (first > last) {
+			uint16_t temp;
+
+			temp = first;
+			first = last;
+			last = temp;
+		}
+		count = last - first + 1; /* number of candidates */
+		candidate = first + sctp_select_initial_TSN(&inp->sctp_ep) % (count);
+
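+		/*
+		 * Starting from the pseudo-randomly chosen candidate,
+		 * probe each port in [first, last] (wrapping back to
+		 * first after last) until one is found that no endpoint
+		 * has in use, or every candidate has been tried
+		 * (EADDRINUSE).
+		 */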
+		done = 0;
+		while (!done) {
+#ifdef SCTP_MVRF
+			for (i = 0; i < inp->num_vrfs; i++) {
+				if (sctp_isport_inuse(inp, htons(candidate), inp->m_vrf_ids[i]) != NULL) {
+					break;
+				}
+			}
+			if (i == inp->num_vrfs) {
+				done = 1;
+			}
+#else
+			if (sctp_isport_inuse(inp, htons(candidate), inp->def_vrf_id) == NULL) {
+				done = 1;
+			}
+#endif
+			if (!done) {
+				if (--count == 0) {
+					SCTP_INP_DECR_REF(inp);
+					SCTP_INP_WUNLOCK(inp);
+					SCTP_INP_INFO_WUNLOCK();
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRINUSE);
+					return (EADDRINUSE);
+				}
+				if (candidate == last)
+					candidate = first;
+				else
+					candidate = candidate + 1;
+			}
+		}
+		lport = htons(candidate);
+	}
+	SCTP_INP_DECR_REF(inp);
+	if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
+			       SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+		/*
+		 * this really should not happen. The guy did a non-blocking
+		 * bind and then did a close at the same time.
+		 */
+		SCTP_INP_WUNLOCK(inp);
+		SCTP_INP_INFO_WUNLOCK();
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+		return (EINVAL);
+	}
+	/* ok, we look clear to give out this port, so let's set up the binding */
+	if (bindall) {
+		/* binding to all addresses, so just set in the proper flags */
+		inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL;
+		/* set the automatic addr changes from kernel flag */
+		if (SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+		} else {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+		}
+		if (SCTP_BASE_SYSCTL(sctp_multiple_asconfs) == 0) {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+		} else {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_MULTIPLE_ASCONFS);
+		}
+		/* set the automatic mobility_base from kernel
+		   flag (by micchie)
+		*/
+		if (SCTP_BASE_SYSCTL(sctp_mobility_base) == 0) {
+			sctp_mobility_feature_off(inp, SCTP_MOBILITY_BASE);
+			sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+		} else {
+			sctp_mobility_feature_on(inp, SCTP_MOBILITY_BASE);
+			sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+		}
+		/* set the automatic mobility_fasthandoff from kernel
+		   flag (by micchie)
+		*/
+		if (SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) == 0) {
+			sctp_mobility_feature_off(inp, SCTP_MOBILITY_FASTHANDOFF);
+			sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+		} else {
+			sctp_mobility_feature_on(inp, SCTP_MOBILITY_FASTHANDOFF);
+			sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+		}
+	} else {
+		/*
+		 * bind specific, make sure flags is off and add a new
+		 * address structure to the sctp_addr_list inside the ep
+		 * structure.
+		 *
+		 * We will need to allocate one and insert it at the head. The
+		 * socketopt call can just insert new addresses in there as
+		 * well. It will also have to do the embed scope kame hack
+		 * too (before adding).
+		 */
+		struct sctp_ifa *ifa;
+		union sctp_sockstore store;
+
+		memset(&store, 0, sizeof(store));
+		switch (addr->sa_family) {
+#ifdef INET
+		case AF_INET:
+			memcpy(&store.sin, addr, sizeof(struct sockaddr_in));
+			store.sin.sin_port = 0;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			memcpy(&store.sin6, addr, sizeof(struct sockaddr_in6));
+			store.sin6.sin6_port = 0;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			memcpy(&store.sconn, addr, sizeof(struct sockaddr_conn));
+			store.sconn.sconn_port = 0;
+			break;
+#endif
+		default:
+			break;
+		}
+		/*
+		 * First find the interface with the bound address. We need
+		 * to zero out the port to find the address (yuck!); we can't
+		 * do this earlier since sctp_pcb_findep() needs the port.
+		 */
+		if (sctp_ifap != NULL) {
+			ifa = sctp_ifap;
+		} else {
+			/* Note: on BSD we always hit this path; other
+			 * O/S's (e.g. Panda) pass things in via the
+			 * sctp_ifap argument.
+			 */
+			ifa = sctp_find_ifa_by_addr(&store.sa,
+						    vrf_id, SCTP_ADDR_NOT_LOCKED);
+		}
+		if (ifa == NULL) {
+			/* Can't find an interface with that address */
+			SCTP_INP_WUNLOCK(inp);
+			SCTP_INP_INFO_WUNLOCK();
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EADDRNOTAVAIL);
+			return (EADDRNOTAVAIL);
+		}
+#ifdef INET6
+		if (addr->sa_family == AF_INET6) {
+			/* GAK, more FIXME IFA lock? */
+			if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+				/* Can't bind a non-existent addr. */
+				SCTP_INP_WUNLOCK(inp);
+				SCTP_INP_INFO_WUNLOCK();
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+				return (EINVAL);
+			}
+		}
+#endif
+		/* we're not bound all */
+		inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
+		/* allow bindx() to send ASCONF's for binding changes */
+		sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
+		/* clear automatic addr changes from kernel flag */
+		sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+
+		/* add this address to the endpoint list */
+		error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0);
+		if (error != 0) {
+			SCTP_INP_WUNLOCK(inp);
+			SCTP_INP_INFO_WUNLOCK();
+			return (error);
+		}
+		inp->laddr_count++;
+	}
+	/* find the bucket */
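+	/*
+	 * Endpoints sharing a port via SCTP_PCB_FLAGS_PORTREUSE are kept
+	 * in the separate TCP-pool hash; everything else goes into the
+	 * regular endpoint hash, both keyed by the local port.
+	 */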
+	if (port_reuse_active) {
+		/* Put it into tcp 1-2-1 hash */
+		head = &SCTP_BASE_INFO(sctp_tcpephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashtcpmark))];
+		inp->sctp_flags |= SCTP_PCB_FLAGS_IN_TCPPOOL;
+	} else {
+		head = &SCTP_BASE_INFO(sctp_ephash)[SCTP_PCBHASH_ALLADDR(lport, SCTP_BASE_INFO(hashmark))];
+	}
+	/* put it in the bucket */
+	LIST_INSERT_HEAD(head, inp, sctp_hash);
+	SCTPDBG(SCTP_DEBUG_PCB1, "Main hash to bind at head:%p, bound port:%d - in tcp_pool=%d\n",
+		(void *)head, ntohs(lport), port_reuse_active);
+	/* set in the port */
+	inp->sctp_lport = lport;
+
+	/* turn off just the unbound flag */
+	inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
+	SCTP_INP_WUNLOCK(inp);
+	SCTP_INP_INFO_WUNLOCK();
+	return (0);
+}
+
+
+static void
+sctp_iterator_inp_being_freed(struct sctp_inpcb *inp)
+{
+	struct sctp_iterator *it, *nit;
+
+	/*
+	 * We enter with only the ITERATOR_LOCK in place and a write
+	 * lock on the inp_info stuff.
+	 */
+	it = sctp_it_ctl.cur_it;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	if (it && (it->vn != curvnet)) {
+		/* It's not looking at our VNET */
+		return;
+	}
+#endif
+	if (it && (it->inp == inp)) {
+		/*
+		 * This is tricky: we hold the iterator lock, but when
+		 * the iterator returns and gets the lock (once we
+		 * release it) it will try to operate on inp. We need
+		 * to stop that from happening. Of course the iterator
+		 * has a reference on the stcb and inp, so we can mark
+		 * it and it will stop.
+		 *
+		 * If it's a single-inp iterator situation, we set the
+		 * end-iterator flag. Otherwise we set the iterator to
+		 * go to the next inp.
+		 */
+		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+			sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
+		} else {
+			sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_INP;
+		}
+	}
+	/* Now go through and remove any single reference to
+	 * our inp that may still be pending on the list.
+	 */
+	SCTP_IPI_ITERATOR_WQ_LOCK();
+	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		if (it->vn != curvnet) {
+			continue;
+		}
+#endif
+		if (it->inp == inp) {
+			/* This one points to me; is it inp specific? */
+			if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+				/* Remove and free this one */
+				TAILQ_REMOVE(&sctp_it_ctl.iteratorhead,
+				    it, sctp_nxt_itr);
+				if (it->function_atend != NULL) {
+					(*it->function_atend) (it->pointer, it->val);
+				}
+				SCTP_FREE(it, SCTP_M_ITER);
+			} else {
+				it->inp = LIST_NEXT(it->inp, sctp_list);
+				if (it->inp) {
+					SCTP_INP_INCR_REF(it->inp);
+				}
+			}
+			/* When it's put on the list the refcnt is incremented, so decrement it */
+			SCTP_INP_DECR_REF(inp);
+		}
+	}
+	SCTP_IPI_ITERATOR_WQ_UNLOCK();
+}
+
+/* release sctp_inpcb unbind the port */
+void
+sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
+{
+	/*
+	 * Here we free an endpoint. We must find it (if it is in the hash
+	 * table) and remove it from there. Then we must also find it in the
+	 * overall list and remove it from there. After all removals are
+	 * complete, any timer has to be stopped. Then start the actual
+	 * freeing: a) any local lists, b) any associations, c) the hash of
+	 * all associations, and d) finally the ep itself.
+	 */
+	struct sctp_tcb *asoc, *nasoc;
+	struct sctp_laddr *laddr, *nladdr;
+	struct inpcb *ip_pcb;
+	struct socket *so;
+	int being_refed = 0;
+	struct sctp_queued_to_read *sq, *nsq;
+#if !defined(__Panda__) && !defined(__Userspace__)
+#if !defined(__FreeBSD__) || __FreeBSD_version < 500000
+	sctp_rtentry_t *rt;
+#endif
+#endif
+	int cnt;
+	sctp_sharedkey_t *shared_key, *nshared_key;
+
+
+#if defined(__APPLE__)
+	sctp_lock_assert(SCTP_INP_SO(inp));
+#endif
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, NULL, 0);
+#endif
+	SCTP_ITERATOR_LOCK();
+	/* mark any iterators on the list or being processed */
+	sctp_iterator_inp_being_freed(inp);
+	SCTP_ITERATOR_UNLOCK();
+	so = inp->sctp_socket;
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+		/* been here before.. eeks.. get out of here */
+		SCTP_PRINTF("This conflict in free SHOULD not be happening! from %d, imm %d\n", from, immediate);
+#ifdef SCTP_LOG_CLOSING
+		sctp_log_closing(inp, NULL, 1);
+#endif
+		return;
+	}
+	SCTP_ASOC_CREATE_LOCK(inp);
+	SCTP_INP_INFO_WLOCK();
+
+	SCTP_INP_WLOCK(inp);
+	if (from == SCTP_CALLED_AFTER_CMPSET_OFCLOSE) {
+		inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP;
+		/* socket is gone, so no more wakeups allowed */
+		inp->sctp_flags |= SCTP_PCB_FLAGS_DONT_WAKE;
+		inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
+		inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
+
+	}
+	/* First time through we have the socket lock, after that no more. */
+	sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL,
+			SCTP_FROM_SCTP_PCB + SCTP_LOC_1);
+
+	if (inp->control) {
+		sctp_m_freem(inp->control);
+		inp->control = NULL;
+	}
+	if (inp->pkt) {
+		sctp_m_freem(inp->pkt);
+		inp->pkt = NULL;
+	}
+	ip_pcb = &inp->ip_inp.inp;	/* we could just cast the main pointer
+					 * here but I will be nice :> (i.e.
+					 * ip_pcb = ep;) */
+	if (immediate == SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE) {
+		int cnt_in_sd;
+
+		cnt_in_sd = 0;
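+		/*
+		 * Graceful close: walk every association on this endpoint
+		 * and either abort it (unread data or incomplete user
+		 * messages), send or queue a SHUTDOWN, or leave it to
+		 * finish draining; cnt_in_sd counts the ones still pending.
+		 */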
+		LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) {
+			SCTP_TCB_LOCK(asoc);
+			if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				/* Skip guys being freed */
+				cnt_in_sd++;
+				if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) {
+					/*
+					 * Special case - we did not start a kill
+					 * timer on the asoc because it was not
+					 * closed. So go ahead and start it now.
+					 */
+					asoc->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+					sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL);
+				}
+				SCTP_TCB_UNLOCK(asoc);
+				continue;
+			}
+			if (((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
+			    (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) &&
+			    (asoc->asoc.total_output_queue_size == 0)) {
+				/* If we have data in queue, we don't want to just
+				 * free, since the app may have done send()/close
+				 * or connect/send/close and it wants the data
+				 * to get across first.
+				 */
+				/* Just abandon things in the front states */
+				if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE,
+						   SCTP_FROM_SCTP_PCB + SCTP_LOC_2) == 0) {
+					cnt_in_sd++;
+				}
+				continue;
+			}
+			/* Disconnect the socket please */
+			asoc->sctp_socket = NULL;
+			asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET;
+			if ((asoc->asoc.size_on_reasm_queue > 0) ||
+			    (asoc->asoc.control_pdapi) ||
+			    (asoc->asoc.size_on_all_streams > 0) ||
+			    (so && (so->so_rcv.sb_cc > 0))) {
+				/* Left with Data unread */
+				struct mbuf *op_err;
+
+				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+				asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3;
+				sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+				SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+				if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+				    (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+				}
+				if (sctp_free_assoc(inp, asoc,
+						    SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4) == 0) {
+					cnt_in_sd++;
+				}
+				continue;
+			} else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+			           TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+			           (asoc->asoc.stream_queue_cnt == 0)) {
+				if ((*asoc->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete)(asoc, &asoc->asoc)) {
+					goto abort_anyway;
+				}
+				if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+				    (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+					struct sctp_nets *netp;
+
+					/*
+					 * there is nothing queued to send,
+					 * so I send shutdown
+					 */
+					if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+					    (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+					}
+					SCTP_SET_STATE(&asoc->asoc, SCTP_STATE_SHUTDOWN_SENT);
+					SCTP_CLEAR_SUBSTATE(&asoc->asoc, SCTP_STATE_SHUTDOWN_PENDING);
+					sctp_stop_timers_for_shutdown(asoc);
+					if (asoc->asoc.alternate) {
+						netp = asoc->asoc.alternate;
+					} else {
+						netp = asoc->asoc.primary_destination;
+					}
+					sctp_send_shutdown(asoc, netp);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
+					    netp);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
+					    asoc->asoc.primary_destination);
+					sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_LOCKED);
+				}
+			} else {
+				/* mark into shutdown pending */
+				asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING;
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
+						 asoc->asoc.primary_destination);
+				if ((*asoc->asoc.ss_functions.sctp_ss_is_user_msgs_incomplete)(asoc, &asoc->asoc)) {
+					asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+				}
+				if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
+				    TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
+				    (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+					struct mbuf *op_err;
+				abort_anyway:
+					op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+					asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5;
+					sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+					if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+					    (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+					}
+					if (sctp_free_assoc(inp, asoc,
+							    SCTP_PCBFREE_NOFORCE,
+							    SCTP_FROM_SCTP_PCB + SCTP_LOC_6) == 0) {
+						cnt_in_sd++;
+					}
+					continue;
+				} else {
+					sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+				}
+			}
+			cnt_in_sd++;
+			SCTP_TCB_UNLOCK(asoc);
+		}
+		/* now is there some left in our SHUTDOWN state? */
+		if (cnt_in_sd) {
+#ifdef SCTP_LOG_CLOSING
+			sctp_log_closing(inp, NULL, 2);
+#endif
+			inp->sctp_socket = NULL;
+			SCTP_INP_WUNLOCK(inp);
+			SCTP_ASOC_CREATE_UNLOCK(inp);
+			SCTP_INP_INFO_WUNLOCK();
+			return;
+		}
+	}
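+	/*
+	 * Nothing is left in shutdown (or an immediate free was requested):
+	 * detach the socket, unhash the endpoint if it was bound, and tear
+	 * down any remaining associations below.
+	 */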
+	inp->sctp_socket = NULL;
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
+	    SCTP_PCB_FLAGS_UNBOUND) {
+		/*
+		 * ok, this guy has been bound. Its port is
+		 * somewhere in the SCTP_BASE_INFO(hash table). Remove
+		 * it!
+		 */
+		LIST_REMOVE(inp, sctp_hash);
+		inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
+	}
+
+	/* If there is a timer running to kill us,
+	 * forget it, since it may be contending
+	 * for the INP lock.. which would cause us
+	 * to die ...
+	 */
+	cnt = 0;
+	LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_list, sctp_tcblist, nasoc) {
+		SCTP_TCB_LOCK(asoc);
+		if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			if (asoc->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE) {
+				asoc->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+				sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, asoc, NULL);
+			}
+		        cnt++;
+			SCTP_TCB_UNLOCK(asoc);
+			continue;
+		}
+		/* Free associations that are NOT killing us */
+		if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) &&
+		    ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) {
+			struct mbuf *op_err;
+
+			op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+			asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7;
+			sctp_send_abort_tcb(asoc, op_err, SCTP_SO_LOCKED);
+			SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+		} else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			cnt++;
+			SCTP_TCB_UNLOCK(asoc);
+			continue;
+		}
+		if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
+		    (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+		}
+		if (sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE,
+		                    SCTP_FROM_SCTP_PCB + SCTP_LOC_8) == 0) {
+			cnt++;
+		}
+	}
+	if (cnt) {
+		/* Ok we have someone out there that will kill us */
+		(void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
+#ifdef SCTP_LOG_CLOSING
+		sctp_log_closing(inp, NULL, 3);
+#endif
+		SCTP_INP_WUNLOCK(inp);
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+		SCTP_INP_INFO_WUNLOCK();
+		return;
+	}
+	if (SCTP_INP_LOCK_CONTENDED(inp))
+		being_refed++;
+	if (SCTP_INP_READ_CONTENDED(inp))
+		being_refed++;
+	if (SCTP_ASOC_CREATE_LOCK_CONTENDED(inp))
+		being_refed++;
+
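+	/*
+	 * If anyone still holds a reference, is contending for one of our
+	 * locks, or the IP-level close is still pending, defer destruction:
+	 * arm the INPKILL timer and let it finish the job later.
+	 */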
+	if ((inp->refcount) ||
+	    (being_refed) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) {
+		(void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
+#ifdef SCTP_LOG_CLOSING
+		sctp_log_closing(inp, NULL, 4);
+#endif
+		sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
+		SCTP_INP_WUNLOCK(inp);
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+		SCTP_INP_INFO_WUNLOCK();
+		return;
+	}
+	inp->sctp_ep.signature_change.type = 0;
+	inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
+	/* Remove it from the list .. last thing we need a
+	 * lock for.
+	 */
+	LIST_REMOVE(inp, sctp_list);
+	SCTP_INP_WUNLOCK(inp);
+	SCTP_ASOC_CREATE_UNLOCK(inp);
+	SCTP_INP_INFO_WUNLOCK();
+	/* Now we release all locks, since this INP
+	 * cannot be found anymore except possibly by the
+	 * kill timer that might be running. We call
+	 * the drain function here. It should hit the case
+	 * where it sees the ACTIVE flag cleared and exit
+	 * out, freeing us to proceed and destroy everything.
+	 */
+	if (from != SCTP_CALLED_FROM_INPKILL_TIMER) {
+		(void)SCTP_OS_TIMER_STOP_DRAIN(&inp->sctp_ep.signature_change.timer);
+	} else {
+		/* Probably un-needed */
+		(void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
+	}
+
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, NULL, 5);
+#endif
+
+#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
+#if !defined(__FreeBSD__) || __FreeBSD_version < 500000
+	rt = ip_pcb->inp_route.ro_rt;
+#endif
+#endif
+
+#if defined(__Panda__)
+	if (inp->pak_to_read) {
+		(void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.zero_copy_timer.timer);
+		SCTP_RELEASE_PKT(inp->pak_to_read);
+		inp->pak_to_read = NULL;
+	}
+	if (inp->pak_to_read_sendq) {
+		(void)SCTP_OS_TIMER_STOP(&inp->sctp_ep.zero_copy_sendq_timer.timer);
+		SCTP_RELEASE_PKT(inp->pak_to_read_sendq);
+		inp->pak_to_read_sendq = NULL;
+	}
+#endif
+	if ((inp->sctp_asocidhash) != NULL) {
+		SCTP_HASH_FREE(inp->sctp_asocidhash, inp->hashasocidmark);
+		inp->sctp_asocidhash = NULL;
+	}
+	/*sa_ignore FREED_MEMORY*/
+	TAILQ_FOREACH_SAFE(sq, &inp->read_queue, next, nsq) {
+		/* It's only abandoned if it had data left */
+		if (sq->length)
+			SCTP_STAT_INCR(sctps_left_abandon);
+
+		TAILQ_REMOVE(&inp->read_queue, sq, next);
+		sctp_free_remote_addr(sq->whoFrom);
+		if (so)
+			so->so_rcv.sb_cc -= sq->length;
+		if (sq->data) {
+			sctp_m_freem(sq->data);
+			sq->data = NULL;
+		}
+		/*
+		 * no need to free the net count, since at this point all
+		 * assoc's are gone.
+		 */
+		sctp_free_a_readq(NULL, sq);
+	}
+	/* Now the sctp_pcb things */
+	/*
+	 * free each asoc if it is not already closed/free. we can't use the
+	 * macro here since le_next will get freed as part of the
+	 * sctp_free_assoc() call.
+	 */
+#ifdef IPSEC
+	ipsec_delete_pcbpolicy(ip_pcb);
+#endif
+#ifndef __Panda__
+	if (ip_pcb->inp_options) {
+		(void)sctp_m_free(ip_pcb->inp_options);
+		ip_pcb->inp_options = 0;
+	}
+#endif
+
+#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
+#if !defined(__FreeBSD__) || __FreeBSD_version < 500000
+	if (rt) {
+		RTFREE(rt);
+		ip_pcb->inp_route.ro_rt = 0;
+	}
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version < 803000
+#ifdef INET
+	if (ip_pcb->inp_moptions) {
+		inp_freemoptions(ip_pcb->inp_moptions);
+		ip_pcb->inp_moptions = 0;
+	}
+#endif
+#endif
+#endif
+
+#ifdef INET6
+#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
+#if defined(__FreeBSD__) || defined(__APPLE__)
+	if (ip_pcb->inp_vflag & INP_IPV6) {
+#else
+	if (inp->inp_vflag & INP_IPV6) {
+#endif
+		struct in6pcb *in6p;
+
+		in6p = (struct in6pcb *)inp;
+		ip6_freepcbopts(in6p->in6p_outputopts);
+	}
+#endif
+#endif				/* INET6 */
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+	inp->inp_vflag = 0;
+#else
+	ip_pcb->inp_vflag = 0;
+#endif
+	/* free up authentication fields */
+	if (inp->sctp_ep.local_auth_chunks != NULL)
+		sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
+	if (inp->sctp_ep.local_hmacs != NULL)
+		sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+
+	LIST_FOREACH_SAFE(shared_key, &inp->sctp_ep.shared_keys, next, nshared_key) {
+		LIST_REMOVE(shared_key, next);
+		sctp_free_sharedkey(shared_key);
+		/*sa_ignore FREED_MEMORY*/
+	}
+
+#if defined(__APPLE__)
+	inp->ip_inp.inp.inp_state = INPCB_STATE_DEAD;
+	if (in_pcb_checkstate(&inp->ip_inp.inp, WNT_STOPUSING, 1) != WNT_STOPUSING) {
+#ifdef INVARIANTS
+		panic("sctp_inpcb_free inp = %p couldn't set to STOPUSING\n", (void *)inp);
+#else
+		SCTP_PRINTF("sctp_inpcb_free inp = %p couldn't set to STOPUSING\n", (void *)inp);
+#endif
+	}
+	inp->ip_inp.inp.inp_socket->so_flags |= SOF_PCBCLEARING;
+#endif
+	/*
+	 * if we have an address list the following will free the list of
+	 * ifaddr's that are set into this ep. Again macro limitations here,
+	 * since the LIST_FOREACH could be a bad idea.
+	 */
+	LIST_FOREACH_SAFE(laddr, &inp->sctp_addr_list, sctp_nxt_addr, nladdr) {
+		sctp_remove_laddr(laddr);
+	}
+
+#ifdef SCTP_TRACK_FREED_ASOCS
+	/* TEMP CODE */
+	LIST_FOREACH_SAFE(asoc, &inp->sctp_asoc_free_list, sctp_tcblist, nasoc) {
+		LIST_REMOVE(asoc, sctp_tcblist);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), asoc);
+		SCTP_DECR_ASOC_COUNT();
+	}
+	/* *** END TEMP CODE ****/
+#endif
+#ifdef SCTP_MVRF
+	SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF);
+#endif
+	/* Now lets see about freeing the EP hash table. */
+	if (inp->sctp_tcbhash != NULL) {
+		SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
+		inp->sctp_tcbhash = NULL;
+	}
+	/* Now we must put the ep memory back into the zone pool */
+#if defined(__FreeBSD__)
+	crfree(inp->ip_inp.inp.inp_cred);
+	INP_LOCK_DESTROY(&inp->ip_inp.inp);
+#endif
+	SCTP_INP_LOCK_DESTROY(inp);
+	SCTP_INP_READ_DESTROY(inp);
+	SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
+#if !defined(__APPLE__)
+	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_ep), inp);
+	SCTP_DECR_EP_COUNT();
+#else
+	/* For Tiger, we will do this later... */
+#endif
+}
+
+
+struct sctp_nets *
+sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
+{
+	struct sctp_nets *net;
+	/* locate the address */
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
+			return (net);
+	}
+	return (NULL);
+}
+
+
+int
+sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id)
+{
+#ifdef __Panda__
+	return (0);
+#else
+	struct sctp_ifa *sctp_ifa;
+	sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, SCTP_ADDR_NOT_LOCKED);
+	if (sctp_ifa) {
+		return (1);
+	} else {
+		return (0);
+	}
+#endif
+}
+
+/*
+ * Adds a remote endpoint address, done with the INIT/INIT-ACK as well as
+ * when an ASCONF arrives that adds it. It will also initialize all the
+ * cwnd-related stats.
+ */
+int
+sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
+    struct sctp_nets **netp, uint16_t port, int set_scope, int from)
+{
+	/*
+	 * The following is redundant to the same lines in the
+	 * sctp_aloc_assoc() but is needed since others call the add
+	 * address function
+	 */
+	struct sctp_nets *net, *netfirst;
+	int addr_inscope;
+
+	SCTPDBG(SCTP_DEBUG_PCB1, "Adding an address (from:%d) to the peer: ",
+		from);
+	SCTPDBG_ADDR(SCTP_DEBUG_PCB1, newaddr);
+
+	netfirst = sctp_findnet(stcb, newaddr);
+	if (netfirst) {
+		/*
+		 * Lie and return ok, we don't want to make the association
+		 * go away for this behavior. It will happen in the TCP
+		 * model in a connected socket. It does not reach the hash
+		 * table until after the association is built so it can't be
+		 * found. Mark as reachable, since the initial creation will
+		 * have been cleared and the NOT_IN_ASSOC flag will have
+		 * been added... and we don't want to end up removing it
+		 * back out.
+		 */
+		if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
+			netfirst->dest_state = (SCTP_ADDR_REACHABLE |
+			    SCTP_ADDR_UNCONFIRMED);
+		} else {
+			netfirst->dest_state = SCTP_ADDR_REACHABLE;
+		}
+
+		return (0);
+	}
+	addr_inscope = 1;
+	switch (newaddr->sa_family) {
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+		sin = (struct sockaddr_in *)newaddr;
+		if (sin->sin_addr.s_addr == 0) {
+			/* Invalid address */
+			return (-1);
+		}
+		/* zero out the sin_zero area */
+		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+
+		/* assure len is set */
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(struct sockaddr_in);
+#endif
+		if (set_scope) {
+			if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
+				stcb->asoc.scope.ipv4_local_scope = 1;
+			}
+		} else {
+			/* Validate the address is in scope */
+			if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
+			    (stcb->asoc.scope.ipv4_local_scope == 0)) {
+				addr_inscope = 0;
+			}
+		}
+		break;
+	}
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)newaddr;
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+			/* Invalid address */
+			return (-1);
+		}
+		/* assure len is set */
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(struct sockaddr_in6);
+#endif
+		if (set_scope) {
+			if (sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id)) {
+				stcb->asoc.scope.loopback_scope = 1;
+				stcb->asoc.scope.local_scope = 0;
+				stcb->asoc.scope.ipv4_local_scope = 1;
+				stcb->asoc.scope.site_scope = 1;
+			} else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+				/*
+				 * If the new destination is a LINK_LOCAL we
+				 * must have common site scope. Don't set
+				 * the local scope since we may not share
+				 * all links, only loopback can do this.
+				 * Links on the local network would also be
+				 * on our private network for v4 too.
+				 */
+				stcb->asoc.scope.ipv4_local_scope = 1;
+				stcb->asoc.scope.site_scope = 1;
+			} else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
+				/*
+				 * If the new destination is SITE_LOCAL then
+				 * we must have site scope in common.
+				 */
+				stcb->asoc.scope.site_scope = 1;
+			}
+		} else {
+			/* Validate the address is in scope */
+			if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
+			    (stcb->asoc.scope.loopback_scope == 0)) {
+				addr_inscope = 0;
+			} else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
+			    (stcb->asoc.scope.local_scope == 0)) {
+				addr_inscope = 0;
+			} else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
+			    (stcb->asoc.scope.site_scope == 0)) {
+				addr_inscope = 0;
+			}
+		}
+		break;
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
+		struct sockaddr_conn *sconn;
+
+		sconn = (struct sockaddr_conn *)newaddr;
+		if (sconn->sconn_addr == NULL) {
+			/* Invalid address */
+			return (-1);
+		}
+#ifdef HAVE_SCONN_LEN
+		sconn->sconn_len = sizeof(struct sockaddr_conn);
+#endif
+		break;
+	}
+#endif
+	default:
+		/* not supported family type */
+		return (-1);
+	}
+	net = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_net), struct sctp_nets);
+	if (net == NULL) {
+		return (-1);
+	}
+	SCTP_INCR_RADDR_COUNT();
+	bzero(net, sizeof(struct sctp_nets));
+	(void)SCTP_GETTIME_TIMEVAL(&net->start_time);
+#ifdef HAVE_SA_LEN
+	memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
+#endif
+	switch (newaddr->sa_family) {
+#ifdef INET
+	case AF_INET:
+#ifndef HAVE_SA_LEN
+		memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in));
+#endif
+		((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+#ifndef HAVE_SA_LEN
+		memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_in6));
+#endif
+		((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+#ifndef HAVE_SA_LEN
+		memcpy(&net->ro._l_addr, newaddr, sizeof(struct sockaddr_conn));
+#endif
+		((struct sockaddr_conn *)&net->ro._l_addr)->sconn_port = stcb->rport;
+		break;
+#endif
+	default:
+		break;
+	}
+	net->addr_is_local = sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id);
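+	/*
+	 * A peer address that is also one of our local addresses implies a
+	 * loopback-style association, so widen the scopes accordingly.
+	 */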
+	if (net->addr_is_local && ((set_scope || (from == SCTP_ADDR_IS_CONFIRMED)))) {
+		stcb->asoc.scope.loopback_scope = 1;
+		stcb->asoc.scope.ipv4_local_scope = 1;
+		stcb->asoc.scope.local_scope = 0;
+		stcb->asoc.scope.site_scope = 1;
+		addr_inscope = 1;
+	}
+	net->failure_threshold = stcb->asoc.def_net_failure;
+	net->pf_threshold = stcb->asoc.def_net_pf_threshold;
+	if (addr_inscope == 0) {
+		net->dest_state = (SCTP_ADDR_REACHABLE |
+		    SCTP_ADDR_OUT_OF_SCOPE);
+	} else {
+		if (from == SCTP_ADDR_IS_CONFIRMED)
+			/* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */
+			net->dest_state = SCTP_ADDR_REACHABLE;
+		else
+			net->dest_state = SCTP_ADDR_REACHABLE |
+			    SCTP_ADDR_UNCONFIRMED;
+	}
+	/* We set this to 0; the timer code knows that
+	 * this means it's an initial value.
+	 */
+	net->rto_needed = 1;
+	net->RTO = 0;
+	net->RTO_measured = 0;
+	stcb->asoc.numnets++;
+	net->ref_count = 1;
+	net->cwr_window_tsn = net->last_cwr_tsn = stcb->asoc.sending_seq - 1;
+	net->port = port;
+	net->dscp = stcb->asoc.default_dscp;
+#ifdef INET6
+	net->flowlabel = stcb->asoc.default_flowlabel;
+#endif
+	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
+		net->dest_state |= SCTP_ADDR_NOHB;
+	} else {
+		net->dest_state &= ~SCTP_ADDR_NOHB;
+	}
+	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
+		net->dest_state |= SCTP_ADDR_NO_PMTUD;
+	} else {
+		net->dest_state &= ~SCTP_ADDR_NO_PMTUD;
+	}
+	net->heart_beat_delay = stcb->asoc.heart_beat_delay;
+	/* Init the timer structure */
+	SCTP_OS_TIMER_INIT(&net->rxt_timer.timer);
+	SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer);
+	SCTP_OS_TIMER_INIT(&net->hb_timer.timer);
+
+	/* Now generate a route for this guy */
+#ifdef INET6
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+	/* KAME hack: embed scopeid */
+	if (newaddr->sa_family == AF_INET6) {
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+		(void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL);
+#else
+		(void)in6_embedscope(&sin6->sin6_addr, sin6, &stcb->sctp_ep->ip_inp.inp, NULL, NULL);
+#endif
+#elif defined(SCTP_KAME)
+		(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+#else
+		(void)in6_embedscope(&sin6->sin6_addr, sin6);
+#endif
+#ifndef SCOPEDROUTING
+		sin6->sin6_scope_id = 0;
+#endif
+	}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif
+	SCTP_RTALLOC((sctp_route_t *)&net->ro,
+	             stcb->asoc.vrf_id,
+	             stcb->sctp_ep->fibnum);
+
+#if defined(__Userspace__)
+	net->src_addr_selected = 0;
+#else
+	if (SCTP_ROUTE_HAS_VALID_IFN(&net->ro)) {
+		/* Get source address */
+		net->ro._s_addr = sctp_source_address_selection(stcb->sctp_ep,
+								stcb,
+								(sctp_route_t *)&net->ro,
+								net,
+								0,
+								stcb->asoc.vrf_id);
+		if (net->ro._s_addr != NULL) {
+			net->src_addr_selected = 1;
+			/* Now get the interface MTU */
+			if (net->ro._s_addr->ifn_p != NULL) {
+				net->mtu = SCTP_GATHER_MTU_FROM_INTFC(net->ro._s_addr->ifn_p);
+			}
+		} else {
+			net->src_addr_selected = 0;
+		}
+		if (net->mtu > 0) {
+			uint32_t rmtu;
+
+			rmtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
+			if (rmtu == 0) {
+				/* Start things off to match mtu of interface please. */
+				SCTP_SET_MTU_OF_ROUTE(&net->ro._l_addr.sa,
+						      net->ro.ro_rt, net->mtu);
+			} else {
+				/* we take the route mtu over the interface, since
+				 * the route may be leading out the loopback, or
+				 * a different interface.
+				 */
+				net->mtu = rmtu;
+			}
+		}
+	} else {
+		net->src_addr_selected = 0;
+	}
+#endif
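+	/*
+	 * If no MTU could be learned from the interface or route above,
+	 * fall back to per-family defaults (SCTP_DEFAULT_MTU for IPv4,
+	 * 1280 for IPv6 and AF_CONN), then account for any UDP
+	 * encapsulation overhead.
+	 */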
+	if (net->mtu == 0) {
+		switch (newaddr->sa_family) {
+#ifdef INET
+		case AF_INET:
+			net->mtu = SCTP_DEFAULT_MTU;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			net->mtu = 1280;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			net->mtu = 1280;
+			break;
+#endif
+		default:
+			break;
+		}
+	}
+#if defined(INET) || defined(INET6)
+	if (net->port) {
+		net->mtu -= (uint32_t)sizeof(struct udphdr);
+	}
+#endif
+	if (from == SCTP_ALLOC_ASOC) {
+		stcb->asoc.smallest_mtu = net->mtu;
+	}
+	if (stcb->asoc.smallest_mtu > net->mtu) {
+		sctp_pathmtu_adjustment(stcb, net->mtu);
+	}
+#ifdef INET6
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+	if (newaddr->sa_family == AF_INET6) {
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+#ifdef SCTP_KAME
+		(void)sa6_recoverscope(sin6);
+#else
+		(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif /* SCTP_KAME */
+	}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif
+
+	/* JRS - Use the congestion control given in the CC module */
+	if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL)
+		(*stcb->asoc.cc_functions.sctp_set_initial_cc_param)(stcb, net);
+
+	/*
+	 * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning
+	 * of assoc (2005/06/27, iyengar@cis.udel.edu)
+	 */
+	net->find_pseudo_cumack = 1;
+	net->find_rtx_pseudo_cumack = 1;
+#if defined(__FreeBSD__)
+	/* Choose an initial flowid. */
+	net->flowid = stcb->asoc.my_vtag ^
+	              ntohs(stcb->rport) ^
+	              ntohs(stcb->sctp_ep->sctp_lport);
+	net->flowtype = M_HASHTYPE_OPAQUE_HASH;
+#endif
+	if (netp) {
+		*netp = net;
+	}
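+	/*
+	 * Insert the new net into the association's list, keeping entries
+	 * that have a route ahead of route-less ones; the exact position
+	 * also depends on the outgoing interface (see the cases below).
+	 */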
+	netfirst = TAILQ_FIRST(&stcb->asoc.nets);
+	if (net->ro.ro_rt == NULL) {
+		/* Since we have no route, put it at the back */
+		TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
+	} else if (netfirst == NULL) {
+		/* We are the first one in the pool. */
+		TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+	} else if (netfirst->ro.ro_rt == NULL) {
+		/*
+		 * First one has NO route. Place this one ahead of the first
+		 * one.
+		 */
+		TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+#ifndef __Panda__
+	} else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
+		/*
+		 * This one has a different interface than the one at the
+		 * top of the list. Place it ahead.
+		 */
+		TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
+#endif
+	} else {
+		/*
+		 * Ok we have the same interface as the first one. Move
+		 * forward until we find either a) one with a NULL route...
+		 * insert ahead of that b) one with a different ifp.. insert
+		 * after that. c) end of the list.. insert at the tail.
+		 */
+		struct sctp_nets *netlook;
+
+		do {
+			netlook = TAILQ_NEXT(netfirst, sctp_next);
+			if (netlook == NULL) {
+				/* End of the list */
+				TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
+				break;
+			} else if (netlook->ro.ro_rt == NULL) {
+				/* next one has NO route */
+				TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
+				break;
+			}
+#ifndef __Panda__
+			else if (netlook->ro.ro_rt->rt_ifp != net->ro.ro_rt->rt_ifp)
+#else
+			else
+#endif
+			{
+				TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
+						   net, sctp_next);
+				break;
+			}
+#ifndef __Panda__
+			/* Shift forward */
+			netfirst = netlook;
+#endif
+		} while (netlook != NULL);
+	}
+
+	/* got to have a primary set */
+	if (stcb->asoc.primary_destination == 0) {
+		stcb->asoc.primary_destination = net;
+	} else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
+		    (net->ro.ro_rt) &&
+	    ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
+		/* No route to current primary; adopt new primary */
+		stcb->asoc.primary_destination = net;
+	}
+	/* Validate primary is first */
+	net = TAILQ_FIRST(&stcb->asoc.nets);
+	if ((net != stcb->asoc.primary_destination) &&
+	    (stcb->asoc.primary_destination)) {
+		/* The first one on the list is NOT the primary.
+		 * sctp_cmpaddr() is much more efficient if
+		 * the primary is the first on the list, so make it
+		 * so.
+		 */
+		TAILQ_REMOVE(&stcb->asoc.nets,
+			     stcb->asoc.primary_destination, sctp_next);
+		TAILQ_INSERT_HEAD(&stcb->asoc.nets,
+				  stcb->asoc.primary_destination, sctp_next);
+	}
+	return (0);
+}
+
+
+static uint32_t
+sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
+{
+	uint32_t id;
+	struct sctpasochead *head;
+	struct sctp_tcb *lstcb;
+
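+	/*
+	 * Association ids come from a per-endpoint counter; keep bumping it
+	 * until we find an id that no existing association on this endpoint
+	 * is using, then record the new stcb in the asocid hash.
+	 */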
+	SCTP_INP_WLOCK(inp);
+ try_again:
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+		/* TSNH */
+		SCTP_INP_WUNLOCK(inp);
+		return (0);
+	}
+	/*
+	 * We don't allow assoc id to be one of SCTP_FUTURE_ASSOC,
+	 * SCTP_CURRENT_ASSOC and SCTP_ALL_ASSOC.
+	 */
+	if (inp->sctp_associd_counter <= SCTP_ALL_ASSOC) {
+		inp->sctp_associd_counter = SCTP_ALL_ASSOC + 1;
+	}
+	id = inp->sctp_associd_counter;
+	inp->sctp_associd_counter++;
+	lstcb = sctp_findasoc_ep_asocid_locked(inp, (sctp_assoc_t)id, 0);
+	if (lstcb) {
+		goto try_again;
+	}
+	head = &inp->sctp_asocidhash[SCTP_PCBHASH_ASOC(id, inp->hashasocidmark)];
+	LIST_INSERT_HEAD(head, stcb, sctp_tcbasocidhash);
+	stcb->asoc.in_asocid_hash = 1;
+	SCTP_INP_WUNLOCK(inp);
+	return id;
+}
+
+/*
+ * Allocate an association and add it to the endpoint. The caller must be
+ * careful to add all additional addresses right away, once they are known,
+ * or else the assoc may experience a blackout scenario.
+ */
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
+                int *error, uint32_t override_tag, uint32_t vrf_id,
+                uint16_t o_streams, uint16_t port,
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+		struct thread *p
+#elif defined(__Windows__)
+		PKTHREAD p
+#else
+#if defined(__Userspace__)
+                /*  __Userspace__ NULL proc is going to be passed here. See sctp_lower_sosend */
+#endif
+		struct proc *p
+#endif
+)
+{
+	/* note the p argument is only valid in unbound sockets */
+
+	struct sctp_tcb *stcb;
+	struct sctp_association *asoc;
+	struct sctpasochead *head;
+	uint16_t rport;
+	int err;
+
+	/*
+	 * Assumption made here: Caller has done a
+	 * sctp_findassociation_ep_addr(ep, addr's); to make sure the
+	 * address does not exist already.
+	 */
+	if (SCTP_BASE_INFO(ipi_count_asoc) >= SCTP_MAX_NUM_OF_ASOC) {
+		/* Hit max assoc, sorry no more */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+		*error = ENOBUFS;
+		return (NULL);
+	}
+	if (firstaddr == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+		*error = EINVAL;
+		return (NULL);
+	}
+	SCTP_INP_RLOCK(inp);
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+	    ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE)) ||
+	     (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
+		/*
+		 * If it's in the TCP pool, it's NOT allowed to create an
+		 * association. The parent listener needs to call
+		 * sctp_aloc_assoc.. or the one-2-many socket. If a peeled
+		 * off, or connected one does this.. it's an error.
+		 */
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+		*error = EINVAL;
+		return (NULL);
+	}
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) {
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED)) {
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+			*error = EINVAL;
+			return (NULL);
+		}
+	}
+	SCTPDBG(SCTP_DEBUG_PCB3, "Allocate an association for peer:");
+#ifdef SCTP_DEBUG
+	if (firstaddr) {
+		SCTPDBG_ADDR(SCTP_DEBUG_PCB3, firstaddr);
+		switch (firstaddr->sa_family) {
+#ifdef INET
+		case AF_INET:
+			SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n",
+			        ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n",
+			        ntohs(((struct sockaddr_in6 *)firstaddr)->sin6_port));
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			SCTPDBG(SCTP_DEBUG_PCB3, "Port:%d\n",
+			        ntohs(((struct sockaddr_conn *)firstaddr)->sconn_port));
+			break;
+#endif
+		default:
+			break;
+		}
+	} else {
+		SCTPDBG(SCTP_DEBUG_PCB3,"None\n");
+	}
+#endif				/* SCTP_DEBUG */
+	switch (firstaddr->sa_family) {
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+		sin = (struct sockaddr_in *)firstaddr;
+		if ((ntohs(sin->sin_port) == 0) ||
+		    (sin->sin_addr.s_addr == INADDR_ANY) ||
+		    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+		    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+			/* Invalid address */
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+			*error = EINVAL;
+			return (NULL);
+		}
+		rport = sin->sin_port;
+		break;
+	}
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)firstaddr;
+		if ((ntohs(sin6->sin6_port) == 0) ||
+		    IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
+		    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+			/* Invalid address */
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+			*error = EINVAL;
+			return (NULL);
+		}
+		rport = sin6->sin6_port;
+		break;
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
+		struct sockaddr_conn *sconn;
+
+		sconn = (struct sockaddr_conn *)firstaddr;
+		if ((ntohs(sconn->sconn_port) == 0) ||
+		    (sconn->sconn_addr == NULL)) {
+			/* Invalid address */
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+			*error = EINVAL;
+			return (NULL);
+		}
+		rport = sconn->sconn_port;
+		break;
+	}
+#endif
+	default:
+		/* not supported family type */
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+		*error = EINVAL;
+		return (NULL);
+	}
+	SCTP_INP_RUNLOCK(inp);
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+		/*
+		 * If you have not performed a bind, then we need to do the
+		 * ephemeral bind for you.
+		 */
+		if ((err = sctp_inpcb_bind(inp->sctp_socket,
+		    (struct sockaddr *)NULL,
+		    (struct sctp_ifa *)NULL,
+#ifndef __Panda__
+					   p
+#else
+					   (struct proc *)NULL
+#endif
+		    ))) {
+			/* bind error, probably perm */
+			*error = err;
+			return (NULL);
+		}
+	}
+	stcb = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_asoc), struct sctp_tcb);
+	if (stcb == NULL) {
+		/* out of memory? */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+		*error = ENOMEM;
+		return (NULL);
+	}
+	SCTP_INCR_ASOC_COUNT();
+
+	bzero(stcb, sizeof(*stcb));
+	asoc = &stcb->asoc;
+
+	asoc->assoc_id = sctp_aloc_a_assoc_id(inp, stcb);
+	SCTP_TCB_LOCK_INIT(stcb);
+	SCTP_TCB_SEND_LOCK_INIT(stcb);
+	stcb->rport = rport;
+	/* set up back pointers */
+	stcb->sctp_ep = inp;
+	stcb->sctp_socket = inp->sctp_socket;
+	if ((err = sctp_init_asoc(inp, stcb, override_tag, vrf_id, o_streams))) {
+		/* failed */
+		SCTP_TCB_LOCK_DESTROY(stcb);
+		SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+		LIST_REMOVE(stcb, sctp_tcbasocidhash);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+		SCTP_DECR_ASOC_COUNT();
+		*error = err;
+		return (NULL);
+	}
+	/* and the port */
+	SCTP_INP_INFO_WLOCK();
+	SCTP_INP_WLOCK(inp);
+	if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+		/* inpcb freed while alloc going on */
+		SCTP_TCB_LOCK_DESTROY(stcb);
+		SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+		LIST_REMOVE(stcb, sctp_tcbasocidhash);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+		SCTP_INP_WUNLOCK(inp);
+		SCTP_INP_INFO_WUNLOCK();
+		SCTP_DECR_ASOC_COUNT();
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, EINVAL);
+		*error = EINVAL;
+		return (NULL);
+	}
+	SCTP_TCB_LOCK(stcb);
+
+	/* now that my_vtag is set, add it to the hash */
+	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
+	/* put it in the bucket in the vtag hash of assoc's for the system */
+	LIST_INSERT_HEAD(head, stcb, sctp_asocs);
+	SCTP_INP_INFO_WUNLOCK();
+
+	if ((err = sctp_add_remote_addr(stcb, firstaddr, NULL, port, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) {
+		/* failure.. memory error? */
+		if (asoc->strmout) {
+			SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+			asoc->strmout = NULL;
+		}
+		if (asoc->mapping_array) {
+			SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+			asoc->mapping_array = NULL;
+		}
+		if (asoc->nr_mapping_array) {
+			SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+			asoc->nr_mapping_array = NULL;
+		}
+		SCTP_DECR_ASOC_COUNT();
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_TCB_LOCK_DESTROY(stcb);
+		SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+		LIST_REMOVE(stcb, sctp_tcbasocidhash);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+		SCTP_INP_WUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOBUFS);
+		*error = ENOBUFS;
+		return (NULL);
+	}
+	/* Init all the timers */
+	SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer);
+	SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer);
+	SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer);
+	SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer);
+	SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer);
+	SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer);
+	SCTP_OS_TIMER_INIT(&asoc->delete_prim_timer.timer);
+
+	LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
+	/* now file the port under the hash as well */
+	if (inp->sctp_tcbhash != NULL) {
+		head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
+		    inp->sctp_hashmark)];
+		LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
+	}
+	SCTP_INP_WUNLOCK(inp);
+	SCTPDBG(SCTP_DEBUG_PCB1, "Association %p now allocated\n", (void *)stcb);
+	return (stcb);
+}
+
+
+void
+sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
+{
+	struct sctp_association *asoc;
+
+	asoc = &stcb->asoc;
+	asoc->numnets--;
+	TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+	if (net == asoc->primary_destination) {
+		/* Reset primary */
+		struct sctp_nets *lnet;
+
+		lnet = TAILQ_FIRST(&asoc->nets);
+		/* Mobility adaptation
+		   Ideally, if deleted destination is the primary, it becomes
+		   a fast retransmission trigger by the subsequent SET PRIMARY.
+		   (by micchie)
+		 */
+		if (sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_BASE) ||
+		    sctp_is_mobility_feature_on(stcb->sctp_ep,
+		                                SCTP_MOBILITY_FASTHANDOFF)) {
+			SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: primary dst is deleting\n");
+			if (asoc->deleted_primary != NULL) {
+				SCTPDBG(SCTP_DEBUG_ASCONF1, "remove_net: deleted primary may be already stored\n");
+				goto out;
+			}
+			asoc->deleted_primary = net;
+			atomic_add_int(&net->ref_count, 1);
+			memset(&net->lastsa, 0, sizeof(net->lastsa));
+			memset(&net->lastsv, 0, sizeof(net->lastsv));
+			sctp_mobility_feature_on(stcb->sctp_ep,
+						 SCTP_MOBILITY_PRIM_DELETED);
+			sctp_timer_start(SCTP_TIMER_TYPE_PRIM_DELETED,
+					 stcb->sctp_ep, stcb, NULL);
+		}
+out:
+		/* Try to find a confirmed primary */
+		asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0);
+	}
+	if (net == asoc->last_data_chunk_from) {
+		/* Reset primary */
+		asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
+	}
+	if (net == asoc->last_control_chunk_from) {
+		/* Clear net */
+		asoc->last_control_chunk_from = NULL;
+	}
+	if (net == stcb->asoc.alternate) {
+		sctp_free_remote_addr(stcb->asoc.alternate);
+		stcb->asoc.alternate = NULL;
+	}
+	sctp_free_remote_addr(net);
+}
+
+/*
+ * Remove a remote endpoint address from an association; it will fail if the
+ * address does not exist or if it is the only remaining address.
+ */
+int
+sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
+{
+	/*
+	 * Here we need to remove a remote address. This is quite simple: we
+	 * first find it in the list of addresses for the association
+	 * (stcb->asoc.nets) and then, if it is there, we remove it from
+	 * that list. Note we do not allow it to be removed if there are
+	 * no other addresses.
+	 */
+	struct sctp_association *asoc;
+	struct sctp_nets *net, *nnet;
+
+	asoc = &stcb->asoc;
+
+	/* locate the address */
+	TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) {
+		if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
+			continue;
+		}
+		if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
+		    remaddr)) {
+			/* we found the guy */
+			if (asoc->numnets < 2) {
+				/* Must have at LEAST two remote addresses */
+				return (-1);
+			} else {
+				sctp_remove_net(stcb, net);
+				return (0);
+			}
+		}
+	}
+	/* not found. */
+	return (-2);
+}
+
+void
+sctp_delete_from_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
+{
+	struct sctpvtaghead *chain;
+	struct sctp_tagblock *twait_block;
+	int found = 0;
+	int i;
+
+	chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+	LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+		for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+			if ((twait_block->vtag_block[i].v_tag == tag) &&
+			    (twait_block->vtag_block[i].lport == lport) &&
+			    (twait_block->vtag_block[i].rport == rport)) {
+				twait_block->vtag_block[i].tv_sec_at_expire = 0;
+				twait_block->vtag_block[i].v_tag = 0;
+				twait_block->vtag_block[i].lport = 0;
+				twait_block->vtag_block[i].rport = 0;
+				found = 1;
+				break;
+			}
+		}
+		if (found)
+			break;
+	}
+}
+
+int
+sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport)
+{
+	struct sctpvtaghead *chain;
+	struct sctp_tagblock *twait_block;
+	int found = 0;
+	int i;
+
+	SCTP_INP_INFO_WLOCK();
+	chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+	LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+		for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+			if ((twait_block->vtag_block[i].v_tag == tag)  &&
+			    (twait_block->vtag_block[i].lport == lport)  &&
+			    (twait_block->vtag_block[i].rport == rport)) {
+				found = 1;
+				break;
+			}
+		}
+		if (found)
+			break;
+	}
+	SCTP_INP_INFO_WUNLOCK();
+	return (found);
+}
+
+
+void
+sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport)
+{
+	struct sctpvtaghead *chain;
+	struct sctp_tagblock *twait_block;
+	struct timeval now;
+	int set, i;
+
+	if (time == 0) {
+		/* It's disabled */
+		return;
+	}
+	(void)SCTP_GETTIME_TIMEVAL(&now);
+	chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+	set = 0;
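+	/* Expiration times are stored as absolute seconds (now + time). */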
+	LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+		/* Block(s) present, let's find space, and expire on the fly */
+		for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+			if ((twait_block->vtag_block[i].v_tag == 0) &&
+			    !set) {
+				twait_block->vtag_block[i].tv_sec_at_expire =
+					now.tv_sec + time;
+				twait_block->vtag_block[i].v_tag = tag;
+				twait_block->vtag_block[i].lport = lport;
+				twait_block->vtag_block[i].rport = rport;
+				set = 1;
+			} else if ((twait_block->vtag_block[i].v_tag) &&
+				    ((long)twait_block->vtag_block[i].tv_sec_at_expire < now.tv_sec)) {
+				/* Audit expires this guy */
+				twait_block->vtag_block[i].tv_sec_at_expire = 0;
+				twait_block->vtag_block[i].v_tag = 0;
+				twait_block->vtag_block[i].lport = 0;
+				twait_block->vtag_block[i].rport = 0;
+				if (set == 0) {
+					/* Reuse it for my new tag */
+					twait_block->vtag_block[i].tv_sec_at_expire = now.tv_sec + time;
+					twait_block->vtag_block[i].v_tag = tag;
+					twait_block->vtag_block[i].lport = lport;
+					twait_block->vtag_block[i].rport = rport;
+					set = 1;
+				}
+			}
+		}
+		if (set) {
+			/*
+			 * We only audit entries up to the block where we
+			 * were able to place our tag
+			 */
+			break;
+		}
+	}
+	/* Need to add a new block to chain */
+	if (!set) {
+		SCTP_MALLOC(twait_block, struct sctp_tagblock *,
+		    sizeof(struct sctp_tagblock), SCTP_M_TIMW);
+		if (twait_block == NULL) {
+#ifdef INVARIANTS
+			panic("Can not alloc tagblock");
+#endif
+			return;
+		}
+		memset(twait_block, 0, sizeof(struct sctp_tagblock));
+		LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
+		twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + time;
+		twait_block->vtag_block[0].v_tag = tag;
+		twait_block->vtag_block[0].lport = lport;
+		twait_block->vtag_block[0].rport = rport;
+	}
+}
+
+void
+sctp_clean_up_stream(struct sctp_tcb *stcb, struct sctp_readhead *rh)
+{
+	struct sctp_tmit_chunk *chk, *nchk;
+	struct sctp_queued_to_read *ctl, *nctl;
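+	/* Drain every queued read entry on this stream together with any
+	 * reassembly chunks still hanging off of it.
+	 */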
+	TAILQ_FOREACH_SAFE(ctl, rh, next_instrm, nctl) {
+		TAILQ_REMOVE(rh, ctl, next_instrm);
+		ctl->on_strm_q = 0;
+		if (ctl->on_read_q == 0) {
+			sctp_free_remote_addr(ctl->whoFrom);
+			if (ctl->data) {
+				sctp_m_freem(ctl->data);
+				ctl->data = NULL;
+			}
+		}
+		/* Reassembly free? */
+		TAILQ_FOREACH_SAFE(chk, &ctl->reasm, sctp_next, nchk) {
+			TAILQ_REMOVE(&ctl->reasm, chk, sctp_next);
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+			if (chk->holds_key_ref)
+				sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+			sctp_free_remote_addr(chk->whoTo);
+			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+			SCTP_DECR_CHK_COUNT();
+			/*sa_ignore FREED_MEMORY*/
+		}
+		/*
+		 * We don't free the address here
+		 * since all the net's were freed
+		 * above.
+		 */
+		if (ctl->on_read_q == 0) {
+			sctp_free_a_readq(stcb, ctl);
+		}
+	}
+}
+
+#ifdef __Panda__
+void panda_wakeup_socket(struct socket *so);
+#endif
+
+/*-
+ * Free the association after un-hashing the remote port. This
+ * function ALWAYS returns holding NO LOCK on the stcb. It DOES
+ * expect that the input to this function IS a locked TCB.
+ * It will return 0 if it did NOT destroy the association (instead
+ * it unlocks it). It will return NON-zero if it either destroyed the
+ * association OR the association was already destroyed.
+ */
+int
+sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location)
+{
+	int i;
+	struct sctp_association *asoc;
+	struct sctp_nets *net, *nnet;
+	struct sctp_laddr *laddr, *naddr;
+	struct sctp_tmit_chunk *chk, *nchk;
+	struct sctp_asconf_addr *aparam, *naparam;
+	struct sctp_asconf_ack *aack, *naack;
+	struct sctp_stream_reset_list *strrst, *nstrrst;
+	struct sctp_queued_to_read *sq, *nsq;
+	struct sctp_stream_queue_pending *sp, *nsp;
+	sctp_sharedkey_t *shared_key, *nshared_key;
+	struct socket *so;
+
+	/* first, lets purge the entry from the hash table. */
+#if defined(__APPLE__)
+	sctp_lock_assert(SCTP_INP_SO(inp));
+#endif
+
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, stcb, 6);
+#endif
+	if (stcb->asoc.state == 0) {
+#ifdef SCTP_LOG_CLOSING
+		sctp_log_closing(inp, NULL, 7);
+#endif
+		/* there is no asoc, really TSNH :-0 */
+		return (1);
+	}
+	if (stcb->asoc.alternate) {
+		sctp_free_remote_addr(stcb->asoc.alternate);
+		stcb->asoc.alternate = NULL;
+	}
+#if !defined(__APPLE__) /* TEMP: moved to below */
+        /* TEMP CODE */
+	if (stcb->freed_from_where == 0) {
+		/* Only record the first place free happened from */
+		stcb->freed_from_where = from_location;
+	}
+        /* TEMP CODE */
+#endif
+
+	asoc = &stcb->asoc;
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+		/* nothing around */
+		so = NULL;
+	else
+		so = inp->sctp_socket;
+
+	/*
+	 * We use timer-based freeing if a reader or writer is in the way.
+	 * So we first check if we are actually being called from a timer;
+	 * if so, we abort early if a reader or writer is still in the way.
+	 */
+	if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
+	    (from_inpcbfree == SCTP_NORMAL_PROC)) {
+		/*
+		 * is it the timer driving us? if so are the reader/writers
+		 * gone?
+		 */
+		if (stcb->asoc.refcnt) {
+			/* nope, reader or writer in the way */
+			sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+			/* no asoc destroyed */
+			SCTP_TCB_UNLOCK(stcb);
+#ifdef SCTP_LOG_CLOSING
+			sctp_log_closing(inp, stcb, 8);
+#endif
+			return (0);
+		}
+	}
+	/* now clean up any other timers */
+	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
+	asoc->dack_timer.self = NULL;
+	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+	/*-
+	 * For stream reset we don't blast this unless
+	 * it is a str-reset timer, it might be the
+	 * free-asoc timer which we DON'T want to
+	 * disturb.
+	 */
+	if (asoc->strreset_timer.type == SCTP_TIMER_TYPE_STRRESET)
+		asoc->strreset_timer.self = NULL;
+	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
+	asoc->asconf_timer.self = NULL;
+	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
+	asoc->autoclose_timer.self = NULL;
+	(void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
+	asoc->shut_guard_timer.self = NULL;
+	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
+	asoc->delayed_event_timer.self = NULL;
+	/* Mobility adaptation */
+	(void)SCTP_OS_TIMER_STOP(&asoc->delete_prim_timer.timer);
+	asoc->delete_prim_timer.self = NULL;
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		(void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
+		net->rxt_timer.self = NULL;
+		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
+		net->pmtu_timer.self = NULL;
+		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
+		net->hb_timer.self = NULL;
+	}
+	/* Now the read queue needs to be cleaned up (only once) */
+	if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) {
+		stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED;
+		SCTP_INP_READ_LOCK(inp);
+		TAILQ_FOREACH(sq, &inp->read_queue, next) {
+			if (sq->stcb == stcb) {
+				sq->do_not_ref_stcb = 1;
+				sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+				/* If there is no end, there never
+				 * will be now.
+				 */
+				if (sq->end_added == 0) {
+					/* Held for PD-API clear that. */
+					sq->pdapi_aborted = 1;
+					sq->held_length = 0;
+					if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT) && (so != NULL)) {
+						/*
+						 * Need to add a PD-API aborted indication.
+						 * Setting the control_pdapi assures that it will
+						 * be added right after this msg.
+						 */
+						uint32_t strseq;
+						stcb->asoc.control_pdapi = sq;
+						strseq = (sq->sinfo_stream << 16) | (sq->mid & 0x0000ffff);
+						sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
+						                stcb,
+						                SCTP_PARTIAL_DELIVERY_ABORTED,
+						                (void *)&strseq,
+						                SCTP_SO_LOCKED);
+						stcb->asoc.control_pdapi = NULL;
+					}
+				}
+				/* Add an end to wake them */
+				sq->end_added = 1;
+			}
+		}
+		SCTP_INP_READ_UNLOCK(inp);
+		if (stcb->block_entry) {
+			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PCB, ECONNRESET);
+			stcb->block_entry->error = ECONNRESET;
+			stcb->block_entry = NULL;
+		}
+	}
+	if ((stcb->asoc.refcnt) || (stcb->asoc.state & SCTP_STATE_IN_ACCEPT_QUEUE)) {
+		/* Someone holds a reference OR the socket has not been
+		 * accepted yet.
+		 */
+		if ((stcb->asoc.refcnt)  ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+			stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+			sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+		}
+		SCTP_TCB_UNLOCK(stcb);
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+			/* nothing around */
+			so = NULL;
+		if (so) {
+			/* Wake any reader/writers */
+			sctp_sorwakeup(inp, so);
+			sctp_sowwakeup(inp, so);
+		}
+
+#ifdef SCTP_LOG_CLOSING
+		sctp_log_closing(inp, stcb, 9);
+#endif
+		/* no asoc destroyed */
+		return (0);
+	}
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, stcb, 10);
+#endif
+	/* When I reach here, no others want
+	 * to kill the assoc yet, and I own
+	 * the lock. Now it is possible an abort
+	 * comes in while I do the lock exchange
+	 * below to grab all the locks needed for
+	 * the final take-out. To prevent this
+	 * we increment the refcount, which makes
+	 * any such racer start a timer and bail
+	 * out above, assuring us that we hold
+	 * exclusive rights to kill the asoc.
+	 * Note that after getting back the TCB
+	 * lock we drop that extra reference
+	 * again before the final checks below.
+	 */
+	if (from_inpcbfree == SCTP_NORMAL_PROC) {
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+
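+		/* Drop the TCB lock before taking the INFO and INP write
+		 * locks (keeping the lock ordering), then re-take the TCB
+		 * lock; the reference taken above keeps the assoc around.
+		 */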
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_INP_INFO_WLOCK();
+		SCTP_INP_WLOCK(inp);
+		SCTP_TCB_LOCK(stcb);
+	}
+	/* Double check the GONE flag */
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
+		/* nothing around */
+		so = NULL;
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/*
+		 * For TCP type we need special handling when we are
+		 * connected. We also include the peeled-off ones too.
+		 */
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+			inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
+			inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED;
+			if (so) {
+				SOCK_LOCK(so);
+				if (so->so_rcv.sb_cc == 0) {
+					so->so_state &= ~(SS_ISCONNECTING |
+							  SS_ISDISCONNECTING |
+							  SS_ISCONFIRMING |
+							  SS_ISCONNECTED);
+				}
+#if defined(__APPLE__)
+				socantrcvmore(so);
+#else
+				socantrcvmore_locked(so);
+#endif
+				socantsendmore(so);
+				sctp_sowwakeup(inp, so);
+				sctp_sorwakeup(inp, so);
+				SCTP_SOWAKEUP(so);
+			}
+		}
+	}
+
+	/* Make it invalid too, that way if it's
+	 * about to run it will abort and return.
+	 */
+	/* drop the extra reference taken above */
+	if (from_inpcbfree == SCTP_NORMAL_PROC) {
+		atomic_add_int(&stcb->asoc.refcnt, -1);
+	}
+	if (stcb->asoc.refcnt) {
+		stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+		sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
+		if (from_inpcbfree == SCTP_NORMAL_PROC) {
+			SCTP_INP_INFO_WUNLOCK();
+			SCTP_INP_WUNLOCK(inp);
+		}
+		SCTP_TCB_UNLOCK(stcb);
+		return (0);
+	}
+	asoc->state = 0;
+	if (inp->sctp_tcbhash) {
+		LIST_REMOVE(stcb, sctp_tcbhash);
+	}
+	if (stcb->asoc.in_asocid_hash) {
+		LIST_REMOVE(stcb, sctp_tcbasocidhash);
+	}
+	/* Now lets remove it from the list of ALL associations in the EP */
+	LIST_REMOVE(stcb, sctp_tcblist);
+	if (from_inpcbfree == SCTP_NORMAL_PROC) {
+		SCTP_INP_INCR_REF(inp);
+		SCTP_INP_WUNLOCK(inp);
+	}
+	/* pull from vtag hash */
+	LIST_REMOVE(stcb, sctp_asocs);
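+	/* Park the verification tag in the time-wait list so it is not
+	 * handed out again right away.
+	 */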
+	sctp_add_vtag_to_timewait(asoc->my_vtag, SCTP_BASE_SYSCTL(sctp_vtag_time_wait),
+				  inp->sctp_lport, stcb->rport);
+
+	/* Now restop the timers to be sure
+	 * this is paranoia at its finest!
+	 */
+	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		(void)SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
+		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
+		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
+	}
+
+	asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE;
+	/*
+	 * The chunk lists and such SHOULD be empty but we check them just
+	 * in case.
+	 */
+	/* anything on the wheel needs to be removed */
+	for (i = 0; i < asoc->streamoutcnt; i++) {
+		struct sctp_stream_out *outs;
+
+		outs = &asoc->strmout[i];
+		/* now clean up any chunks here */
+		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
+			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+			TAILQ_REMOVE(&outs->outqueue, sp, next);
+			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, 0);
+			sctp_free_spbufspace(stcb, asoc, sp);
+			if (sp->data) {
+				if (so) {
+					/* Still an open socket - report */
+					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
+					                0, (void *)sp, SCTP_SO_LOCKED);
+				}
+				if (sp->data) {
+					sctp_m_freem(sp->data);
+					sp->data = NULL;
+					sp->tail_mbuf = NULL;
+					sp->length = 0;
+				}
+			}
+			if (sp->net) {
+				sctp_free_remote_addr(sp->net);
+				sp->net = NULL;
+			}
+			sctp_free_a_strmoq(stcb, sp, SCTP_SO_LOCKED);
+		}
+	}
+	/*sa_ignore FREED_MEMORY*/
+	TAILQ_FOREACH_SAFE(strrst, &asoc->resetHead, next_resp, nstrrst) {
+		TAILQ_REMOVE(&asoc->resetHead, strrst, next_resp);
+		SCTP_FREE(strrst, SCTP_M_STRESET);
+	}
+	TAILQ_FOREACH_SAFE(sq, &asoc->pending_reply_queue, next, nsq) {
+		TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
+		if (sq->data) {
+			sctp_m_freem(sq->data);
+			sq->data = NULL;
+		}
+		sctp_free_remote_addr(sq->whoFrom);
+		sq->whoFrom = NULL;
+		sq->stcb = NULL;
+		/* Free the ctl entry */
+		sctp_free_a_readq(stcb, sq);
+		/*sa_ignore FREED_MEMORY*/
+	}
+	TAILQ_FOREACH_SAFE(chk, &asoc->free_chunks, sctp_next, nchk) {
+		TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next);
+		if (chk->data) {
+			sctp_m_freem(chk->data);
+			chk->data = NULL;
+		}
+		if (chk->holds_key_ref)
+			sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+		SCTP_DECR_CHK_COUNT();
+		atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1);
+		asoc->free_chunk_cnt--;
+		/*sa_ignore FREED_MEMORY*/
+	}
+	/* pending send queue SHOULD be empty */
+	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+		} else {
+			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+		}
+		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+		if (chk->data) {
+			if (so) {
+				/* Still a socket? */
+				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
+				                0, chk, SCTP_SO_LOCKED);
+			}
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+		}
+		if (chk->holds_key_ref)
+			sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+		if (chk->whoTo) {
+			sctp_free_remote_addr(chk->whoTo);
+			chk->whoTo = NULL;
+		}
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+		SCTP_DECR_CHK_COUNT();
+		/*sa_ignore FREED_MEMORY*/
+	}
+	/* sent queue SHOULD be empty */
+	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
+			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+			} else {
+				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+			}
+		}
+		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+		if (chk->data) {
+			if (so) {
+				/* Still a socket? */
+				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
+				                0, chk, SCTP_SO_LOCKED);
+			}
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+		}
+		if (chk->holds_key_ref)
+			sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+		sctp_free_remote_addr(chk->whoTo);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+		SCTP_DECR_CHK_COUNT();
+		/*sa_ignore FREED_MEMORY*/
+	}
+#ifdef INVARIANTS
+	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+		if (stcb->asoc.strmout[i].chunks_on_queues > 0) {
+			panic("%u chunks left for stream %u.", stcb->asoc.strmout[i].chunks_on_queues, i);
+		}
+	}
+#endif
+	/* control queue MAY not be empty */
+	TAILQ_FOREACH_SAFE(chk, &asoc->control_send_queue, sctp_next, nchk) {
+		TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
+		if (chk->data) {
+			sctp_m_freem(chk->data);
+			chk->data = NULL;
+		}
+		if (chk->holds_key_ref)
+			sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+		sctp_free_remote_addr(chk->whoTo);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+		SCTP_DECR_CHK_COUNT();
+		/*sa_ignore FREED_MEMORY*/
+	}
+	/* ASCONF queue MAY not be empty */
+	TAILQ_FOREACH_SAFE(chk, &asoc->asconf_send_queue, sctp_next, nchk) {
+		TAILQ_REMOVE(&asoc->asconf_send_queue, chk, sctp_next);
+		if (chk->data) {
+			sctp_m_freem(chk->data);
+			chk->data = NULL;
+		}
+		if (chk->holds_key_ref)
+			sctp_auth_key_release(stcb, chk->auth_keyid, SCTP_SO_LOCKED);
+		sctp_free_remote_addr(chk->whoTo);
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), chk);
+		SCTP_DECR_CHK_COUNT();
+		/*sa_ignore FREED_MEMORY*/
+	}
+	if (asoc->mapping_array) {
+		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+		asoc->mapping_array = NULL;
+	}
+	if (asoc->nr_mapping_array) {
+		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+		asoc->nr_mapping_array = NULL;
+	}
+	/* the stream outs */
+	if (asoc->strmout) {
+		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+		asoc->strmout = NULL;
+	}
+	asoc->strm_realoutsize = asoc->streamoutcnt = 0;
+	if (asoc->strmin) {
+		for (i = 0; i < asoc->streamincnt; i++) {
+			sctp_clean_up_stream(stcb, &asoc->strmin[i].inqueue);
+			sctp_clean_up_stream(stcb, &asoc->strmin[i].uno_inqueue);
+		}
+		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
+		asoc->strmin = NULL;
+	}
+	asoc->streamincnt = 0;
+	TAILQ_FOREACH_SAFE(net, &asoc->nets, sctp_next, nnet) {
+#ifdef INVARIANTS
+		if (SCTP_BASE_INFO(ipi_count_raddr) == 0) {
+			panic("no net's left alloc'ed, or list points to itself");
+		}
+#endif
+		TAILQ_REMOVE(&asoc->nets, net, sctp_next);
+		sctp_free_remote_addr(net);
+	}
+	LIST_FOREACH_SAFE(laddr, &asoc->sctp_restricted_addrs, sctp_nxt_addr, naddr) {
+		/*sa_ignore FREED_MEMORY*/
+		sctp_remove_laddr(laddr);
+	}
+
+	/* pending asconf (address) parameters */
+	TAILQ_FOREACH_SAFE(aparam, &asoc->asconf_queue, next, naparam) {
+		/*sa_ignore FREED_MEMORY*/
+		TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
+		SCTP_FREE(aparam,SCTP_M_ASC_ADDR);
+	}
+	TAILQ_FOREACH_SAFE(aack, &asoc->asconf_ack_sent, next, naack) {
+		/*sa_ignore FREED_MEMORY*/
+		TAILQ_REMOVE(&asoc->asconf_ack_sent, aack, next);
+		if (aack->data != NULL) {
+			sctp_m_freem(aack->data);
+		}
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asconf_ack), aack);
+	}
+	/* clean up auth stuff */
+	if (asoc->local_hmacs)
+		sctp_free_hmaclist(asoc->local_hmacs);
+	if (asoc->peer_hmacs)
+		sctp_free_hmaclist(asoc->peer_hmacs);
+
+	if (asoc->local_auth_chunks)
+		sctp_free_chunklist(asoc->local_auth_chunks);
+	if (asoc->peer_auth_chunks)
+		sctp_free_chunklist(asoc->peer_auth_chunks);
+
+	sctp_free_authinfo(&asoc->authinfo);
+
+	LIST_FOREACH_SAFE(shared_key, &asoc->shared_keys, next, nshared_key) {
+		LIST_REMOVE(shared_key, next);
+		sctp_free_sharedkey(shared_key);
+		/*sa_ignore FREED_MEMORY*/
+	}
+
+	/* Insert new items here :> */
+
+	/* Get rid of LOCK */
+	SCTP_TCB_UNLOCK(stcb);
+	SCTP_TCB_LOCK_DESTROY(stcb);
+	SCTP_TCB_SEND_LOCK_DESTROY(stcb);
+	if (from_inpcbfree == SCTP_NORMAL_PROC) {
+		SCTP_INP_INFO_WUNLOCK();
+		SCTP_INP_RLOCK(inp);
+	}
+#if defined(__APPLE__) /* TEMP CODE */
+	stcb->freed_from_where = from_location;
+#endif
+#ifdef SCTP_TRACK_FREED_ASOCS
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+		/* now clean up the tasoc itself */
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+		SCTP_DECR_ASOC_COUNT();
+	} else {
+		LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist);
+	}
+#else
+	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_asoc), stcb);
+	SCTP_DECR_ASOC_COUNT();
+#endif
+	if (from_inpcbfree == SCTP_NORMAL_PROC) {
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+			/* If it's NOT the inp_free calling us AND
+			 * sctp_close has been called, we
+			 * call back...
+			 */
+			SCTP_INP_RUNLOCK(inp);
+			/* This will start the kill timer (if we are
+			 * the last one) since we still hold an increment.
+			 * This is the only safe way to do this,
+			 * since otherwise, if the socket closes
+			 * at the same time we are here, we might
+			 * collide in the cleanup.
+			 */
+			sctp_inpcb_free(inp,
+					SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+					SCTP_CALLED_DIRECTLY_NOCMPSET);
+			SCTP_INP_DECR_REF(inp);
+			goto out_of;
+		} else {
+			/* The socket is still open. */
+			SCTP_INP_DECR_REF(inp);
+		}
+	}
+	if (from_inpcbfree == SCTP_NORMAL_PROC) {
+		SCTP_INP_RUNLOCK(inp);
+	}
+ out_of:
+	/* destroyed the asoc */
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, NULL, 11);
+#endif
+	return (1);
+}
+
+
+
+/*
+ * determine if a destination is "reachable" based upon the addresses bound
+ * to the current endpoint (e.g. only v4 or v6 currently bound)
+ */
+/*
+ * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use
+ * assoc level v4/v6 flags, as the assoc *may* not have the same address
+ * types bound as its endpoint
+ */
+int
+sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
+{
+	struct sctp_inpcb *inp;
+	int answer;
+
+	/*
+	 * No locks here: the TCB, in all cases, is already locked and an
+	 * assoc is up. There is either an INP lock applied by the caller (in
+	 * the asconf case when deleting an address) or NOT (in the HB case);
+	 * however, if HB, then the INP refcount is up and the INP will not
+	 * be removed (on top of the fact that we have a TCB lock). So we
+	 * only want to read the sctp_flags, which is either bound-all or
+	 * not; no protection is needed since once an assoc is up you can't
+	 * change your binding.
+	 */
+	inp = stcb->sctp_ep;
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		/* if bound all, destination is not restricted */
+		/*
+		 * RRS: Question during lock work: Is this correct? If you
+		 * are bound-all you still might need to obey the V4--V6
+		 * flags??? IMO this bound-all stuff needs to be removed!
+		 */
+		return (1);
+	}
+	/* NOTE: all "scope" checks are done when local addresses are added */
+	switch (destaddr->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+		answer = inp->inp_vflag & INP_IPV6;
+#else
+		answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
+#endif
+		break;
+#endif
+#ifdef INET
+	case AF_INET:
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+		answer = inp->inp_vflag & INP_IPV4;
+#else
+		answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
+#endif
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		answer = inp->ip_inp.inp.inp_vflag & INP_CONN;
+		break;
+#endif
+	default:
+		/* invalid family, so it's unreachable */
+		answer = 0;
+		break;
+	}
+	return (answer);
+}
+
+/*
+ * update the inp_vflags on an endpoint
+ */
+static void
+sctp_update_ep_vflag(struct sctp_inpcb *inp)
+{
+	struct sctp_laddr *laddr;
+
+	/* first clear the flag */
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+	inp->inp_vflag = 0;
+#else
+	inp->ip_inp.inp.inp_vflag = 0;
+#endif
+	/* set the flag based on addresses on the ep list */
+	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+		if (laddr->ifa == NULL) {
+			SCTPDBG(SCTP_DEBUG_PCB1, "%s: NULL ifa\n",
+				__func__);
+			continue;
+		}
+
+		if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
+			continue;
+		}
+		switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET6
+		case AF_INET6:
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+			inp->inp_vflag |= INP_IPV6;
+#else
+			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+#endif
+			break;
+#endif
+#ifdef INET
+		case AF_INET:
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+			inp->inp_vflag |= INP_IPV4;
+#else
+			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+#endif
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			inp->ip_inp.inp.inp_vflag |= INP_CONN;
+			break;
+#endif
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * Add the address to the endpoint local address list. There is nothing to be
+ * done if we are bound to all addresses.
+ */
+void
+sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t action)
+{
+	struct sctp_laddr *laddr;
+	struct sctp_tcb *stcb;
+	int fnd, error = 0;
+
+	fnd = 0;
+
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		/* You are already bound to all. You have it already */
+		return;
+	}
+#ifdef INET6
+	if (ifa->address.sa.sa_family == AF_INET6) {
+		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+			/* Can't bind a non-useable addr. */
+			return;
+		}
+	}
+#endif
+	/* first, is it already present? */
+	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+		if (laddr->ifa == ifa) {
+			fnd = 1;
+			break;
+		}
+	}
+
+	if (fnd == 0) {
+		/* Not in the ep list */
+		error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, action);
+		if (error != 0)
+			return;
+		inp->laddr_count++;
+		/* update inp_vflag flags */
+		switch (ifa->address.sa.sa_family) {
+#ifdef INET6
+		case AF_INET6:
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+			inp->inp_vflag |= INP_IPV6;
+#else
+			inp->ip_inp.inp.inp_vflag |= INP_IPV6;
+#endif
+			break;
+#endif
+#ifdef INET
+		case AF_INET:
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+			inp->inp_vflag |= INP_IPV4;
+#else
+			inp->ip_inp.inp.inp_vflag |= INP_IPV4;
+#endif
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			inp->ip_inp.inp.inp_vflag |= INP_CONN;
+			break;
+#endif
+		default:
+			break;
+		}
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			sctp_add_local_addr_restricted(stcb, ifa);
+		}
+	}
+	return;
+}
+
+
+/*
+ * select a new (hopefully reachable) destination net (should only be used
+ * when we deleted an ep addr that is the only usable source address to reach
+ * the destination net)
+ */
+static void
+sctp_select_primary_destination(struct sctp_tcb *stcb)
+{
+	struct sctp_nets *net;
+
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		/* for now, we'll just pick the first reachable one we find */
+		if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
+			continue;
+		if (sctp_destination_is_reachable(stcb,
+		    (struct sockaddr *)&net->ro._l_addr)) {
+			/* found a reachable destination */
+			stcb->asoc.primary_destination = net;
+		}
+	}
+	/* I can't get there from here! ...we're gonna die shortly... */
+}
+
+
+/*
+ * Delete the address from the endpoint local address list. There is nothing
+ * to be done if we are bound to all addresses
+ */
+void
+sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
+{
+	struct sctp_laddr *laddr;
+	int fnd;
+
+	fnd = 0;
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		/* You are already bound to all. You have it already */
+		return;
+	}
+	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+		if (laddr->ifa == ifa) {
+			fnd = 1;
+			break;
+		}
+	}
+	if (fnd && (inp->laddr_count < 2)) {
+		/* can't delete unless there are at LEAST 2 addresses */
+		return;
+	}
+	if (fnd) {
+		/*
+		 * Clean up any use of this address: go through our
+		 * associations and clear any last_used_address that matches
+		 * this one; for each assoc, see if a new primary_destination
+		 * is needed.
+		 */
+		struct sctp_tcb *stcb;
+
+		/* clean up "next_addr_touse" */
+		if (inp->next_addr_touse == laddr)
+			/* delete this address */
+			inp->next_addr_touse = NULL;
+
+		/* clean up "last_used_address" */
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			struct sctp_nets *net;
+
+			SCTP_TCB_LOCK(stcb);
+			if (stcb->asoc.last_used_address == laddr)
+				/* delete this address */
+				stcb->asoc.last_used_address = NULL;
+			/* Now spin through all the nets and purge any ref to laddr */
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				if (net->ro._s_addr == laddr->ifa) {
+					/* Yep, purge src address selected */
+					sctp_rtentry_t *rt;
+
+					/* delete this address if cached */
+					rt = net->ro.ro_rt;
+					if (rt != NULL) {
+						RTFREE(rt);
+						net->ro.ro_rt = NULL;
+					}
+					sctp_free_ifa(net->ro._s_addr);
+					net->ro._s_addr = NULL;
+					net->src_addr_selected = 0;
+				}
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		}		/* for each tcb */
+		/* remove it from the ep list */
+		sctp_remove_laddr(laddr);
+		inp->laddr_count--;
+		/* update inp_vflag flags */
+		sctp_update_ep_vflag(inp);
+	}
+	return;
+}
+
+/*
+ * Add the address to the TCB local address restricted list.
+ * This is a "pending" address list (eg. addresses waiting for an
+ * ASCONF-ACK response) and cannot be used as a valid source address.
+ */
+void
+sctp_add_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+	struct sctp_laddr *laddr;
+	struct sctpladdr *list;
+
+	/*
+	 * Assumes the TCB is locked, and possibly the INP. May need to
+	 * confirm/fix that if we need it and it is not the case.
+	 */
+	list = &stcb->asoc.sctp_restricted_addrs;
+
+#ifdef INET6
+	if (ifa->address.sa.sa_family == AF_INET6) {
+		if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
+			/* Can't bind a non-useable addr. */
+			return;
+		}
+	}
+#endif
+	/* does the address already exist? */
+	LIST_FOREACH(laddr, list, sctp_nxt_addr) {
+		if (laddr->ifa == ifa) {
+			return;
+		}
+	}
+
+	/* add to the list */
+	(void)sctp_insert_laddr(list, ifa, 0);
+	return;
+}
+
+/*
+ * Remove a local address from the TCB local address restricted list
+ */
+void
+sctp_del_local_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
+{
+	struct sctp_inpcb *inp;
+	struct sctp_laddr *laddr;
+
+	/*
+	 * This is called by the asconf work. It is assumed that a) the TCB
+	 * is locked and b) the INP is locked. This is true as far as I can
+	 * trace through the asconf entry code where these locks are taken.
+	 * Again, the ASCONF code is a bit different in that it often does
+	 * lock the INP during its work. This must be so, since we don't
+	 * want other procs looking up things while what they are looking
+	 * up is changing :-D
+	 */
+
+	inp = stcb->sctp_ep;
+	/* if subset bound and don't allow ASCONF's, can't delete last */
+	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
+	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
+		if (stcb->sctp_ep->laddr_count < 2) {
+			/* can't delete last address */
+			return;
+		}
+	}
+	LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
+		/* remove the address if it exists */
+		if (laddr->ifa == NULL)
+			continue;
+		if (laddr->ifa == ifa) {
+			sctp_remove_laddr(laddr);
+			return;
+		}
+	}
+
+	/* address not found! */
+	return;
+}
+
+#if defined(__FreeBSD__)
+/*
+ * Temporarily removed for __APPLE__ until we use the Tiger equivalents
+ */
+/* sysctl */
+static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
+static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
+#endif				/* __FreeBSD__ */
+
+
+
+#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
+struct sctp_mcore_ctrl *sctp_mcore_workers = NULL;
+int *sctp_cpuarry = NULL;
+void
+sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use)
+{
+	/* Queue a packet to a processor for the specified core */
+	struct sctp_mcore_queue *qent;
+	struct sctp_mcore_ctrl *wkq;
+	int need_wake = 0;
+	if (sctp_mcore_workers == NULL) {
+		/* Something went way bad during setup */
+		sctp_input_with_port(m, off, 0);
+		return;
+	}
+	SCTP_MALLOC(qent, struct sctp_mcore_queue *,
+		    (sizeof(struct sctp_mcore_queue)),
+		    SCTP_M_MCORE);
+	if (qent == NULL) {
+		/* This is trouble  */
+		sctp_input_with_port(m, off, 0);
+		return;
+	}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	qent->vn = curvnet;
+#endif
+	qent->m = m;
+	qent->off = off;
+	qent->v6 = 0;
+	wkq = &sctp_mcore_workers[cpu_to_use];
+	SCTP_MCORE_QLOCK(wkq);
+
+	TAILQ_INSERT_TAIL(&wkq->que, qent, next);
+	if (wkq->running == 0) {
+		need_wake = 1;
+	}
+	SCTP_MCORE_QUNLOCK(wkq);
+	if (need_wake) {
+		wakeup(&wkq->running);
+	}
+}
+
+static void
+sctp_mcore_thread(void *arg)
+{
+
+	struct sctp_mcore_ctrl *wkq;
+	struct sctp_mcore_queue *qent;
+	struct mbuf *m;
+	int off, v6;
+
+	wkq = (struct sctp_mcore_ctrl *)arg;
+
+	/* Wait for first tickle */
+	SCTP_MCORE_LOCK(wkq);
+	wkq->running = 0;
+	msleep(&wkq->running,
+	       &wkq->core_mtx,
+	       0, "wait for pkt", 0);
+	SCTP_MCORE_UNLOCK(wkq);
+
+	/* Bind to our cpu */
+	thread_lock(curthread);
+	sched_bind(curthread, wkq->cpuid);
+	thread_unlock(curthread);
+
+	/* Now lets start working */
+	SCTP_MCORE_LOCK(wkq);
+	/* Now grab lock and go */
+	for (;;) {
+		SCTP_MCORE_QLOCK(wkq);
+	skip_sleep:
+		wkq->running = 1;
+		qent = TAILQ_FIRST(&wkq->que);
+		if (qent) {
+			TAILQ_REMOVE(&wkq->que, qent, next);
+			SCTP_MCORE_QUNLOCK(wkq);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+			CURVNET_SET(qent->vn);
+#endif
+			m = qent->m;
+			off = qent->off;
+			v6 = qent->v6;
+			SCTP_FREE(qent, SCTP_M_MCORE);
+			if (v6 == 0) {
+				sctp_input_with_port(m, off, 0);
+			} else {
+				SCTP_PRINTF("V6 not yet supported\n");
+				sctp_m_freem(m);
+			}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+			CURVNET_RESTORE();
+#endif
+			SCTP_MCORE_QLOCK(wkq);
+		}
+		wkq->running = 0;
+		if (!TAILQ_EMPTY(&wkq->que)) {
+			goto skip_sleep;
+		}
+		SCTP_MCORE_QUNLOCK(wkq);
+		msleep(&wkq->running,
+		       &wkq->core_mtx,
+		       0, "wait for pkt", 0);
+	}
+}
+
+static void
+sctp_startup_mcore_threads(void)
+{
+	int i, cpu;
+
+	if (mp_ncpus == 1)
+		return;
+
+	if (sctp_mcore_workers != NULL) {
+		/* Already been here in some previous
+		 * vnet?
+		 */
+		return;
+	}
+	SCTP_MALLOC(sctp_mcore_workers, struct sctp_mcore_ctrl *,
+		    ((mp_maxid+1) * sizeof(struct sctp_mcore_ctrl)),
+		    SCTP_M_MCORE);
+	if (sctp_mcore_workers == NULL) {
+		/* TSNH I hope */
+		return;
+	}
+	memset(sctp_mcore_workers, 0, ((mp_maxid + 1) *
+					sizeof(struct sctp_mcore_ctrl)));
+	/* Init the structures */
+	for (i = 0; i <= mp_maxid; i++) {
+		TAILQ_INIT(&sctp_mcore_workers[i].que);
+		SCTP_MCORE_LOCK_INIT(&sctp_mcore_workers[i]);
+		SCTP_MCORE_QLOCK_INIT(&sctp_mcore_workers[i]);
+		sctp_mcore_workers[i].cpuid = i;
+	}
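+	/* sctp_cpuarry maps a dense index onto the CPU ids present in the system */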
+	if (sctp_cpuarry == NULL) {
+		SCTP_MALLOC(sctp_cpuarry, int *,
+			    (mp_ncpus * sizeof(int)),
+			    SCTP_M_MCORE);
+		i = 0;
+		CPU_FOREACH(cpu) {
+			sctp_cpuarry[i] = cpu;
+			i++;
+		}
+	}
+
+	/* Now start them all */
+	CPU_FOREACH(cpu) {
+#if __FreeBSD_version <= 701000
+		(void)kthread_create(sctp_mcore_thread,
+				     (void *)&sctp_mcore_workers[cpu],
+				     &sctp_mcore_workers[cpu].thread_proc,
+				     RFPROC,
+				     SCTP_KTHREAD_PAGES,
+				     SCTP_MCORE_NAME);
+
+#else
+		(void)kproc_create(sctp_mcore_thread,
+				   (void *)&sctp_mcore_workers[cpu],
+				   &sctp_mcore_workers[cpu].thread_proc,
+				   RFPROC,
+				   SCTP_KTHREAD_PAGES,
+				   SCTP_MCORE_NAME);
+#endif
+
+	}
+}
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_cc_version >= 1300000
+static struct mbuf *
+sctp_netisr_hdlr(struct mbuf *m, uintptr_t source)
+{
+	struct ip *ip;
+	struct sctphdr *sh;
+	int offset;
+	uint32_t flowid, tag;
+
+	/*
+	 * No flow id was built by the lower layers, so fix that up
+	 * by creating one here.
+	 */
+	ip = mtod(m, struct ip *);
+	offset = (ip->ip_hl << 2) + sizeof(struct sctphdr);
+	if (SCTP_BUF_LEN(m) < offset) {
+		if ((m = m_pullup(m, offset)) == NULL) {
+			SCTP_STAT_INCR(sctps_hdrops);
+			return (NULL);
+		}
+		ip = mtod(m, struct ip *);
+	}
+	sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
+	tag = htonl(sh->v_tag);
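+	/* Derive a flow id by mixing the verification tag with the port pair. */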
+	flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
+	m->m_pkthdr.flowid = flowid;
+/* FIX ME */
+	m->m_flags |= M_FLOWID;
+	return (m);
+}
+#endif
+
+void
+sctp_pcb_init()
+{
+	/*
+	 * SCTP initialization for the PCB structures should be called by
+	 * the sctp_init() function.
+	 */
+	int i;
+	struct timeval tv;
+
+	if (SCTP_BASE_VAR(sctp_pcb_initialized) != 0) {
+		/* error I was called twice */
+		return;
+	}
+	SCTP_BASE_VAR(sctp_pcb_initialized) = 1;
+
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if !defined(__Userspace_os_Windows)
+	pthread_mutexattr_init(&SCTP_BASE_VAR(mtx_attr));
+#ifdef INVARIANTS
+	pthread_mutexattr_settype(&SCTP_BASE_VAR(mtx_attr), PTHREAD_MUTEX_ERRORCHECK);
+#endif
+#endif
+#endif
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(__Windows__)
+	if (SCTP_BASE_SYSCTL(sctp_log) != NULL) {
+		bzero(SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
+	}
+#else
+	bzero(&SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
+#endif
+#endif
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+	SCTP_MALLOC(SCTP_BASE_STATS, struct sctpstat *,
+		    ((mp_maxid+1) * sizeof(struct sctpstat)),
+		    SCTP_M_MCORE);
+#endif
+	(void)SCTP_GETTIME_TIMEVAL(&tv);
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+	bzero(SCTP_BASE_STATS, (sizeof(struct sctpstat) * (mp_maxid+1)));
+	SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_sec = (uint32_t)tv.tv_sec;
+	SCTP_BASE_STATS[PCPU_GET(cpuid)].sctps_discontinuitytime.tv_usec = (uint32_t)tv.tv_usec;
+#else
+	bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));
+	SCTP_BASE_STAT(sctps_discontinuitytime).tv_sec = (uint32_t)tv.tv_sec;
+	SCTP_BASE_STAT(sctps_discontinuitytime).tv_usec = (uint32_t)tv.tv_usec;
+#endif
+	/* init the empty list of (All) Endpoints */
+	LIST_INIT(&SCTP_BASE_INFO(listhead));
+#if defined(__APPLE__)
+	LIST_INIT(&SCTP_BASE_INFO(inplisthead));
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+	SCTP_BASE_INFO(sctbinfo).listhead = &SCTP_BASE_INFO(inplisthead);
+	SCTP_BASE_INFO(sctbinfo).mtx_grp_attr = lck_grp_attr_alloc_init();
+	lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
+	SCTP_BASE_INFO(sctbinfo).mtx_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
+	SCTP_BASE_INFO(sctbinfo).mtx_attr = lck_attr_alloc_init();
+	lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).mtx_attr);
+#else
+	SCTP_BASE_INFO(sctbinfo).ipi_listhead = &SCTP_BASE_INFO(inplisthead);
+	SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr = lck_grp_attr_alloc_init();
+	lck_grp_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
+	SCTP_BASE_INFO(sctbinfo).ipi_lock_grp = lck_grp_alloc_init("sctppcb", SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
+	SCTP_BASE_INFO(sctbinfo).ipi_lock_attr = lck_attr_alloc_init();
+	lck_attr_setdefault(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
+#endif
+#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
+	SCTP_BASE_INFO(sctbinfo).ipi_gc = sctp_gc;
+	in_pcbinfo_attach(&SCTP_BASE_INFO(sctbinfo));
+#endif
+#endif
+
+
+	/* init the hash table of endpoints */
+#if defined(__FreeBSD__)
+#if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 440000
+	TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &SCTP_BASE_SYSCTL(sctp_hashtblsize));
+	TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &SCTP_BASE_SYSCTL(sctp_pcbtblsize));
+	TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &SCTP_BASE_SYSCTL(sctp_chunkscale));
+#else
+	TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", SCTP_TCBHASHSIZE,
+			  SCTP_BASE_SYSCTL(sctp_hashtblsize));
+	TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", SCTP_PCBHASHSIZE,
+			  SCTP_BASE_SYSCTL(sctp_pcbtblsize));
+	TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", SCTP_CHUNKQUEUE_SCALE,
+			  SCTP_BASE_SYSCTL(sctp_chunkscale));
+#endif
+#endif
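+	/* the asoc hash is sized 31 times larger than the endpoint hash */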
+	SCTP_BASE_INFO(sctp_asochash) = SCTP_HASH_INIT((SCTP_BASE_SYSCTL(sctp_hashtblsize) * 31),
+						       &SCTP_BASE_INFO(hashasocmark));
+	SCTP_BASE_INFO(sctp_ephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
+						     &SCTP_BASE_INFO(hashmark));
+	SCTP_BASE_INFO(sctp_tcpephash) = SCTP_HASH_INIT(SCTP_BASE_SYSCTL(sctp_hashtblsize),
+							&SCTP_BASE_INFO(hashtcpmark));
+	SCTP_BASE_INFO(hashtblsize) = SCTP_BASE_SYSCTL(sctp_hashtblsize);
+
+
+	SCTP_BASE_INFO(sctp_vrfhash) = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
+						      &SCTP_BASE_INFO(hashvrfmark));
+
+	SCTP_BASE_INFO(vrf_ifn_hash) = SCTP_HASH_INIT(SCTP_VRF_IFN_HASH_SIZE,
+						      &SCTP_BASE_INFO(vrf_ifn_hashmark));
+	/* init the zones */
+	/*
+	 * FIX ME: Should check for NULL returns, but if it does fail we are
+	 * doomed to panic anyways... add later maybe.
+	 */
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_ep), "sctp_ep",
+		       sizeof(struct sctp_inpcb), maxsockets);
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asoc), "sctp_asoc",
+		       sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_laddr), "sctp_laddr",
+		       sizeof(struct sctp_laddr),
+		       (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_net), "sctp_raddr",
+		       sizeof(struct sctp_nets),
+		       (sctp_max_number_of_assoc * sctp_scale_up_for_address));
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_chunk), "sctp_chunk",
+		       sizeof(struct sctp_tmit_chunk),
+		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_readq), "sctp_readq",
+		       sizeof(struct sctp_queued_to_read),
+		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_strmoq), "sctp_stream_msg_out",
+		       sizeof(struct sctp_stream_queue_pending),
+		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf), "sctp_asconf",
+		       sizeof(struct sctp_asconf),
+		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+	SCTP_ZONE_INIT(SCTP_BASE_INFO(ipi_zone_asconf_ack), "sctp_asconf_ack",
+		       sizeof(struct sctp_asconf_ack),
+		       (sctp_max_number_of_assoc * SCTP_BASE_SYSCTL(sctp_chunkscale)));
+
+
+	/* Master Lock INIT for info structure */
+	SCTP_INP_INFO_LOCK_INIT();
+	SCTP_STATLOG_INIT_LOCK();
+
+	SCTP_IPI_COUNT_INIT();
+	SCTP_IPI_ADDR_INIT();
+#ifdef SCTP_PACKET_LOGGING
+	SCTP_IP_PKTLOG_INIT();
+#endif
+	LIST_INIT(&SCTP_BASE_INFO(addr_wq));
+
+	SCTP_WQ_ADDR_INIT();
+	/* not sure if we need all the counts */
+	SCTP_BASE_INFO(ipi_count_ep) = 0;
+	/* assoc/tcb zone info */
+	SCTP_BASE_INFO(ipi_count_asoc) = 0;
+	/* local addrlist zone info */
+	SCTP_BASE_INFO(ipi_count_laddr) = 0;
+	/* remote addrlist zone info */
+	SCTP_BASE_INFO(ipi_count_raddr) = 0;
+	/* chunk info */
+	SCTP_BASE_INFO(ipi_count_chunk) = 0;
+
+	/* socket queue zone info */
+	SCTP_BASE_INFO(ipi_count_readq) = 0;
+
+	/* stream out queue count */
+	SCTP_BASE_INFO(ipi_count_strmoq) = 0;
+
+	SCTP_BASE_INFO(ipi_free_strmoq) = 0;
+	SCTP_BASE_INFO(ipi_free_chunks) = 0;
+
+	SCTP_OS_TIMER_INIT(&SCTP_BASE_INFO(addr_wq_timer.timer));
+
+	/* Init the TIMEWAIT list */
+	for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
+		LIST_INIT(&SCTP_BASE_INFO(vtag_timewait)[i]);
+	}
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(__Userspace_os_Windows)
+	InitializeConditionVariable(&sctp_it_ctl.iterator_wakeup);
+#else
+	(void)pthread_cond_init(&sctp_it_ctl.iterator_wakeup, NULL);
+#endif
+#endif
+	sctp_startup_iterator();
+
+#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
+	sctp_startup_mcore_threads();
+#endif
+
+#ifndef __Panda__
+	/*
+	 * Init the default VRF, which for BSD is the only one; other OSes
+	 * may have more. But initially they must start with one and then
+	 * add VRFs as addresses are added.
+	 */
+	sctp_init_vrf_list(SCTP_DEFAULT_VRF);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_cc_version >= 1300000
+	if (ip_register_flow_handler(sctp_netisr_hdlr, IPPROTO_SCTP)) {
+		SCTP_PRINTF("***SCTP- Error can't register netisr handler***\n");
+	}
+#endif
+#if defined(_SCTP_NEEDS_CALLOUT_) || defined(_USER_SCTP_NEEDS_CALLOUT_)
+	/* allocate the lock for the callout/timer queue */
+	SCTP_TIMERQ_LOCK_INIT();
+	TAILQ_INIT(&SCTP_BASE_INFO(callqueue));
+#endif
+#if defined(__Userspace__)
+	mbuf_init(NULL);
+	atomic_init();
+#if defined(THREAD_SUPPORT) && (defined(INET) || defined(INET6))
+	recv_thread_init();
+#endif
+#endif
+}
+
+/*
+ * Assumes that the SCTP_BASE_INFO() lock is NOT held.
+ */
+void
+sctp_pcb_finish(void)
+{
+	struct sctp_vrflist *vrf_bucket;
+	struct sctp_vrf *vrf, *nvrf;
+	struct sctp_ifn *ifn, *nifn;
+	struct sctp_ifa *ifa, *nifa;
+	struct sctpvtaghead *chain;
+	struct sctp_tagblock *twait_block, *prev_twait_block;
+	struct sctp_laddr *wi, *nwi;
+	int i;
+	struct sctp_iterator *it, *nit;
+
+	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+		SCTP_PRINTF("%s: race condition on teardown.\n", __func__);
+		return;
+	}
+	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
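+	/* Tear down in roughly the reverse order of sctp_pcb_init(): stop
+	 * the iterator, drain the address work queue, free the vrf/ifn/ifa
+	 * structures and time-wait lists, then destroy locks, hashes and
+	 * zones.
+	 */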
+#if !defined(__FreeBSD__)
+	/* Notify the iterator to exit. */
+	SCTP_IPI_ITERATOR_WQ_LOCK();
+	sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_MUST_EXIT;
+	sctp_wakeup_iterator();
+	SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#endif
+#if defined(__APPLE__)
+#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
+	in_pcbinfo_detach(&SCTP_BASE_INFO(sctbinfo));
+#endif
+	SCTP_IPI_ITERATOR_WQ_LOCK();
+	do {
+		msleep(&sctp_it_ctl.iterator_flags,
+		       sctp_it_ctl.ipi_iterator_wq_mtx,
+		       0, "waiting_for_work", 0);
+	} while ((sctp_it_ctl.iterator_flags & SCTP_ITERATOR_EXITED) == 0);
+	thread_deallocate(sctp_it_ctl.thread_proc);
+	SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#endif
+#if defined(__Windows__)
+	if (sctp_it_ctl.iterator_thread_obj != NULL) {
+		NTSTATUS status = STATUS_SUCCESS;
+
+		KeSetEvent(&sctp_it_ctl.iterator_wakeup[1], IO_NO_INCREMENT, FALSE);
+		status = KeWaitForSingleObject(sctp_it_ctl.iterator_thread_obj,
+					       Executive,
+					       KernelMode,
+					       FALSE,
+					       NULL);
+		ObDereferenceObject(sctp_it_ctl.iterator_thread_obj);
+	}
+#endif
+#if defined(__Userspace__)
+	if (sctp_it_ctl.thread_proc) {
+#if defined(__Userspace_os_Windows)
+		WaitForSingleObject(sctp_it_ctl.thread_proc, INFINITE);
+		CloseHandle(sctp_it_ctl.thread_proc);
+		sctp_it_ctl.thread_proc = NULL;
+#else
+		pthread_join(sctp_it_ctl.thread_proc, NULL);
+		sctp_it_ctl.thread_proc = 0;
+#endif
+	}
+#endif
+#if defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(__Userspace_os_Windows)
+	DeleteConditionVariable(&sctp_it_ctl.iterator_wakeup);
+#else
+	pthread_cond_destroy(&sctp_it_ctl.iterator_wakeup);
+	pthread_mutexattr_destroy(&SCTP_BASE_VAR(mtx_attr));
+#endif
+#endif
+	/* In FreeBSD the iterator thread never exits
+	 * but we do clean up.
+	 * The only way FreeBSD reaches here is if we have VRF's
+	 * but we still add the ifdef to make it compile on old versions.
+	 */
+#if defined(__FreeBSD__)
+retry:
+#endif
+	SCTP_IPI_ITERATOR_WQ_LOCK();
+#if defined(__FreeBSD__)
+	/*
+	 * sctp_iterator_worker() might be working on an it entry without
+	 * holding the lock. We would not find it on the list and would
+	 * then continue on and free/destroy it. While holding the lock,
+	 * spin to avoid that race, as sctp_iterator_worker() will have to
+	 * wait to re-acquire the lock.
+	 */
+	if (sctp_it_ctl.iterator_running != 0 || sctp_it_ctl.cur_it != NULL) {
+		SCTP_IPI_ITERATOR_WQ_UNLOCK();
+		SCTP_PRINTF("%s: Iterator running while we held the lock. Retry. "
+		            "cur_it=%p\n", __func__, sctp_it_ctl.cur_it);
+		DELAY(10);
+		goto retry;
+	}
+#endif
+	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		if (it->vn != curvnet) {
+			continue;
+		}
+#endif
+		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+		if (it->function_atend != NULL) {
+			(*it->function_atend) (it->pointer, it->val);
+		}
+		SCTP_FREE(it,SCTP_M_ITER);
+	}
+	SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	SCTP_ITERATOR_LOCK();
+	if ((sctp_it_ctl.cur_it) &&
+	    (sctp_it_ctl.cur_it->vn == curvnet)) {
+		sctp_it_ctl.iterator_flags |= SCTP_ITERATOR_STOP_CUR_IT;
+	}
+	SCTP_ITERATOR_UNLOCK();
+#endif
+#if !defined(__FreeBSD__)
+	SCTP_IPI_ITERATOR_WQ_DESTROY();
+	SCTP_ITERATOR_LOCK_DESTROY();
+#endif
+	SCTP_OS_TIMER_STOP_DRAIN(&SCTP_BASE_INFO(addr_wq_timer.timer));
+	SCTP_WQ_ADDR_LOCK();
+	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
+		LIST_REMOVE(wi, sctp_nxt_addr);
+		SCTP_DECR_LADDR_COUNT();
+		if (wi->action == SCTP_DEL_IP_ADDRESS) {
+			SCTP_FREE(wi->ifa, SCTP_M_IFA);
+		}
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_laddr), wi);
+	}
+	SCTP_WQ_ADDR_UNLOCK();
+
+	/*
+	 * free the vrf/ifn/ifa lists and hashes (be sure address monitor
+	 * is destroyed first).
+	 */
+	vrf_bucket = &SCTP_BASE_INFO(sctp_vrfhash)[(SCTP_DEFAULT_VRFID & SCTP_BASE_INFO(hashvrfmark))];
+	LIST_FOREACH_SAFE(vrf, vrf_bucket, next_vrf, nvrf) {
+		LIST_FOREACH_SAFE(ifn, &vrf->ifnlist, next_ifn, nifn) {
+			LIST_FOREACH_SAFE(ifa, &ifn->ifalist, next_ifa, nifa) {
+				/* free the ifa */
+				LIST_REMOVE(ifa, next_bucket);
+				LIST_REMOVE(ifa, next_ifa);
+				SCTP_FREE(ifa, SCTP_M_IFA);
+			}
+			/* free the ifn */
+			LIST_REMOVE(ifn, next_bucket);
+			LIST_REMOVE(ifn, next_ifn);
+			SCTP_FREE(ifn, SCTP_M_IFN);
+		}
+		SCTP_HASH_FREE(vrf->vrf_addr_hash, vrf->vrf_addr_hashmark);
+		/* free the vrf */
+		LIST_REMOVE(vrf, next_vrf);
+		SCTP_FREE(vrf, SCTP_M_VRF);
+	}
+	/* free the vrf hashes */
+	SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_vrfhash), SCTP_BASE_INFO(hashvrfmark));
+	SCTP_HASH_FREE(SCTP_BASE_INFO(vrf_ifn_hash), SCTP_BASE_INFO(vrf_ifn_hashmark));
+
+	/* free the TIMEWAIT list elements malloc'd in the function
+	 * sctp_add_vtag_to_timewait()...
+	 */
+	for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
+		chain = &SCTP_BASE_INFO(vtag_timewait)[i];
+		if (!LIST_EMPTY(chain)) {
+			prev_twait_block = NULL;
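+			/* Free each block one step behind the iterator so
+			 * the list links stay valid while walking.
+			 */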
+			LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+				if (prev_twait_block) {
+					SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
+				}
+				prev_twait_block = twait_block;
+			}
+			SCTP_FREE(prev_twait_block, SCTP_M_TIMW);
+		}
+	}
+
+	/* free the locks and mutexes */
+#if defined(__APPLE__)
+	SCTP_TIMERQ_LOCK_DESTROY();
+#endif
+#ifdef SCTP_PACKET_LOGGING
+	SCTP_IP_PKTLOG_DESTROY();
+#endif
+	SCTP_IPI_ADDR_DESTROY();
+#if defined(__APPLE__)
+	SCTP_IPI_COUNT_DESTROY();
+#endif
+	SCTP_STATLOG_DESTROY();
+	SCTP_INP_INFO_LOCK_DESTROY();
+
+	SCTP_WQ_ADDR_DESTROY();
+
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+	lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_grp_attr);
+	lck_grp_free(SCTP_BASE_INFO(sctbinfo).mtx_grp);
+	lck_attr_free(SCTP_BASE_INFO(sctbinfo).mtx_attr);
+#else
+	lck_grp_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp_attr);
+	lck_grp_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_grp);
+	lck_attr_free(SCTP_BASE_INFO(sctbinfo).ipi_lock_attr);
+#endif
+#endif
+#if defined(__Userspace__)
+	SCTP_TIMERQ_LOCK_DESTROY();
+	SCTP_ZONE_DESTROY(zone_mbuf);
+	SCTP_ZONE_DESTROY(zone_clust);
+	SCTP_ZONE_DESTROY(zone_ext_refcnt);
+#endif
+	/* Get rid of other stuff too. */
+	if (SCTP_BASE_INFO(sctp_asochash) != NULL)
+		SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark));
+	if (SCTP_BASE_INFO(sctp_ephash) != NULL)
+		SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
+	if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
+		SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));
+
+#if defined(__Windows__) || defined(__FreeBSD__) || defined(__Userspace__)
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_ep));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asoc));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_laddr));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_net));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_chunk));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_readq));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_strmoq));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf));
+	SCTP_ZONE_DESTROY(SCTP_BASE_INFO(ipi_zone_asconf_ack));
+#endif
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+	SCTP_FREE(SCTP_BASE_STATS, SCTP_M_MCORE);
+#endif
+}
+
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
+                              int offset, int limit,
+                              struct sockaddr *src, struct sockaddr *dst,
+                              struct sockaddr *altsa, uint16_t port)
+{
+	/*
+	 * grub through the INIT pulling addresses and loading them to the
+	 * nets structure in the asoc. The from address in the mbuf should
+	 * also be loaded (if it is not already). This routine can be called
+	 * with either INIT or INIT-ACK's as long as the m points to the IP
+	 * packet and the offset points to the beginning of the parameters.
+	 */
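+	/*
+	 * Each parameter below is a type/length TLV (struct sctp_paramhdr);
+	 * the walk advances offset by SCTP_SIZE32(plen) to keep 32-bit
+	 * alignment and stops at 'limit' or on a zero/overlong length.
+	 */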
+	struct sctp_inpcb *inp;
+	struct sctp_nets *net, *nnet, *net_tmp;
+	struct sctp_paramhdr *phdr, parm_buf;
+	struct sctp_tcb *stcb_tmp;
+	uint16_t ptype, plen;
+	struct sockaddr *sa;
+	uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_auth_random *p_random = NULL;
+	uint16_t random_len = 0;
+	uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_auth_hmac_algo *hmacs = NULL;
+	uint16_t hmacs_len = 0;
+	uint8_t saw_asconf = 0;
+	uint8_t saw_asconf_ack = 0;
+	uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
+	struct sctp_auth_chunk_list *chunks = NULL;
+	uint16_t num_chunks = 0;
+	sctp_key_t *new_key;
+	uint32_t keylen;
+	int got_random = 0, got_hmacs = 0, got_chklist = 0;
+	uint8_t peer_supports_ecn;
+	uint8_t peer_supports_prsctp;
+	uint8_t peer_supports_auth;
+	uint8_t peer_supports_asconf;
+	uint8_t peer_supports_asconf_ack;
+	uint8_t peer_supports_reconfig;
+	uint8_t peer_supports_nrsack;
+	uint8_t peer_supports_pktdrop;
+	uint8_t peer_supports_idata;
+#ifdef INET
+	struct sockaddr_in sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 sin6;
+#endif
+
+	/* First get the destination address setup too. */
+#ifdef INET
+	memset(&sin, 0, sizeof(sin));
+	sin.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	sin.sin_len = sizeof(sin);
+#endif
+	sin.sin_port = stcb->rport;
+#endif
+#ifdef INET6
+	memset(&sin6, 0, sizeof(sin6));
+	sin6.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+	sin6.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+	sin6.sin6_port = stcb->rport;
+#endif
+	if (altsa) {
+		sa = altsa;
+	} else {
+		sa = src;
+	}
+	peer_supports_idata = 0;
+	peer_supports_ecn = 0;
+	peer_supports_prsctp = 0;
+	peer_supports_auth = 0;
+	peer_supports_asconf = 0;
+	peer_supports_asconf_ack = 0;
+	peer_supports_reconfig = 0;
+	peer_supports_nrsack = 0;
+	peer_supports_pktdrop = 0;
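+	/*
+	 * Mark-and-sweep: every net currently on the list is tagged with
+	 * SCTP_ADDR_NOT_IN_ASSOC below; the flag is cleared for addresses
+	 * seen in the INIT, and whatever is still tagged after the parameter
+	 * walk is purged at the end of this function.
+	 */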
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		/* mark all addresses that we have currently on the list */
+		net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
+	}
+	/* does the source address already exist? if so skip it */
+	inp = stcb->sctp_ep;
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, dst, stcb);
+	atomic_add_int(&stcb->asoc.refcnt, -1);
+
+	if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
+		/* we must add the source address */
+		/* no scope set here since we have a tcb already. */
+		switch (sa->sa_family) {
+#ifdef INET
+		case AF_INET:
+			if (stcb->asoc.scope.ipv4_addr_legal) {
+				if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) {
+					return (-1);
+				}
+			}
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			if (stcb->asoc.scope.ipv6_addr_legal) {
+				if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) {
+					return (-2);
+				}
+			}
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			if (stcb->asoc.scope.conn_addr_legal) {
+				if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) {
+					return (-2);
+				}
+			}
+			break;
+#endif
+		default:
+			break;
+		}
+	} else {
+		if (net_tmp != NULL && stcb_tmp == stcb) {
+			net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
+		} else if (stcb_tmp != stcb) {
+			/* It belongs to another association? */
+			if (stcb_tmp)
+				SCTP_TCB_UNLOCK(stcb_tmp);
+			return (-3);
+		}
+	}
+	if (stcb->asoc.state == 0) {
+		/* the assoc was freed? */
+		return (-4);
+	}
+	/* now we must go through each of the params. */
+	phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
+	while (phdr) {
+		ptype = ntohs(phdr->param_type);
+		plen = ntohs(phdr->param_length);
+		/*
+		 * SCTP_PRINTF("ptype => %0x, plen => %d\n", (uint32_t)ptype,
+		 * (int)plen);
+		 */
+		if (offset + plen > limit) {
+			break;
+		}
+		if (plen == 0) {
+			break;
+		}
+#ifdef INET
+		if (ptype == SCTP_IPV4_ADDRESS) {
+			if (stcb->asoc.scope.ipv4_addr_legal) {
+				struct sctp_ipv4addr_param *p4, p4_buf;
+
+				/* ok get the v4 address and check/add */
+				phdr = sctp_get_next_param(m, offset,
+							   (struct sctp_paramhdr *)&p4_buf,
+							   sizeof(p4_buf));
+				if (plen != sizeof(struct sctp_ipv4addr_param) ||
+				    phdr == NULL) {
+					return (-5);
+				}
+				p4 = (struct sctp_ipv4addr_param *)phdr;
+				sin.sin_addr.s_addr = p4->addr;
+				if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
+					/* Skip multi-cast addresses */
+					goto next_param;
+				}
+				if ((sin.sin_addr.s_addr == INADDR_BROADCAST) ||
+				    (sin.sin_addr.s_addr == INADDR_ANY)) {
+					goto next_param;
+				}
+				sa = (struct sockaddr *)&sin;
+				inp = stcb->sctp_ep;
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+									dst, stcb);
+				atomic_add_int(&stcb->asoc.refcnt, -1);
+
+				if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
+				    inp == NULL) {
+					/* we must add the source address */
+					/*
+					 * no scope set since we have a tcb
+					 * already
+					 */
+
+					/*
+					 * we must validate the state again
+					 * here
+					 */
+				add_it_now:
+					if (stcb->asoc.state == 0) {
+						/* the assoc was freed? */
+						return (-7);
+					}
+					if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) {
+						return (-8);
+					}
+				} else if (stcb_tmp == stcb) {
+					if (stcb->asoc.state == 0) {
+						/* the assoc was freed? */
+						return (-10);
+					}
+					if (net != NULL) {
+						/* clear flag */
+						net->dest_state &=
+							~SCTP_ADDR_NOT_IN_ASSOC;
+					}
+				} else {
+					/*
+					 * strange, address is in another
+					 * assoc? straighten out locks.
+					 */
+					if (stcb_tmp) {
+						if (SCTP_GET_STATE(&stcb_tmp->asoc) & SCTP_STATE_COOKIE_WAIT) {
+							struct mbuf *op_err;
+							char msg[SCTP_DIAG_INFO_LEN];
+
+							/* in setup state we abort this guy */
+							snprintf(msg, sizeof(msg),
+							         "%s:%d at %s", __FILE__, __LINE__, __func__);
+							op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+							         msg);
+							sctp_abort_an_association(stcb_tmp->sctp_ep,
+										  stcb_tmp, op_err,
+							                          SCTP_SO_NOT_LOCKED);
+							goto add_it_now;
+						}
+						SCTP_TCB_UNLOCK(stcb_tmp);
+					}
+
+					if (stcb->asoc.state == 0) {
+						/* the assoc was freed? */
+						return (-12);
+					}
+					return (-13);
+				}
+			}
+		} else
+#endif
+#ifdef INET6
+		if (ptype == SCTP_IPV6_ADDRESS) {
+			if (stcb->asoc.scope.ipv6_addr_legal) {
+				/* ok get the v6 address and check/add */
+				struct sctp_ipv6addr_param *p6, p6_buf;
+
+				phdr = sctp_get_next_param(m, offset,
+							   (struct sctp_paramhdr *)&p6_buf,
+							   sizeof(p6_buf));
+				if (plen != sizeof(struct sctp_ipv6addr_param) ||
+				    phdr == NULL) {
+					return (-14);
+				}
+				p6 = (struct sctp_ipv6addr_param *)phdr;
+				memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
+				       sizeof(p6->addr));
+				if (IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) {
+					/* Skip multi-cast addresses */
+					goto next_param;
+				}
+				if (IN6_IS_ADDR_LINKLOCAL(&sin6.sin6_addr)) {
+					/* Link-local makes no sense without scope */
+					goto next_param;
+				}
+				sa = (struct sockaddr *)&sin6;
+				inp = stcb->sctp_ep;
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
+									dst, stcb);
+				atomic_add_int(&stcb->asoc.refcnt, -1);
+				if (stcb_tmp == NULL &&
+				    (inp == stcb->sctp_ep || inp == NULL)) {
+					/*
+					 * we must validate the state again
+					 * here
+					 */
+				add_it_now6:
+					if (stcb->asoc.state == 0) {
+						/* the assoc was freed? */
+						return (-16);
+					}
+					/*
+					 * we must add the address, no scope
+					 * set
+					 */
+					if (sctp_add_remote_addr(stcb, sa, NULL, port, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) {
+						return (-17);
+					}
+				} else if (stcb_tmp == stcb) {
+					/*
+					 * we must validate the state again
+					 * here
+					 */
+					if (stcb->asoc.state == 0) {
+						/* the assoc was freed? */
+						return (-19);
+					}
+					if (net != NULL) {
+						/* clear flag */
+						net->dest_state &=
+							~SCTP_ADDR_NOT_IN_ASSOC;
+					}
+				} else {
+					/*
+					 * strange, address is in another
+					 * assoc? straighten out locks.
+					 */
+					if (stcb_tmp) {
+						if (SCTP_GET_STATE(&stcb_tmp->asoc) & SCTP_STATE_COOKIE_WAIT) {
+							struct mbuf *op_err;
+							char msg[SCTP_DIAG_INFO_LEN];
+
+							/* in setup state we abort this guy */
+							snprintf(msg, sizeof(msg),
+							         "%s:%d at %s", __FILE__, __LINE__, __func__);
+							op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+							         msg);
+							sctp_abort_an_association(stcb_tmp->sctp_ep,
+										  stcb_tmp, op_err,
+							                          SCTP_SO_NOT_LOCKED);
+							goto add_it_now6;
+						}
+						SCTP_TCB_UNLOCK(stcb_tmp);
+					}
+					if (stcb->asoc.state == 0) {
+						/* the assoc was freed? */
+						return (-21);
+					}
+					return (-22);
+				}
+			}
+		} else
+#endif
+		if (ptype == SCTP_ECN_CAPABLE) {
+			peer_supports_ecn = 1;
+		} else if (ptype == SCTP_ULP_ADAPTATION) {
+			if (stcb->asoc.state != SCTP_STATE_OPEN) {
+				struct sctp_adaptation_layer_indication ai, *aip;
+
+				phdr = sctp_get_next_param(m, offset,
+							   (struct sctp_paramhdr *)&ai, sizeof(ai));
+				aip = (struct sctp_adaptation_layer_indication *)phdr;
+				if (aip) {
+					stcb->asoc.peers_adaptation = ntohl(aip->indication);
+					stcb->asoc.adaptation_needed = 1;
+				}
+			}
+		} else if (ptype == SCTP_SET_PRIM_ADDR) {
+			struct sctp_asconf_addr_param lstore, *fee;
+			int lptype;
+			struct sockaddr *lsa = NULL;
+#ifdef INET
+			struct sctp_asconf_addrv4_param *fii;
+#endif
+
+			if (stcb->asoc.asconf_supported == 0) {
+				return (-100);
+			}
+			if (plen > sizeof(lstore)) {
+				return (-23);
+			}
+			phdr = sctp_get_next_param(m, offset,
+						   (struct sctp_paramhdr *)&lstore,
+						   min(plen,sizeof(lstore)));
+			if (phdr == NULL) {
+				return (-24);
+			}
+			fee = (struct sctp_asconf_addr_param *)phdr;
+			lptype = ntohs(fee->addrp.ph.param_type);
+			switch (lptype) {
+#ifdef INET
+			case SCTP_IPV4_ADDRESS:
+				if (plen !=
+				    sizeof(struct sctp_asconf_addrv4_param)) {
+					SCTP_PRINTF("Sizeof setprim in init/init ack not %d but %d - ignored\n",
+						    (int)sizeof(struct sctp_asconf_addrv4_param),
+						    plen);
+				} else {
+					fii = (struct sctp_asconf_addrv4_param *)fee;
+					sin.sin_addr.s_addr = fii->addrp.addr;
+					lsa = (struct sockaddr *)&sin;
+				}
+				break;
+#endif
+#ifdef INET6
+			case SCTP_IPV6_ADDRESS:
+				if (plen !=
+				    sizeof(struct sctp_asconf_addr_param)) {
+					SCTP_PRINTF("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
+						    (int)sizeof(struct sctp_asconf_addr_param),
+						    plen);
+				} else {
+					memcpy(sin6.sin6_addr.s6_addr,
+					       fee->addrp.addr,
+					       sizeof(fee->addrp.addr));
+					lsa = (struct sockaddr *)&sin6;
+				}
+				break;
+#endif
+			default:
+				break;
+			}
+			if (lsa) {
+				(void)sctp_set_primary_addr(stcb, sa, NULL);
+			}
+		} else if (ptype == SCTP_HAS_NAT_SUPPORT) {
+			stcb->asoc.peer_supports_nat = 1;
+		} else if (ptype == SCTP_PRSCTP_SUPPORTED) {
+			/* Peer supports pr-sctp */
+			peer_supports_prsctp = 1;
+		} else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
+			/* A supported extension chunk */
+			struct sctp_supported_chunk_types_param *pr_supported;
+			uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
+			int num_ent, i;
+
+			phdr = sctp_get_next_param(m, offset,
+						   (struct sctp_paramhdr *)&local_store, min(sizeof(local_store),plen));
+			if (phdr == NULL) {
+				return (-25);
+			}
+			pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
+			num_ent = plen - sizeof(struct sctp_paramhdr);
+			for (i = 0; i < num_ent; i++) {
+				switch (pr_supported->chunk_types[i]) {
+				case SCTP_ASCONF:
+					peer_supports_asconf = 1;
+					break;
+				case SCTP_ASCONF_ACK:
+					peer_supports_asconf_ack = 1;
+					break;
+				case SCTP_FORWARD_CUM_TSN:
+					peer_supports_prsctp = 1;
+					break;
+				case SCTP_PACKET_DROPPED:
+					peer_supports_pktdrop = 1;
+					break;
+				case SCTP_NR_SELECTIVE_ACK:
+					peer_supports_nrsack = 1;
+					break;
+				case SCTP_STREAM_RESET:
+					peer_supports_reconfig = 1;
+					break;
+				case SCTP_AUTHENTICATION:
+					peer_supports_auth = 1;
+					break;
+				case SCTP_IDATA:
+					peer_supports_idata = 1;
+					break;
+				default:
+					/* one I have not learned yet */
+					break;
+
+				}
+			}
+		} else if (ptype == SCTP_RANDOM) {
+			if (plen > sizeof(random_store))
+				break;
+			if (got_random) {
+				/* already processed a RANDOM */
+				goto next_param;
+			}
+			phdr = sctp_get_next_param(m, offset,
+						   (struct sctp_paramhdr *)random_store,
+						   min(sizeof(random_store),plen));
+			if (phdr == NULL)
+				return (-26);
+			p_random = (struct sctp_auth_random *)phdr;
+			random_len = plen - sizeof(*p_random);
+			/* enforce the random length */
+			if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
+				SCTPDBG(SCTP_DEBUG_AUTH1, "SCTP: invalid RANDOM len\n");
+				return (-27);
+			}
+			got_random = 1;
+		} else if (ptype == SCTP_HMAC_LIST) {
+			uint16_t num_hmacs;
+			uint16_t i;
+
+			if (plen > sizeof(hmacs_store))
+				break;
+			if (got_hmacs) {
+				/* already processed a HMAC list */
+				goto next_param;
+			}
+			phdr = sctp_get_next_param(m, offset,
+						   (struct sctp_paramhdr *)hmacs_store,
+						   min(plen,sizeof(hmacs_store)));
+			if (phdr == NULL)
+				return (-28);
+			hmacs = (struct sctp_auth_hmac_algo *)phdr;
+			hmacs_len = plen - sizeof(*hmacs);
+			num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
+			/* validate the hmac list */
+			if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
+				return (-29);
+			}
+			if (stcb->asoc.peer_hmacs != NULL)
+				sctp_free_hmaclist(stcb->asoc.peer_hmacs);
+			stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs);
+			if (stcb->asoc.peer_hmacs != NULL) {
+				for (i = 0; i < num_hmacs; i++) {
+					(void)sctp_auth_add_hmacid(stcb->asoc.peer_hmacs,
+								   ntohs(hmacs->hmac_ids[i]));
+				}
+			}
+			got_hmacs = 1;
+		} else if (ptype == SCTP_CHUNK_LIST) {
+			int i;
+
+			if (plen > sizeof(chunks_store))
+				break;
+			if (got_chklist) {
+				/* already processed a Chunks list */
+				goto next_param;
+			}
+			phdr = sctp_get_next_param(m, offset,
+						   (struct sctp_paramhdr *)chunks_store,
+						   min(plen,sizeof(chunks_store)));
+			if (phdr == NULL)
+				return (-30);
+			chunks = (struct sctp_auth_chunk_list *)phdr;
+			num_chunks = plen - sizeof(*chunks);
+			if (stcb->asoc.peer_auth_chunks != NULL)
+				sctp_clear_chunklist(stcb->asoc.peer_auth_chunks);
+			else
+				stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist();
+			for (i = 0; i < num_chunks; i++) {
+				(void)sctp_auth_add_chunk(chunks->chunk_types[i],
+							  stcb->asoc.peer_auth_chunks);
+				/* record asconf/asconf-ack if listed */
+				if (chunks->chunk_types[i] == SCTP_ASCONF)
+					saw_asconf = 1;
+				if (chunks->chunk_types[i] == SCTP_ASCONF_ACK)
+					saw_asconf_ack = 1;
+
+			}
+			got_chklist = 1;
+		} else if ((ptype == SCTP_HEARTBEAT_INFO) ||
+			   (ptype == SCTP_STATE_COOKIE) ||
+			   (ptype == SCTP_UNRECOG_PARAM) ||
+			   (ptype == SCTP_COOKIE_PRESERVE) ||
+			   (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
+			   (ptype == SCTP_ADD_IP_ADDRESS) ||
+			   (ptype == SCTP_DEL_IP_ADDRESS) ||
+			   (ptype == SCTP_ERROR_CAUSE_IND) ||
+			   (ptype == SCTP_SUCCESS_REPORT)) {
+			/* don't care */ ;
+		} else {
+			if ((ptype & 0x8000) == 0x0000) {
+				/*
+				 * must stop processing the rest of the
+				 * param's. Any report bits were handled
+				 * with the call to
+				 * sctp_arethere_unrecognized_parameters()
+				 * when the INIT or INIT-ACK was first seen.
+				 */
+				break;
+			}
+		}
+
+	next_param:
+		offset += SCTP_SIZE32(plen);
+		if (offset >= limit) {
+			break;
+		}
+		phdr = sctp_get_next_param(m, offset, &parm_buf,
+					   sizeof(parm_buf));
+	}
+	/* Now check to see if we need to purge any addresses */
+	TAILQ_FOREACH_SAFE(net, &stcb->asoc.nets, sctp_next, nnet) {
+		if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
+		    SCTP_ADDR_NOT_IN_ASSOC) {
+			/* This address has been removed from the asoc */
+			/* remove and free it */
+			stcb->asoc.numnets--;
+			TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
+			sctp_free_remote_addr(net);
+			if (net == stcb->asoc.primary_destination) {
+				stcb->asoc.primary_destination = NULL;
+				sctp_select_primary_destination(stcb);
+			}
+		}
+	}
+	if ((stcb->asoc.ecn_supported == 1) &&
+	    (peer_supports_ecn == 0)) {
+		stcb->asoc.ecn_supported = 0;
+	}
+	if ((stcb->asoc.prsctp_supported == 1) &&
+	    (peer_supports_prsctp == 0)) {
+		stcb->asoc.prsctp_supported = 0;
+	}
+	if ((stcb->asoc.auth_supported == 1) &&
+	    ((peer_supports_auth == 0) ||
+	     (got_random == 0) || (got_hmacs == 0))) {
+		stcb->asoc.auth_supported = 0;
+	}
+	if ((stcb->asoc.asconf_supported == 1) &&
+	    ((peer_supports_asconf == 0) || (peer_supports_asconf_ack == 0) ||
+	     (stcb->asoc.auth_supported == 0) ||
+	     (saw_asconf == 0) || (saw_asconf_ack == 0))) {
+		stcb->asoc.asconf_supported = 0;
+	}
+	if ((stcb->asoc.reconfig_supported == 1) &&
+	    (peer_supports_reconfig == 0)) {
+		stcb->asoc.reconfig_supported = 0;
+	}
+	if ((stcb->asoc.idata_supported == 1) &&
+	    (peer_supports_idata == 0)) {
+		stcb->asoc.idata_supported = 0;
+	}
+	if ((stcb->asoc.nrsack_supported == 1) &&
+	    (peer_supports_nrsack == 0)) {
+		stcb->asoc.nrsack_supported = 0;
+	}
+	if ((stcb->asoc.pktdrop_supported == 1) &&
+	    (peer_supports_pktdrop == 0)){
+		stcb->asoc.pktdrop_supported = 0;
+	}
+	/* validate authentication required parameters */
+	if ((peer_supports_auth == 0) && (got_chklist == 1)) {
+		/* peer does not support auth but sent a chunks list? */
+		return (-31);
+	}
+	if ((peer_supports_asconf == 1) && (peer_supports_auth == 0)) {
+		/* peer supports asconf but not auth? */
+		return (-32);
+	} else if ((peer_supports_asconf == 1) &&
+	           (peer_supports_auth == 1) &&
+		   ((saw_asconf == 0) || (saw_asconf_ack == 0))) {
+		return (-33);
+	}
+	/* concatenate the full random key */
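+	/* Layout matches the copies below: the RANDOM parameter first, then
+	 * the CHUNKS list (if present), then the HMAC-ALGO parameter.
+	 */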
+	keylen = sizeof(*p_random) + random_len + sizeof(*hmacs) + hmacs_len;
+	if (chunks != NULL) {
+		keylen += sizeof(*chunks) + num_chunks;
+	}
+	new_key = sctp_alloc_key(keylen);
+	if (new_key != NULL) {
+		/* copy in the RANDOM */
+		if (p_random != NULL) {
+			keylen = sizeof(*p_random) + random_len;
+			bcopy(p_random, new_key->key, keylen);
+		}
+		/* append in the AUTH chunks */
+		if (chunks != NULL) {
+			bcopy(chunks, new_key->key + keylen,
+			      sizeof(*chunks) + num_chunks);
+			keylen += sizeof(*chunks) + num_chunks;
+		}
+		/* append in the HMACs */
+		if (hmacs != NULL) {
+			bcopy(hmacs, new_key->key + keylen,
+			      sizeof(*hmacs) + hmacs_len);
+		}
+	} else {
+		/* failed to get memory for the key */
+		return (-34);
+	}
+	if (stcb->asoc.authinfo.peer_random != NULL)
+		sctp_free_key(stcb->asoc.authinfo.peer_random);
+	stcb->asoc.authinfo.peer_random = new_key;
+	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
+	sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
+
+	return (0);
+}
+
+int
+sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
+		      struct sctp_nets *net)
+{
+	/* make sure the requested primary address exists in the assoc */
+	if (net == NULL && sa)
+		net = sctp_findnet(stcb, sa);
+
+	if (net == NULL) {
+		/* didn't find the requested primary address! */
+		return (-1);
+	} else {
+		/* set the primary address */
+		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+			/* Must be confirmed, so queue to set */
+			net->dest_state |= SCTP_ADDR_REQ_PRIMARY;
+			return (0);
+		}
+		stcb->asoc.primary_destination = net;
+		if (!(net->dest_state & SCTP_ADDR_PF) && (stcb->asoc.alternate)) {
+			sctp_free_remote_addr(stcb->asoc.alternate);
+			stcb->asoc.alternate = NULL;
+		}
+		net = TAILQ_FIRST(&stcb->asoc.nets);
+		if (net != stcb->asoc.primary_destination) {
+			/* The first one on the list is NOT the primary.
+			 * sctp_cmpaddr() is much more efficient if the
+			 * primary is first on the list, so make it so.
+			 */
+			TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+			TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
+		}
+		return (0);
+	}
+}
+
+int
+sctp_is_vtag_good(uint32_t tag, uint16_t lport, uint16_t rport, struct timeval *now)
+{
+	/*
+	 * This function serves two purposes. It checks whether a TAG can be
+	 * re-used, returning 1 if it is ok to use and 0 if it must not be
+	 * used. As a secondary function it purges old tags that can be
+	 * removed.
+	 */
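+	/*
+	 * A tag is rejected if a live association in the vtag hash already
+	 * uses it with the same ports, or if it still sits in a time-wait
+	 * block for that lport/rport pair; expired time-wait entries are
+	 * cleared as a side effect of the scan.
+	 */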
+	struct sctpvtaghead *chain;
+	struct sctp_tagblock *twait_block;
+	struct sctpasochead *head;
+	struct sctp_tcb *stcb;
+	int i;
+
+	SCTP_INP_INFO_RLOCK();
+	head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag,
+								SCTP_BASE_INFO(hashasocmark))];
+	LIST_FOREACH(stcb, head, sctp_asocs) {
+		/* We choose not to lock anything here. TCB's can't be
+		 * removed since we have the read lock, so they can't
+		 * be freed on us, same thing for the INP. I may
+		 * be wrong with this assumption, but we will go
+		 * with it for now :-)
+		 */
+		if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			continue;
+		}
+		if (stcb->asoc.my_vtag == tag) {
+			/* candidate */
+			if (stcb->rport != rport) {
+				continue;
+			}
+			if (stcb->sctp_ep->sctp_lport != lport) {
+				continue;
+			}
+			/* It's a tag that is already in use */
+			SCTP_INP_INFO_RUNLOCK();
+			return (0);
+		}
+	}
+	chain = &SCTP_BASE_INFO(vtag_timewait)[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
+	/* Now what about timed wait ? */
+	LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
+		/*
+		 * Block(s) are present, lets see if we have this tag in the
+		 * list
+		 */
+		for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
+			if (twait_block->vtag_block[i].v_tag == 0) {
+				/* not used */
+				continue;
+			} else if ((long)twait_block->vtag_block[i].tv_sec_at_expire  <
+				   now->tv_sec) {
+				/* Audit expires this guy */
+				twait_block->vtag_block[i].tv_sec_at_expire = 0;
+				twait_block->vtag_block[i].v_tag = 0;
+				twait_block->vtag_block[i].lport = 0;
+				twait_block->vtag_block[i].rport = 0;
+			} else if ((twait_block->vtag_block[i].v_tag == tag) &&
+				   (twait_block->vtag_block[i].lport == lport) &&
+				   (twait_block->vtag_block[i].rport == rport)) {
+				/* Bad tag, sorry :< */
+				SCTP_INP_INFO_RUNLOCK();
+				return (0);
+			}
+		}
+	}
+	SCTP_INP_INFO_RUNLOCK();
+	return (1);
+}
+
+static void
+sctp_drain_mbufs(struct sctp_tcb *stcb)
+{
+	/*
+	 * We must hunt this association for MBUF's past the cumack (i.e.
+	 * out of order data that we can renege on).
+	 */
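+	/*
+	 * Both the ordered and unordered in-queues of every inbound stream
+	 * are scanned; anything above cumulative_tsn + 1 is freed, its bit
+	 * is cleared in the mapping array, and highest_tsn_inside_map is
+	 * recomputed afterwards.
+	 */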
+	struct sctp_association *asoc;
+	struct sctp_tmit_chunk *chk, *nchk;
+	uint32_t cumulative_tsn_p1;
+	struct sctp_queued_to_read *ctl, *nctl;
+	int cnt, strmat;
+	uint32_t gap, i;
+	int fnd = 0;
+
+	/* We look for anything larger than the cum-ack + 1 */
+
+	asoc = &stcb->asoc;
+	if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) {
+		/* none we can reneg on. */
+		return;
+	}
+	SCTP_STAT_INCR(sctps_protocol_drains_done);
+	cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
+	cnt = 0;
+	/* Ok that was fun, now we will drain all the inbound streams? */
+	for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
+		TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[strmat].inqueue, next_instrm, nctl) {
+#ifdef INVARIANTS
+			if (ctl->on_strm_q != SCTP_ON_ORDERED ) {
+				panic("Huh control: %p on_q: %d -- not ordered?",
+				      ctl, ctl->on_strm_q);
+			}
+#endif
+			if (SCTP_TSN_GT(ctl->sinfo_tsn, cumulative_tsn_p1)) {
+				/* Yep it is above cum-ack */
+				cnt++;
+				SCTP_CALC_TSN_TO_GAP(gap, ctl->sinfo_tsn, asoc->mapping_array_base_tsn);
+				asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length);
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+				if (ctl->on_read_q) {
+					TAILQ_REMOVE(&stcb->sctp_ep->read_queue, ctl, next);
+					ctl->on_read_q = 0;
+				}
+				TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, ctl, next_instrm);
+				ctl->on_strm_q = 0;
+				if (ctl->data) {
+					sctp_m_freem(ctl->data);
+					ctl->data = NULL;
+				}
+				sctp_free_remote_addr(ctl->whoFrom);
+				/* Now drain its reassembly queue */
+				TAILQ_FOREACH_SAFE(chk, &ctl->reasm, sctp_next, nchk) {
+					cnt++;
+					SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
+					asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
+					sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+					SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+					TAILQ_REMOVE(&ctl->reasm, chk, sctp_next);
+					if (chk->data) {
+						sctp_m_freem(chk->data);
+						chk->data = NULL;
+					}
+					sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+				}
+				sctp_free_a_readq(stcb, ctl);
+			}
+		}
+		TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[strmat].uno_inqueue, next_instrm, nctl) {
+#ifdef INVARIANTS
+			if (ctl->on_strm_q != SCTP_ON_UNORDERED ) {
+				panic("Huh control: %p on_q: %d -- not unordered?",
+				      ctl, ctl->on_strm_q);
+			}
+#endif
+			if (SCTP_TSN_GT(ctl->sinfo_tsn, cumulative_tsn_p1)) {
+				/* Yep it is above cum-ack */
+				cnt++;
+				SCTP_CALC_TSN_TO_GAP(gap, ctl->sinfo_tsn, asoc->mapping_array_base_tsn);
+				asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length);
+				sctp_ucount_decr(asoc->cnt_on_all_streams);
+				SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+				if (ctl->on_read_q) {
+					TAILQ_REMOVE(&stcb->sctp_ep->read_queue, ctl, next);
+					ctl->on_read_q = 0;
+				}
+				TAILQ_REMOVE(&asoc->strmin[strmat].uno_inqueue, ctl, next_instrm);
+				ctl->on_strm_q = 0;
+				if (ctl->data) {
+					sctp_m_freem(ctl->data);
+					ctl->data = NULL;
+				}
+				sctp_free_remote_addr(ctl->whoFrom);
+				/* Now drain its reassembly queue */
+				TAILQ_FOREACH_SAFE(chk, &ctl->reasm, sctp_next, nchk) {
+					cnt++;
+					SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.tsn, asoc->mapping_array_base_tsn);
+					asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
+					sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+					SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
+					TAILQ_REMOVE(&ctl->reasm, chk, sctp_next);
+					if (chk->data) {
+						sctp_m_freem(chk->data);
+						chk->data = NULL;
+					}
+					sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+				}
+				sctp_free_a_readq(stcb, ctl);
+			}
+		}
+	}
+	if (cnt) {
+		/* We must back down to see what the new highest is */
+		for (i = asoc->highest_tsn_inside_map; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
+			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
+			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
+				asoc->highest_tsn_inside_map = i;
+				fnd = 1;
+				break;
+			}
+		}
+		if (!fnd) {
+			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
+		}
+
+		/*
+		 * Question, should we go through the delivery queue? The only
+		 * reason things are on here is the app not reading OR a p-d-api up.
+		 * An attacker COULD send enough in to initiate the PD-API and then
+		 * send a bunch of stuff to other streams... these would wind up on
+		 * the delivery queue.. and then we would not get to them. But in
+		 * order to do this I then have to back-track and un-deliver
+		 * sequence numbers in streams.. el-yucko. I think for now we will
+		 * NOT look at the delivery queue and leave it to be something to
+		 * consider later. An alternative would be to abort the P-D-API with
+		 * a notification and then deliver the data.... Or another method
+		 * might be to keep track of how many times the situation occurs and
+		 * if we see a possible attack underway just abort the association.
+		 */
+#ifdef SCTP_DEBUG
+		SCTPDBG(SCTP_DEBUG_PCB1, "Freed %d chunks from reneg harvest\n", cnt);
+#endif
+		/*
+		 * Now do we need to find a new
+		 * asoc->highest_tsn_inside_map?
+		 */
+		asoc->last_revoke_count = cnt;
+		(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
+		/*sa_ignore NO_NULL_CHK*/
+		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+		sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN, SCTP_SO_NOT_LOCKED);
+	}
+	/*
+	 * Another issue, in un-setting the TSN's in the mapping array we
+	 * DID NOT adjust the highest_tsn marker.  This will cause one of two
+	 * things to occur. It may cause us to do extra work in checking for
+	 * our mapping array movement. More importantly it may cause us to
+	 * SACK every datagram. This may not be a bad thing though since we
+	 * will recover once we get our cum-ack above and all this stuff we
+	 * dumped recovered.
+	 */
+}
+
+void
+sctp_drain(void)
+{
+	/*
+	 * We must walk the PCB lists for ALL associations here. The system
+	 * is LOW on MBUF's and needs help. This is where reneging will
+	 * occur. We really hope this does NOT happen!
+	 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	VNET_ITERATOR_DECL(vnet_iter);
+#else
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+
+	SCTP_STAT_INCR(sctps_protocol_drain_calls);
+	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+		return;
+	}
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	VNET_LIST_RLOCK_NOSLEEP();
+	VNET_FOREACH(vnet_iter) {
+		CURVNET_SET(vnet_iter);
+		struct sctp_inpcb *inp;
+		struct sctp_tcb *stcb;
+#endif
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		SCTP_STAT_INCR(sctps_protocol_drain_calls);
+		if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+#ifdef VIMAGE
+			continue;
+#else
+			return;
+#endif
+		}
+#endif
+		SCTP_INP_INFO_RLOCK();
+		LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+			/* For each endpoint */
+			SCTP_INP_RLOCK(inp);
+			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+				/* For each association */
+				SCTP_TCB_LOCK(stcb);
+				sctp_drain_mbufs(stcb);
+				SCTP_TCB_UNLOCK(stcb);
+			}
+			SCTP_INP_RUNLOCK(inp);
+		}
+		SCTP_INP_INFO_RUNLOCK();
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		CURVNET_RESTORE();
+	}
+	VNET_LIST_RUNLOCK_NOSLEEP();
+#endif
+}
+
+/*
+ * start a new iterator
+ * iterates through all endpoints and associations based on the pcb_state
+ * flags and asoc_state.  "af" (mandatory) is executed for all matching
+ * assocs and "ef" (optional) is executed when the iterator completes.
+ * "inpf" (optional) is executed for each new endpoint as it is being
+ * iterated through. inpe (optional) is called when the inp completes
+ * its way through all the stcbs.
+ */
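+/*
+ * Illustrative use only (my_asoc_fn/my_done_fn are hypothetical callbacks,
+ * not part of this file): walk every association and get a completion call:
+ *
+ *   (void)sctp_initiate_iterator(NULL, my_asoc_fn, NULL, 0, 0, 0, NULL, 0,
+ *                                my_done_fn, NULL, 0);
+ */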
+int
+sctp_initiate_iterator(inp_func inpf,
+		       asoc_func af,
+		       inp_func inpe,
+		       uint32_t pcb_state,
+		       uint32_t pcb_features,
+		       uint32_t asoc_state,
+		       void *argp,
+		       uint32_t argi,
+		       end_func ef,
+		       struct sctp_inpcb *s_inp,
+		       uint8_t chunk_output_off)
+{
+	struct sctp_iterator *it = NULL;
+
+	if (af == NULL) {
+		return (-1);
+	}
+	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+		SCTP_PRINTF("%s: abort on initialize being %d\n", __func__,
+		            SCTP_BASE_VAR(sctp_pcb_initialized));
+		return (-1);
+	}
+	SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
+		    SCTP_M_ITER);
+	if (it == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PCB, ENOMEM);
+		return (ENOMEM);
+	}
+	memset(it, 0, sizeof(*it));
+	it->function_assoc = af;
+	it->function_inp = inpf;
+	if (inpf)
+		it->done_current_ep = 0;
+	else
+		it->done_current_ep = 1;
+	it->function_atend = ef;
+	it->pointer = argp;
+	it->val = argi;
+	it->pcb_flags = pcb_state;
+	it->pcb_features = pcb_features;
+	it->asoc_state = asoc_state;
+	it->function_inp_end = inpe;
+	it->no_chunk_output = chunk_output_off;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	it->vn = curvnet;
+#endif
+	if (s_inp) {
+		/* Assume lock is held here */
+		it->inp = s_inp;
+		SCTP_INP_INCR_REF(it->inp);
+		it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
+	} else {
+		SCTP_INP_INFO_RLOCK();
+		it->inp = LIST_FIRST(&SCTP_BASE_INFO(listhead));
+		if (it->inp) {
+			SCTP_INP_INCR_REF(it->inp);
+		}
+		SCTP_INP_INFO_RUNLOCK();
+		it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
+
+	}
+	SCTP_IPI_ITERATOR_WQ_LOCK();
+	if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+		SCTP_IPI_ITERATOR_WQ_UNLOCK();
+		SCTP_PRINTF("%s: rollback on initialize being %d it=%p\n", __func__,
+		            SCTP_BASE_VAR(sctp_pcb_initialized), it);
+		SCTP_FREE(it, SCTP_M_ITER);
+		return (-1);
+	}
+	TAILQ_INSERT_TAIL(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+	if (sctp_it_ctl.iterator_running == 0) {
+		sctp_wakeup_iterator();
+	}
+	SCTP_IPI_ITERATOR_WQ_UNLOCK();
+	/* sa_ignore MEMLEAK {memory is put on the tailq for the iterator} */
+	return (0);
+}
diff --git a/usrsctplib/netinet/sctp_pcb.h b/usrsctplib/netinet/sctp_pcb.h
new file mode 100755
index 0000000..72309db
--- /dev/null
+++ b/usrsctplib/netinet/sctp_pcb.h
@@ -0,0 +1,910 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_PCB_H_
+#define _NETINET_SCTP_PCB_H_
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_constants.h>
+#include <netinet/sctp_sysctl.h>
+
+LIST_HEAD(sctppcbhead, sctp_inpcb);
+LIST_HEAD(sctpasochead, sctp_tcb);
+LIST_HEAD(sctpladdr, sctp_laddr);
+LIST_HEAD(sctpvtaghead, sctp_tagblock);
+LIST_HEAD(sctp_vrflist, sctp_vrf);
+LIST_HEAD(sctp_ifnlist, sctp_ifn);
+LIST_HEAD(sctp_ifalist, sctp_ifa);
+TAILQ_HEAD(sctp_readhead, sctp_queued_to_read);
+TAILQ_HEAD(sctp_streamhead, sctp_stream_queue_pending);
+
+#include <netinet/sctp_structs.h>
+#include <netinet/sctp_auth.h>
+
+#define SCTP_PCBHASH_ALLADDR(port, mask) (port & mask)
+#define SCTP_PCBHASH_ASOC(tag, mask) (tag & mask)
+#define MAXLEN_MBUF_CHAIN 32
+
+struct sctp_vrf {
+	LIST_ENTRY (sctp_vrf) next_vrf;
+	struct sctp_ifalist *vrf_addr_hash;
+	struct sctp_ifnlist ifnlist;
+	uint32_t vrf_id;
+	uint32_t tbl_id_v4;		/* default v4 table id */
+	uint32_t tbl_id_v6;		/* default v6 table id */
+	uint32_t total_ifa_count;
+	u_long   vrf_addr_hashmark;
+	uint32_t refcount;
+};
+
+struct sctp_ifn {
+	struct sctp_ifalist ifalist;
+	struct sctp_vrf *vrf;
+	LIST_ENTRY(sctp_ifn) next_ifn;
+	LIST_ENTRY(sctp_ifn) next_bucket;
+	void     *ifn_p;	/* never access without appropriate lock */
+	uint32_t ifn_mtu;
+	uint32_t ifn_type;
+	uint32_t ifn_index;	/* shorthand way to look at ifn for reference */
+	uint32_t refcount;	/* number of reference held should be >= ifa_count */
+	uint32_t ifa_count;	/* IFA's we hold (in our list - ifalist)*/
+	uint32_t num_v6;	/* number of v6 addresses */
+	uint32_t num_v4;	/* number of v4 addresses */
+	uint32_t registered_af;	/* registered address family for i/f events */
+	char     ifn_name[SCTP_IFNAMSIZ];
+};
+
+/* SCTP local IFA flags */
+#define SCTP_ADDR_VALID         0x00000001	/* it's up and active */
+#define SCTP_BEING_DELETED      0x00000002	/* being deleted,
+						 * when refcount = 0. Note
+						 * that it is pulled from the ifn list
+						 * and ifa_p is nulled right away but
+						 * it cannot be freed until the last *net
+						 * pointing to it is deleted.
+						 */
+#define SCTP_ADDR_DEFER_USE     0x00000004	/* Hold off using this one */
+#define SCTP_ADDR_IFA_UNUSEABLE 0x00000008
+
+struct sctp_ifa {
+	LIST_ENTRY(sctp_ifa) next_ifa;
+	LIST_ENTRY(sctp_ifa) next_bucket;
+	struct sctp_ifn *ifn_p;	/* back pointer to parent ifn */
+	void    *ifa;		/* pointer to ifa, needed for flag
+				 * updates, for which we MUST hold the
+				 * appropriate locks. This is for V6.
+				 */
+	union sctp_sockstore address;
+	uint32_t refcount;	/* number of folks referring to this */
+	uint32_t flags;
+	uint32_t localifa_flags;
+	uint32_t vrf_id;	/* vrf_id of this addr (for deleting) */
+	uint8_t src_is_loop;
+	uint8_t src_is_priv;
+	uint8_t src_is_glob;
+	uint8_t resv;
+};
+
+struct sctp_laddr {
+	LIST_ENTRY(sctp_laddr) sctp_nxt_addr;	/* next in list */
+	struct sctp_ifa *ifa;
+	uint32_t action;		/* Used during asconf and adding;
+					 * if non-zero, src-addr selection will
+					 * not consider this address.
+					 */
+	struct timeval start_time;      /* time when this address was created */
+};
+
+struct sctp_block_entry {
+	int error;
+};
+
+struct sctp_timewait {
+	uint32_t tv_sec_at_expire;	/* the seconds from boot to expire */
+	uint32_t v_tag;			/* the vtag that cannot be reused */
+	uint16_t lport;			/* the local port used in vtag */
+	uint16_t rport;			/* the remote port used in vtag */
+};
+
+struct sctp_tagblock {
+	LIST_ENTRY(sctp_tagblock) sctp_nxt_tagblock;
+	struct sctp_timewait vtag_block[SCTP_NUMBER_IN_VTAG_BLOCK];
+};
+
+
+struct sctp_epinfo {
+#if defined(__FreeBSD__)
+#ifdef INET
+	struct socket *udp4_tun_socket;
+#endif
+#ifdef INET6
+	struct socket *udp6_tun_socket;
+#endif
+#endif
+	struct sctpasochead *sctp_asochash;
+	u_long hashasocmark;
+
+	struct sctppcbhead *sctp_ephash;
+	u_long hashmark;
+
+	/*-
+	 * The TCP model represents a substantial overhead in that we get an
+	 * additional hash table to keep explicit connections in. The
+	 * listening TCP endpoint will exist in the usual ephash above and
+	 * accept only INIT's. It will be incapable of sending off an INIT.
+	 * When a dg arrives we must look in the normal ephash. If we find a
+	 * TCP endpoint that will tell us to go to the specific endpoint
+	 * hash and re-hash to find the right assoc/socket. If we find a UDP
+	 * model socket we then must complete the lookup. If this fails,
+	 * i.e. no association can be found then we must continue to see if
+	 * a sctp_peeloff()'d socket is in the tcpephash (a spun off socket
+	 * acts like a TCP model connected socket).
+	 */
+	struct sctppcbhead *sctp_tcpephash;
+	u_long hashtcpmark;
+	uint32_t hashtblsize;
+
+	struct sctp_vrflist *sctp_vrfhash;
+	u_long hashvrfmark;
+
+	struct sctp_ifnlist *vrf_ifn_hash;
+	u_long   vrf_ifn_hashmark;
+
+	struct sctppcbhead listhead;
+	struct sctpladdr addr_wq;
+
+#if defined(__APPLE__)
+	struct inpcbhead inplisthead;
+	struct inpcbinfo sctbinfo;
+#endif
+	/* ep zone info */
+	sctp_zone_t ipi_zone_ep;
+	sctp_zone_t ipi_zone_asoc;
+	sctp_zone_t ipi_zone_laddr;
+	sctp_zone_t ipi_zone_net;
+	sctp_zone_t ipi_zone_chunk;
+	sctp_zone_t ipi_zone_readq;
+	sctp_zone_t ipi_zone_strmoq;
+	sctp_zone_t ipi_zone_asconf;
+	sctp_zone_t ipi_zone_asconf_ack;
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 503000
+#if __FreeBSD_version <= 602000
+	struct mtx ipi_ep_mtx;
+#else
+	struct rwlock ipi_ep_mtx;
+#endif
+	struct mtx ipi_iterator_wq_mtx;
+#if __FreeBSD_version <= 602000
+	struct mtx ipi_addr_mtx;
+#else
+	struct rwlock ipi_addr_mtx;
+#endif
+	struct mtx ipi_pktlog_mtx;
+	struct mtx wq_addr_mtx;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+	userland_mutex_t ipi_ep_mtx;
+	userland_mutex_t ipi_addr_mtx;
+	userland_mutex_t ipi_count_mtx;
+	userland_mutex_t ipi_pktlog_mtx;
+	userland_mutex_t wq_addr_mtx;
+#elif defined(__APPLE__)
+#ifdef _KERN_LOCKS_H_
+	lck_mtx_t *ipi_addr_mtx;
+	lck_mtx_t *ipi_count_mtx;
+	lck_mtx_t *ipi_pktlog_mtx;
+	lck_mtx_t *logging_mtx;
+	lck_mtx_t *wq_addr_mtx;
+#else
+	void *ipi_count_mtx;
+	void *logging_mtx;
+#endif /* _KERN_LOCKS_H_ */
+#elif defined(__Windows__)
+	struct rwlock ipi_ep_lock;
+	struct rwlock ipi_addr_lock;
+	struct spinlock ipi_pktlog_mtx;
+	struct rwlock wq_addr_mtx;
+#elif defined(__Userspace__)
+    /* TODO decide on __Userspace__ locks */
+#endif
+	uint32_t ipi_count_ep;
+
+	/* assoc/tcb zone info */
+	uint32_t ipi_count_asoc;
+
+	/* local addrlist zone info */
+	uint32_t ipi_count_laddr;
+
+	/* remote addrlist zone info */
+	uint32_t ipi_count_raddr;
+
+	/* chunk structure list for output */
+	uint32_t ipi_count_chunk;
+
+	/* socket queue zone info */
+	uint32_t ipi_count_readq;
+
+	/* socket queue zone info */
+	uint32_t ipi_count_strmoq;
+
+	/* Number of vrfs */
+	uint32_t ipi_count_vrfs;
+
+        /* Number of ifns */
+	uint32_t ipi_count_ifns;
+
+        /* Number of ifas */
+	uint32_t ipi_count_ifas;
+
+	/* system wide number of free chunks hanging around */
+	uint32_t ipi_free_chunks;
+	uint32_t ipi_free_strmoq;
+
+	struct sctpvtaghead vtag_timewait[SCTP_STACK_VTAG_HASH_SIZE];
+
+	/* address work queue handling */
+	struct sctp_timer addr_wq_timer;
+
+#if defined(_SCTP_NEEDS_CALLOUT_) || defined(_USER_SCTP_NEEDS_CALLOUT_)
+	struct calloutlist callqueue;
+#endif
+};
+
+
+struct sctp_base_info {
+	/* All static structures that
+	 * anchor the system must be here.
+	 */
+	struct sctp_epinfo sctppcbinfo;
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+	struct sctpstat    *sctpstat;
+#else
+	struct sctpstat    sctpstat;
+#endif
+	struct sctp_sysctl sctpsysctl;
+	uint8_t first_time;
+	char sctp_pcb_initialized;
+#if defined(SCTP_PACKET_LOGGING)
+	int packet_log_writers;
+	int packet_log_end;
+	uint8_t packet_log_buffer[SCTP_PACKET_LOG_SIZE];
+#endif
+#if defined(__APPLE__)
+	int sctp_main_timer_ticks;
+#endif
+#if defined(__Userspace__)
+	userland_mutex_t timer_mtx;
+	userland_thread_t timer_thread;
+	uint8_t timer_thread_should_exit;
+#if !defined(__Userspace_os_Windows)
+	pthread_mutexattr_t mtx_attr;
+#if defined(INET) || defined(INET6)
+	int userspace_route;
+	userland_thread_t recvthreadroute;
+#endif
+#endif
+#ifdef INET
+#if defined(__Userspace_os_Windows)
+	SOCKET userspace_rawsctp;
+	SOCKET userspace_udpsctp;
+#else
+	int userspace_rawsctp;
+	int userspace_udpsctp;
+#endif
+	userland_thread_t recvthreadraw;
+	userland_thread_t recvthreadudp;
+#if !defined(THREAD_SUPPORT)
+	struct mbuf **recvmbuf4;
+	int to_fill4;
+	struct mbuf **recvmbuf6;
+	int to_fill6;
+	struct mbuf **udp_recvmbuf4;
+	int udp_to_fill4;
+	struct mbuf **udp_recvmbuf6;
+	int udp_to_fill6;
+#if !defined(__Userspace_os_Windows)
+	struct iovec recv_iovec4[MAXLEN_MBUF_CHAIN];
+	struct iovec recv_iovec6[MAXLEN_MBUF_CHAIN];
+	struct iovec udp_recv_iovec4[MAXLEN_MBUF_CHAIN];
+	struct iovec udp_recv_iovec6[MAXLEN_MBUF_CHAIN];
+#else
+    WSABUF *rcv_iovec4;
+    WSABUF *rcv_iovec6;
+    WSABUF *udp_rcv_iovec4;
+    WSABUF *udp_rcv_iovec6;
+#endif
+#endif
+#endif
+#ifdef INET6
+#if defined(__Userspace_os_Windows)
+	SOCKET userspace_rawsctp6;
+	SOCKET userspace_udpsctp6;
+#else
+	int userspace_rawsctp6;
+	int userspace_udpsctp6;
+#endif
+	userland_thread_t recvthreadraw6;
+	userland_thread_t recvthreadudp6;
+#endif
+	int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df);
+	void (*debug_printf)(const char *format, ...);
+	int crc32c_offloaded;
+#endif
+};
+
+/*-
+ * Here we have all the relevant information for each SCTP entity created. We
+ * will need to modify this as appropriate. We also need to figure out how to
+ * access /dev/random.
+ */
+struct sctp_pcb {
+	unsigned int time_of_secret_change;	/* number of seconds from
+						 * timeval.tv_sec */
+	uint32_t secret_key[SCTP_HOW_MANY_SECRETS][SCTP_NUMBER_OF_SECRETS];
+	unsigned int size_of_a_cookie;
+
+	unsigned int sctp_timeoutticks[SCTP_NUM_TMRS];
+	unsigned int sctp_minrto;
+	unsigned int sctp_maxrto;
+	unsigned int initial_rto;
+	int initial_init_rto_max;
+
+	unsigned int sctp_sack_freq;
+	uint32_t sctp_sws_sender;
+	uint32_t sctp_sws_receiver;
+
+	uint32_t sctp_default_cc_module;
+	uint32_t sctp_default_ss_module;
+	/* authentication related fields */
+	struct sctp_keyhead shared_keys;
+	sctp_auth_chklist_t *local_auth_chunks;
+	sctp_hmaclist_t *local_hmacs;
+	uint16_t default_keyid;
+
+	/* various thresholds */
+	/* Max times I will init at a guy */
+	uint16_t max_init_times;
+
+	/* Max times I will send before we consider someone dead */
+	uint16_t max_send_times;
+
+	uint16_t def_net_failure;
+
+	uint16_t def_net_pf_threshold;
+
+	/* number of streams to pre-open on an association */
+	uint16_t pre_open_stream_count;
+	uint16_t max_open_streams_intome;
+
+	/* random number generator */
+	uint32_t random_counter;
+	uint8_t random_numbers[SCTP_SIGNATURE_ALOC_SIZE];
+	uint8_t random_store[SCTP_SIGNATURE_ALOC_SIZE];
+
+	/*
+	 * This timer is kept running per endpoint.  When it fires it will
+	 * change the secret key.  The default is once an hour.
+	 */
+	struct sctp_timer signature_change;
+
+	/* Zero copy full buffer timer */
+	struct sctp_timer zero_copy_timer;
+        /* Zero copy app to transport (sendq) read repulse timer */
+	struct sctp_timer zero_copy_sendq_timer;
+	uint32_t def_cookie_life;
+	/* defaults to 0 */
+	int auto_close_time;
+	uint32_t initial_sequence_debug;
+	uint32_t adaptation_layer_indicator;
+	uint8_t adaptation_layer_indicator_provided;
+	uint32_t store_at;
+	uint32_t max_burst;
+	uint32_t fr_max_burst;
+#ifdef INET6
+	uint32_t default_flowlabel;
+#endif
+	uint8_t default_dscp;
+	char current_secret_number;
+	char last_secret_number;
+	uint16_t port; /* remote UDP encapsulation port */
+};
+
+#ifndef SCTP_ALIGNMENT
+#define SCTP_ALIGNMENT 32
+#endif
+
+#ifndef SCTP_ALIGNM1
+#define SCTP_ALIGNM1 (SCTP_ALIGNMENT-1)
+#endif
+
+#define sctp_lport ip_inp.inp.inp_lport
+
+struct sctp_pcbtsn_rlog {
+	uint32_t vtag;
+	uint16_t strm;
+	uint16_t seq;
+	uint16_t sz;
+	uint16_t flgs;
+};
+#define SCTP_READ_LOG_SIZE 135	/* we choose the number to make a pcb a page */
+
+
+struct sctp_inpcb {
+	/*-
+	 * put an inpcb in front of it all, kind of a waste but we need it
+	 * for compatibility with all the other stuff.
+	 */
+	union {
+		struct inpcb inp;
+		char align[(sizeof(struct in6pcb) + SCTP_ALIGNM1) &
+		        ~SCTP_ALIGNM1];
+	}     ip_inp;
+
+#if defined(__APPLE__)
+	/* leave some space in case i386 inpcb is bigger than ppc */
+	uint8_t		padding[128];
+#endif
+
+	/* Socket buffer lock protects read_queue and of course sb_cc */
+	struct sctp_readhead read_queue;
+
+	LIST_ENTRY(sctp_inpcb) sctp_list;	/* lists all endpoints */
+        /* hash of all endpoints for model */
+	LIST_ENTRY(sctp_inpcb) sctp_hash;
+	/* count of local addresses bound, 0 if bound all */
+	int laddr_count;
+
+	/* list of addrs in use by the EP, NULL if bound-all */
+	struct sctpladdr sctp_addr_list;
+	/* used for source address selection rotation when we are subset bound */
+	struct sctp_laddr *next_addr_touse;
+
+	/* back pointer to our socket */
+	struct socket *sctp_socket;
+	uint64_t sctp_features;	/* Feature flags */
+	uint32_t sctp_flags;	/* INP state flag set */
+	uint32_t sctp_mobility_features; /* Mobility  Feature flags */
+	struct sctp_pcb sctp_ep;/* SCTP ep data */
+	/* head of the hash of all associations */
+	struct sctpasochead *sctp_tcbhash;
+	u_long sctp_hashmark;
+	/* head of the list of all associations */
+	struct sctpasochead sctp_asoc_list;
+#ifdef SCTP_TRACK_FREED_ASOCS
+	struct sctpasochead sctp_asoc_free_list;
+#endif
+	struct sctp_iterator *inp_starting_point_for_iterator;
+	uint32_t sctp_frag_point;
+	uint32_t partial_delivery_point;
+	uint32_t sctp_context;
+	uint32_t max_cwnd;
+	uint8_t local_strreset_support;
+	uint32_t sctp_cmt_on_off;
+	uint8_t ecn_supported;
+	uint8_t prsctp_supported;
+	uint8_t auth_supported;
+	uint8_t idata_supported;
+	uint8_t asconf_supported;
+	uint8_t reconfig_supported;
+	uint8_t nrsack_supported;
+	uint8_t pktdrop_supported;
+	struct sctp_nonpad_sndrcvinfo def_send;
+	/*-
+	 * These three (pkt, pkt_last and control) are here
+	 * for the sosend_dgram routine. However, I don't
+	 * think anyone in the current FreeBSD kernel calls
+	 * this. So they, along with sctp_sendm, are
+	 * candidates for de-supporting.
+	 */
+#ifdef __Panda__
+	pakhandle_type pak_to_read;
+	pakhandle_type pak_to_read_sendq;
+#endif
+	struct mbuf *pkt, *pkt_last;
+	struct mbuf *control;
+#if !(defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__))
+#ifndef INP_IPV6
+#define INP_IPV6	0x1
+#endif
+#ifndef INP_IPV4
+#define INP_IPV4	0x2
+#endif
+	uint8_t inp_vflag;
+                               /* TODO __Userspace__ where is our inp_vflag going to be? */
+	uint8_t inp_ip_ttl;
+        uint8_t inp_ip_tos;    /* defined as macro in user_inpcb.h */
+	uint8_t inp_ip_resv;
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 503000
+	struct mtx inp_mtx;
+	struct mtx inp_create_mtx;
+	struct mtx inp_rdata_mtx;
+	int32_t refcount;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+	userland_mutex_t inp_mtx;
+	userland_mutex_t inp_create_mtx;
+	userland_mutex_t inp_rdata_mtx;
+	int32_t refcount;
+#elif defined(__APPLE__)
+#if defined(SCTP_APPLE_RWLOCK)
+	lck_rw_t *inp_mtx;
+#else
+	lck_mtx_t *inp_mtx;
+#endif
+	lck_mtx_t *inp_create_mtx;
+	lck_mtx_t *inp_rdata_mtx;
+#elif defined(__Windows__)
+	struct rwlock inp_lock;
+	struct spinlock inp_create_lock;
+	struct spinlock inp_rdata_lock;
+	int32_t refcount;
+#elif defined(__Userspace__)
+    /* TODO decide on __Userspace__ locks */
+	int32_t refcount;
+#endif
+#if defined(__APPLE__)
+	int32_t refcount;
+
+	uint32_t lock_caller1;
+	uint32_t lock_caller2;
+	uint32_t lock_caller3;
+	uint32_t unlock_caller1;
+	uint32_t unlock_caller2;
+	uint32_t unlock_caller3;
+	uint32_t getlock_caller1;
+	uint32_t getlock_caller2;
+	uint32_t getlock_caller3;
+	uint32_t gen_count;
+	uint32_t lock_gen_count;
+	uint32_t unlock_gen_count;
+	uint32_t getlock_gen_count;
+
+	uint32_t i_am_here_file;
+	uint32_t i_am_here_line;
+#endif
+	uint32_t def_vrf_id;
+	uint16_t fibnum;
+#ifdef SCTP_MVRF
+	uint32_t *m_vrf_ids;
+	uint32_t num_vrfs;
+	uint32_t vrf_size;
+#endif
+	uint32_t total_sends;
+	uint32_t total_recvs;
+	uint32_t last_abort_code;
+	uint32_t total_nospaces;
+	struct sctpasochead *sctp_asocidhash;
+	u_long hashasocidmark;
+        uint32_t sctp_associd_counter;
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	struct sctp_pcbtsn_rlog readlog[SCTP_READ_LOG_SIZE];
+	uint32_t readlog_index;
+#endif
+#if defined(__Userspace__)
+	void *ulp_info;
+	int (*recv_callback)(struct socket *, union sctp_sockstore, void *, size_t,
+                             struct sctp_rcvinfo, int, void *);
+	uint32_t send_sb_threshold;
+	int (*send_callback)(struct socket *, uint32_t);
+#endif
+};
+
+#if defined(__Userspace__)
+int register_recv_cb (struct socket *,
+                      int (*)(struct socket *, union sctp_sockstore, void *, size_t,
+                              struct sctp_rcvinfo, int, void *));
+int register_send_cb (struct socket *, uint32_t, int (*)(struct socket *, uint32_t));
+int register_ulp_info (struct socket *, void *);
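+
+/*
+ * Illustrative use only (my_recv_cb is a hypothetical callback, not part of
+ * this file): a user-space application would typically register it right
+ * after creating the socket:
+ *
+ *   (void)register_recv_cb(so, my_recv_cb);
+ */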
+
+#endif
+struct sctp_tcb {
+	struct socket *sctp_socket;	/* back pointer to socket */
+	struct sctp_inpcb *sctp_ep;	/* back pointer to ep */
+	LIST_ENTRY(sctp_tcb) sctp_tcbhash;	/* next link in hash
+						 * table */
+        LIST_ENTRY(sctp_tcb) sctp_tcblist;	/* list of all of the
+						 * TCB's */
+        LIST_ENTRY(sctp_tcb) sctp_tcbasocidhash;	/* next link in asocid
+							 * hash table
+							 */
+        LIST_ENTRY(sctp_tcb) sctp_asocs;	/* vtag hash list */
+	struct sctp_block_entry *block_entry;	/* pointer locked by  socket
+						 * send buffer */
+	struct sctp_association asoc;
+	/*-
+	 * freed_by_sorcv_sincelast is protected by the sockbuf_lock NOT the
+	 * tcb_lock. It's special in this way to help avoid extra mutex calls
+	 * in the reading of data.
+	 */
+	uint32_t freed_by_sorcv_sincelast;
+	uint32_t total_sends;
+	uint32_t total_recvs;
+	int freed_from_where;
+	uint16_t rport;		/* remote port in network format */
+	uint16_t resv;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 503000
+	struct mtx tcb_mtx;
+	struct mtx tcb_send_mtx;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+	userland_mutex_t tcb_mtx;
+	userland_mutex_t tcb_send_mtx;
+#elif defined(__APPLE__)
+	lck_mtx_t* tcb_mtx;
+	lck_mtx_t* tcb_send_mtx;
+#elif defined(__Windows__)
+	struct spinlock tcb_lock;
+	struct spinlock tcb_send_lock;
+#elif defined(__Userspace__)
+    /* TODO decide on __Userspace__ locks */
+#endif
+#if defined(__APPLE__)
+	uint32_t caller1;
+	uint32_t caller2;
+	uint32_t caller3;
+#endif
+};
+
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 503000
+
+#include <netinet/sctp_lock_bsd.h>
+
+#elif defined(__APPLE__)
+/*
+ * Apple MacOS X 10.4 "Tiger"
+ */
+
+#include <netinet/sctp_lock_apple_fg.h>
+
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+
+#include <netinet/sctp_process_lock.h>
+
+#elif defined(__Windows__)
+
+#include <netinet/sctp_lock_windows.h>
+
+#elif defined(__Userspace__)
+
+#include <netinet/sctp_lock_userspace.h>
+
+#else
+/*
+ * Pre-5.x FreeBSD, and others.
+ */
+#include <netinet/sctp_lock_empty.h>
+#endif
+
+/* TODO where to put non-_KERNEL things for __Userspace__? */
+#if defined(_KERNEL) || defined(__Userspace__)
+
+/* Attention Julian, this is the extern that
+ * goes with the base info. sctp_pcb.c has
+ * the real definition.
+ */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+VNET_DECLARE(struct sctp_base_info, system_base_info) ;
+#else
+extern struct sctp_base_info system_base_info;
+#endif
+
+#ifdef INET6
+int SCTP6_ARE_ADDR_EQUAL(struct sockaddr_in6 *a, struct sockaddr_in6 *b);
+#endif
+
+void sctp_fill_pcbinfo(struct sctp_pcbinfo *);
+
+struct sctp_ifn *
+sctp_find_ifn(void *ifn, uint32_t ifn_index);
+
+struct sctp_vrf *sctp_allocate_vrf(int vrfid);
+struct sctp_vrf *sctp_find_vrf(uint32_t vrfid);
+void sctp_free_vrf(struct sctp_vrf *vrf);
+
+/*-
+ * Change address state, can be used if
+ * O/S supports telling transports about
+ * changes to IFA/IFN's (link layer triggers).
+ * If a ifn goes down, we will do src-addr-selection
+ * and NOT use that, as a source address. This does
+ * not stop the routing system from routing out
+ * that interface, but we won't put it as a source.
+ */
+void sctp_mark_ifa_addr_down(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index);
+void sctp_mark_ifa_addr_up(uint32_t vrf_id, struct sockaddr *addr, const char *if_name, uint32_t ifn_index);
+
+struct sctp_ifa *
+sctp_add_addr_to_vrf(uint32_t vrfid,
+		     void *ifn, uint32_t ifn_index, uint32_t ifn_type,
+		     const char *if_name,
+		     void *ifa, struct sockaddr *addr, uint32_t ifa_flags,
+		     int dynamic_add);
+
+void sctp_update_ifn_mtu(uint32_t ifn_index, uint32_t mtu);
+
+void sctp_free_ifn(struct sctp_ifn *sctp_ifnp);
+void sctp_free_ifa(struct sctp_ifa *sctp_ifap);
+
+
+void sctp_del_addr_from_vrf(uint32_t vrfid, struct sockaddr *addr,
+			    uint32_t ifn_index, const char *if_name);
+
+
+
+struct sctp_nets *sctp_findnet(struct sctp_tcb *, struct sockaddr *);
+
+struct sctp_inpcb *sctp_pcb_findep(struct sockaddr *, int, int, uint32_t);
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+int sctp_inpcb_bind(struct socket *, struct sockaddr *,
+		    struct sctp_ifa *,struct thread *);
+#elif defined(__Windows__)
+int sctp_inpcb_bind(struct socket *, struct sockaddr *,
+		    struct sctp_ifa *,PKTHREAD);
+#else
+/* struct proc is a dummy for __Userspace__ */
+int sctp_inpcb_bind(struct socket *, struct sockaddr *,
+		    struct sctp_ifa *, struct proc *);
+#endif
+
+struct sctp_tcb *
+sctp_findassociation_addr(struct mbuf *, int,
+    struct sockaddr *, struct sockaddr *,
+    struct sctphdr *, struct sctp_chunkhdr *, struct sctp_inpcb **,
+    struct sctp_nets **, uint32_t vrf_id);
+
+struct sctp_tcb *
+sctp_findassociation_addr_sa(struct sockaddr *,
+    struct sockaddr *, struct sctp_inpcb **, struct sctp_nets **, int, uint32_t);
+
+void
+sctp_move_pcb_and_assoc(struct sctp_inpcb *, struct sctp_inpcb *,
+    struct sctp_tcb *);
+
+/*-
+ * For this call ep_addr, the to is the destination endpoint address of the
+ * peer (relative to outbound). The from field is only used if the TCP model
+ * is enabled and helps distinguish amongst the subset bound (non-boundall).
+ * The TCP model MAY change the actual ep field, which is why it is passed.
+ */
+struct sctp_tcb *
+sctp_findassociation_ep_addr(struct sctp_inpcb **,
+    struct sockaddr *, struct sctp_nets **, struct sockaddr *,
+    struct sctp_tcb *);
+
+struct sctp_tcb *
+sctp_findasoc_ep_asocid_locked(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asocid(struct sctp_inpcb *,
+    sctp_assoc_t, int);
+
+struct sctp_tcb *
+sctp_findassociation_ep_asconf(struct mbuf *, int, struct sockaddr *,
+			       struct sctphdr *, struct sctp_inpcb **, struct sctp_nets **, uint32_t vrf_id);
+
+int sctp_inpcb_alloc(struct socket *so, uint32_t vrf_id);
+
+int sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id);
+
+void sctp_inpcb_free(struct sctp_inpcb *, int, int);
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+                int *, uint32_t, uint32_t, uint16_t, uint16_t, struct thread *);
+#elif defined(__Windows__)
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+                int *, uint32_t, uint32_t, uint16_t, uint16_t, PKTHREAD);
+#else
+/* proc will be NULL for __Userspace__ */
+struct sctp_tcb *
+sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
+                int *, uint32_t, uint32_t, uint16_t, uint16_t, struct proc *);
+#endif
+
+int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int, int);
+
+
+void sctp_delete_from_timewait(uint32_t, uint16_t, uint16_t);
+
+int sctp_is_in_timewait(uint32_t tag, uint16_t lport, uint16_t rport);
+
+void
+sctp_add_vtag_to_timewait(uint32_t tag, uint32_t time, uint16_t lport, uint16_t rport);
+
+void sctp_add_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *, uint32_t);
+
+void sctp_del_local_addr_ep(struct sctp_inpcb *, struct sctp_ifa *);
+
+int sctp_add_remote_addr(struct sctp_tcb *, struct sockaddr *, struct sctp_nets **, uint16_t, int, int);
+
+void sctp_remove_net(struct sctp_tcb *, struct sctp_nets *);
+
+int sctp_del_remote_addr(struct sctp_tcb *, struct sockaddr *);
+
+void sctp_pcb_init(void);
+
+void sctp_pcb_finish(void);
+
+void sctp_add_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+void sctp_del_local_addr_restricted(struct sctp_tcb *, struct sctp_ifa *);
+
+int
+sctp_load_addresses_from_init(struct sctp_tcb *, struct mbuf *, int, int,
+    struct sockaddr *, struct sockaddr *, struct sockaddr *, uint16_t);
+
+int
+sctp_set_primary_addr(struct sctp_tcb *, struct sockaddr *,
+    struct sctp_nets *);
+
+int sctp_is_vtag_good(uint32_t, uint16_t lport, uint16_t rport, struct timeval *);
+
+/* void sctp_drain(void); */
+
+int sctp_destination_is_reachable(struct sctp_tcb *, struct sockaddr *);
+
+int sctp_swap_inpcb_for_listen(struct sctp_inpcb *inp);
+
+void sctp_clean_up_stream(struct sctp_tcb *stcb, struct sctp_readhead *rh);
+
+/*-
+ * A NULL inpcb in the last arg indicates run on ALL ep's. A specific inp in
+ * the last arg indicates run on ONLY the assoc's of the specified endpoint.
+ */
+int
+sctp_initiate_iterator(inp_func inpf,
+		       asoc_func af,
+		       inp_func inpe,
+		       uint32_t, uint32_t,
+		       uint32_t, void *,
+		       uint32_t,
+		       end_func ef,
+		       struct sctp_inpcb *,
+		       uint8_t co_off);
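+
+/*
+ * Example sketch (illustration only): kicking off an iterator over the
+ * assoc's of ALL endpoints by passing NULL as the inpcb.  The callback
+ * signature and the meaning given to the numeric arguments (all 0 here)
+ * are assumptions made for the example, not taken from this header.
+ */
+#if 0
+static void
+example_per_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                  void *ptr, uint32_t val)
+{
+	/* called once for each association visited by the iterator */
+}
+
+static void
+example_iterate_all(void)
+{
+	(void)sctp_initiate_iterator(NULL, example_per_assoc, NULL,
+	                             0, 0, 0, NULL, 0,
+	                             NULL, NULL, 0);
+}
+#endif
+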
+#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
+void
+sctp_queue_to_mcore(struct mbuf *m, int off, int cpu_to_use);
+
+#endif
+
+#endif				/* _KERNEL || __Userspace__ */
+#endif				/* !__sctp_pcb_h__ */
diff --git a/usrsctplib/netinet/sctp_peeloff.c b/usrsctplib/netinet/sctp_peeloff.c
new file mode 100755
index 0000000..7c55aa3
--- /dev/null
+++ b/usrsctplib/netinet/sctp_peeloff.c
@@ -0,0 +1,326 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.c 279859 2015-03-10 19:49:25Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_peeloff.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_auth.h>
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 5
+#endif
+
+int
+sctp_can_peel_off(struct socket *head, sctp_assoc_t assoc_id)
+{
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	uint32_t state;
+
+	if (head == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EBADF);
+		return (EBADF);
+	}
+	inp = (struct sctp_inpcb *)head->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+		return (EFAULT);
+	}
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EOPNOTSUPP);
+		return (EOPNOTSUPP);
+	}
+	stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+	if (stcb == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOENT);
+		return (ENOENT);
+	}
+	state = SCTP_GET_STATE((&stcb->asoc));
+	if ((state == SCTP_STATE_EMPTY) ||
+	    (state == SCTP_STATE_INUSE)) {
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+		return (ENOTCONN);
+	}
+	SCTP_TCB_UNLOCK(stcb);
+	/* We are clear to peel this one off */
+	return (0);
+}
+
+int
+sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
+{
+	struct sctp_inpcb *inp, *n_inp;
+	struct sctp_tcb *stcb;
+	uint32_t state;
+
+	inp = (struct sctp_inpcb *)head->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+		return (EFAULT);
+	}
+	stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+	if (stcb == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+		return (ENOTCONN);
+	}
+
+	state = SCTP_GET_STATE((&stcb->asoc));
+	if ((state == SCTP_STATE_EMPTY) ||
+	    (state == SCTP_STATE_INUSE)) {
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+		return (ENOTCONN);
+	}
+
+	n_inp = (struct sctp_inpcb *)so->so_pcb;
+	n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+	    SCTP_PCB_FLAGS_CONNECTED |
+	    SCTP_PCB_FLAGS_IN_TCPPOOL |	/* Turn on Blocking IO */
+	    (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+	n_inp->sctp_socket = so;
+	n_inp->sctp_features = inp->sctp_features;
+	n_inp->sctp_mobility_features = inp->sctp_mobility_features;
+	n_inp->sctp_frag_point = inp->sctp_frag_point;
+	n_inp->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+	n_inp->ecn_supported = inp->ecn_supported;
+	n_inp->prsctp_supported = inp->prsctp_supported;
+	n_inp->auth_supported = inp->auth_supported;
+	n_inp->asconf_supported = inp->asconf_supported;
+	n_inp->reconfig_supported = inp->reconfig_supported;
+	n_inp->nrsack_supported = inp->nrsack_supported;
+	n_inp->pktdrop_supported = inp->pktdrop_supported;
+	n_inp->partial_delivery_point = inp->partial_delivery_point;
+	n_inp->sctp_context = inp->sctp_context;
+	n_inp->max_cwnd = inp->max_cwnd;
+	n_inp->local_strreset_support = inp->local_strreset_support;
+	n_inp->inp_starting_point_for_iterator = NULL;
+	/* copy in the authentication parameters from the original endpoint */
+	if (n_inp->sctp_ep.local_hmacs)
+		sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
+	n_inp->sctp_ep.local_hmacs =
+	    sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+	if (n_inp->sctp_ep.local_auth_chunks)
+		sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
+	n_inp->sctp_ep.local_auth_chunks =
+	    sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+	(void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+	    &n_inp->sctp_ep.shared_keys);
+#if defined(__Userspace__)
+	n_inp->ulp_info = inp->ulp_info;
+	n_inp->recv_callback = inp->recv_callback;
+	n_inp->send_callback = inp->send_callback;
+	n_inp->send_sb_threshold = inp->send_sb_threshold;
+#endif
+	/*
+	 * Now we must move it from one hash table to another and get the
+	 * stcb in the right place.
+	 */
+	sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	SCTP_TCB_UNLOCK(stcb);
+
+#if defined(__FreeBSD__)
+	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
+#else
+	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+#endif
+	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+
+	return (0);
+}
+
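+/*
+ * Example sketch (illustration only): the expected calling pattern for a
+ * peel-off.  The way the new one-to-one socket is obtained (shown as a
+ * hypothetical alloc_socket() helper) is assumed; only sctp_can_peel_off()
+ * and sctp_do_peeloff() come from this file.
+ */
+#if 0
+static int
+example_peeloff(struct socket *head, sctp_assoc_t assoc_id,
+                struct socket **new_so)
+{
+	int error;
+
+	/* First check that the association may be branched off. */
+	error = sctp_can_peel_off(head, assoc_id);
+	if (error != 0) {
+		return (error);
+	}
+	/* Create a fresh socket for the association (hypothetical helper). */
+	*new_so = alloc_socket();
+	if (*new_so == NULL) {
+		return (ENOMEM);
+	}
+	/* Move the association and its pending data over to the new socket. */
+	return (sctp_do_peeloff(head, *new_so, assoc_id));
+}
+#endif
+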
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+struct socket *
+sctp_get_peeloff(struct socket *head, sctp_assoc_t assoc_id, int *error)
+{
+#if defined(__Userspace__)
+    /* if __Userspace__ initially chooses not to support peeloff, put it here... */
+#endif
+#if defined(__Panda__)
+	SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EINVAL);
+	*error = EINVAL;
+	return (NULL);
+#else
+	struct socket *newso;
+	struct sctp_inpcb *inp, *n_inp;
+	struct sctp_tcb *stcb;
+
+	SCTPDBG(SCTP_DEBUG_PEEL1, "SCTP peel-off called\n");
+	inp = (struct sctp_inpcb *)head->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, EFAULT);
+		*error = EFAULT;
+		return (NULL);
+	}
+	stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1);
+	if (stcb == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+		*error = ENOTCONN;
+		return (NULL);
+	}
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	SCTP_TCB_UNLOCK(stcb);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	CURVNET_SET(head->so_vnet);
+#endif
+	newso = sonewconn(head, SS_ISCONNECTED
+#if defined(__APPLE__)
+	    , NULL
+#elif defined(__Panda__)
+	    /* place this socket in the assoc's vrf id */
+	    , NULL, stcb->asoc.vrf_id
+#endif
+		);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	CURVNET_RESTORE();
+#endif
+	if (newso == NULL) {
+		SCTPDBG(SCTP_DEBUG_PEEL1, "sctp_peeloff:sonewconn failed\n");
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTP_PEELOFF, ENOMEM);
+		*error = ENOMEM;
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		return (NULL);
+
+	}
+#if defined(__APPLE__)
+	  else {
+		SCTP_SOCKET_LOCK(newso, 1);
+	}
+#endif
+	SCTP_TCB_LOCK(stcb);
+	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+	n_inp = (struct sctp_inpcb *)newso->so_pcb;
+	SOCK_LOCK(head);
+	n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
+	    SCTP_PCB_FLAGS_CONNECTED |
+	    SCTP_PCB_FLAGS_IN_TCPPOOL |	/* Turn on Blocking IO */
+	    (SCTP_PCB_COPY_FLAGS & inp->sctp_flags));
+	n_inp->sctp_features = inp->sctp_features;
+	n_inp->sctp_frag_point = inp->sctp_frag_point;
+	n_inp->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+	n_inp->ecn_supported = inp->ecn_supported;
+	n_inp->prsctp_supported = inp->prsctp_supported;
+	n_inp->auth_supported = inp->auth_supported;
+	n_inp->asconf_supported = inp->asconf_supported;
+	n_inp->reconfig_supported = inp->reconfig_supported;
+	n_inp->nrsack_supported = inp->nrsack_supported;
+	n_inp->pktdrop_supported = inp->pktdrop_supported;
+	n_inp->partial_delivery_point = inp->partial_delivery_point;
+	n_inp->sctp_context = inp->sctp_context;
+	n_inp->max_cwnd = inp->max_cwnd;
+	n_inp->local_strreset_support = inp->local_strreset_support;
+	n_inp->inp_starting_point_for_iterator = NULL;
+#if defined(__Userspace__)
+	n_inp->ulp_info = inp->ulp_info;
+	n_inp->recv_callback = inp->recv_callback;
+	n_inp->send_callback = inp->send_callback;
+	n_inp->send_sb_threshold = inp->send_sb_threshold;
+#endif
+
+	/* copy in the authentication parameters from the original endpoint */
+	if (n_inp->sctp_ep.local_hmacs)
+		sctp_free_hmaclist(n_inp->sctp_ep.local_hmacs);
+	n_inp->sctp_ep.local_hmacs =
+	    sctp_copy_hmaclist(inp->sctp_ep.local_hmacs);
+	if (n_inp->sctp_ep.local_auth_chunks)
+		sctp_free_chunklist(n_inp->sctp_ep.local_auth_chunks);
+	n_inp->sctp_ep.local_auth_chunks =
+	    sctp_copy_chunklist(inp->sctp_ep.local_auth_chunks);
+	(void)sctp_copy_skeylist(&inp->sctp_ep.shared_keys,
+	    &n_inp->sctp_ep.shared_keys);
+
+	n_inp->sctp_socket = newso;
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+		sctp_feature_off(n_inp, SCTP_PCB_FLAGS_AUTOCLOSE);
+		n_inp->sctp_ep.auto_close_time = 0;
+		sctp_timer_stop(SCTP_TIMER_TYPE_AUTOCLOSE, n_inp, stcb, NULL,
+				SCTP_FROM_SCTP_PEELOFF + SCTP_LOC_1);
+	}
+	/* Turn off any non-blocking semantics. */
+	SCTP_CLEAR_SO_NBIO(newso);
+        newso->so_state |= SS_ISCONNECTED;
+	/* We remove it right away */
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)
+#ifdef SCTP_LOCK_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
+		sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+	}
+#endif
+	TAILQ_REMOVE(&head->so_comp, newso, so_list);
+	head->so_qlen--;
+	SOCK_UNLOCK(head);
+#else
+        newso = TAILQ_FIRST(&head->so_q);
+	if (soqremque(newso, 1) == 0) {
+		SCTP_PRINTF("soqremque failed, peeloff-fails (invariants would panic)\n");
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_PEELOFF, ENOTCONN);
+		*error = ENOTCONN;
+		return (NULL);
+
+	}
+#endif
+	/*
+	 * Now we must move it from one hash table to another and get the
+	 * stcb in the right place.
+	 */
+        sctp_move_pcb_and_assoc(inp, n_inp, stcb);
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+	SCTP_TCB_UNLOCK(stcb);
+	/*
+	 * And now the final hack. We move data in the pending side i.e.
+	 * head to the new socket buffer. Let the GRUBBING begin :-0
+	 */
+#if defined(__FreeBSD__)
+	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, SBL_WAIT);
+#else
+	sctp_pull_off_control_to_new_inp(inp, n_inp, stcb, M_WAITOK);
+#endif
+	atomic_subtract_int(&stcb->asoc.refcnt, 1);
+	return (newso);
+#endif
+}
+#endif
diff --git a/usrsctplib/netinet/sctp_peeloff.h b/usrsctplib/netinet/sctp_peeloff.h
new file mode 100755
index 0000000..22375e3
--- /dev/null
+++ b/usrsctplib/netinet/sctp_peeloff.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_peeloff.h 309607 2016-12-06 10:21:25Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_PEELOFF_H_
+#define _NETINET_SCTP_PEELOFF_H_
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+/* socket option peeloff */
+struct sctp_peeloff_opt {
+#if !defined(__Windows__)
+	int s;
+#else
+	HANDLE s;
+#endif
+	sctp_assoc_t assoc_id;
+#if !defined(__Windows__)
+	int new_sd;
+#else
+	HANDLE new_sd;
+#endif
+};
+#endif /* HAVE_SCTP_PEELOFF_SOCKOPT */
+#if defined(_KERNEL)
+int sctp_can_peel_off(struct socket *, sctp_assoc_t);
+int sctp_do_peeloff(struct socket *, struct socket *, sctp_assoc_t);
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+struct socket *sctp_get_peeloff(struct socket *, sctp_assoc_t, int *);
+int sctp_peeloff_option(struct proc *p, struct sctp_peeloff_opt *peeloff);
+#endif /* HAVE_SCTP_PEELOFF_SOCKOPT */
+#endif /* _KERNEL */
+#if defined(__Userspace__)
+int sctp_can_peel_off(struct socket *, sctp_assoc_t);
+int sctp_do_peeloff(struct socket *, struct socket *, sctp_assoc_t);
+#endif /* __Userspace__ */
+#endif /* _NETINET_SCTP_PEELOFF_H_ */
diff --git a/usrsctplib/netinet/sctp_process_lock.h b/usrsctplib/netinet/sctp_process_lock.h
new file mode 100755
index 0000000..1d10985
--- /dev/null
+++ b/usrsctplib/netinet/sctp_process_lock.h
@@ -0,0 +1,642 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __sctp_process_lock_h__
+#define __sctp_process_lock_h__
+
+/*
+ * We still need to define the following atomic functions or
+ * their equivalents:
+ * - atomic_add_int(&foo, val) - atomically add the value
+ * - atomic_fetchadd_int(&foo, val) - does the same as atomic_add_int,
+ *				      but returns the previous value.
+ * - atomic_subtract_int(&foo, val) - can be made from atomic_add_int()
+ *
+ * - atomic_cmpset_int(&foo, value, newvalue) - Sets foo to newvalue
+ *					        if and only if foo equals
+ *					        value. Returns 0
+ *					        on success.
+ */
+
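+/*
+ * Example sketch (illustration only): one way some of the atomic operations
+ * listed above could be provided on a platform that has the GCC __sync
+ * builtins.  This mapping is an assumption for illustration; the real
+ * definitions live elsewhere in the userspace port.
+ */
+#if 0
+#define atomic_add_int(addr, val)	(void)__sync_fetch_and_add((addr), (val))
+#define atomic_fetchadd_int(addr, val)	__sync_fetch_and_add((addr), (val))
+#define atomic_subtract_int(addr, val)	(void)__sync_fetch_and_sub((addr), (val))
+#endif
+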
+#ifdef SCTP_PER_SOCKET_LOCKING
+/*
+ * per socket level locking
+ */
+
+#if defined(__Userspace_os_Windows)
+/* Lock for INFO stuff */
+#define SCTP_INP_INFO_LOCK_INIT()
+#define SCTP_INP_INFO_RLOCK()
+#define SCTP_INP_INFO_RUNLOCK()
+#define SCTP_INP_INFO_WLOCK()
+#define SCTP_INP_INFO_WUNLOCK()
+#define SCTP_INP_INFO_LOCK_DESTROY()
+#define SCTP_IPI_COUNT_INIT()
+#define SCTP_IPI_COUNT_DESTROY()
+#else
+#define SCTP_INP_INFO_LOCK_INIT()
+#define SCTP_INP_INFO_RLOCK()
+#define SCTP_INP_INFO_RUNLOCK()
+#define SCTP_INP_INFO_WLOCK()
+#define SCTP_INP_INFO_WUNLOCK()
+#define SCTP_INP_INFO_LOCK_DESTROY()
+#define SCTP_IPI_COUNT_INIT()
+#define SCTP_IPI_COUNT_DESTROY()
+#endif
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb)
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_SEND_LOCK(_tcb)
+#define SCTP_TCB_SEND_UNLOCK(_tcb)
+
+/* Lock for INP */
+#define SCTP_INP_LOCK_INIT(_inp)
+#define SCTP_INP_LOCK_DESTROY(_inp)
+
+#define SCTP_INP_RLOCK(_inp)
+#define SCTP_INP_RUNLOCK(_inp)
+#define SCTP_INP_WLOCK(_inp)
+#define SCTP_INP_WUNLOCK(_inp)
+#define SCTP_INP_INCR_REF(_inp)
+#define SCTP_INP_DECR_REF(_inp)
+
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp)
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp)
+#define SCTP_ASOC_CREATE_LOCK(_inp)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp)
+
+#define SCTP_INP_READ_INIT(_inp)
+#define SCTP_INP_READ_DESTROY(_inp)
+#define SCTP_INP_READ_LOCK(_inp)
+#define SCTP_INP_READ_UNLOCK(_inp)
+
+/* Lock for TCB */
+#define SCTP_TCB_LOCK_INIT(_tcb)
+#define SCTP_TCB_LOCK_DESTROY(_tcb)
+#define SCTP_TCB_LOCK(_tcb)
+#define SCTP_TCB_TRYLOCK(_tcb) 1
+#define SCTP_TCB_UNLOCK(_tcb)
+#define SCTP_TCB_UNLOCK_IFOWNED(_tcb)
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+
+#else
+/*
+ * per tcb level locking
+ */
+#define SCTP_IPI_COUNT_INIT()
+
+#if defined(__Userspace_os_Windows)
+#define SCTP_WQ_ADDR_INIT() \
+        InitializeCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_DESTROY() \
+	DeleteCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_LOCK() \
+        EnterCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_UNLOCK() \
+        LeaveCriticalSection(&SCTP_BASE_INFO(wq_addr_mtx))
+
+
+#define SCTP_INP_INFO_LOCK_INIT() \
+	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_LOCK_DESTROY() \
+	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_RLOCK() \
+	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_TRYLOCK()	\
+        TryEnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WLOCK() \
+	EnterCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_RUNLOCK() \
+ 	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WUNLOCK()	\
+	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_ep_mtx))
+
+#define SCTP_IP_PKTLOG_INIT() \
+        InitializeCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_DESTROY() \
+	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_LOCK() \
+    	EnterCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_UNLOCK() \
+	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+
+/*
+ * The INP locks are used for locking an SCTP endpoint: for example, if
+ * we want to change something at the endpoint level, such as random_store
+ * or the cookie secrets, we take the INP lock.
+ */
+#define SCTP_INP_READ_INIT(_inp) \
+	InitializeCriticalSection(&(_inp)->inp_rdata_mtx)
+#define SCTP_INP_READ_DESTROY(_inp) \
+	DeleteCriticalSection(&(_inp)->inp_rdata_mtx)
+#define SCTP_INP_READ_LOCK(_inp) \
+	EnterCriticalSection(&(_inp)->inp_rdata_mtx)
+#define SCTP_INP_READ_UNLOCK(_inp) \
+	LeaveCriticalSection(&(_inp)->inp_rdata_mtx)
+
+#define SCTP_INP_LOCK_INIT(_inp) \
+	InitializeCriticalSection(&(_inp)->inp_mtx)
+
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
+	InitializeCriticalSection(&(_inp)->inp_create_mtx)
+
+#define SCTP_INP_LOCK_DESTROY(_inp) \
+	DeleteCriticalSection(&(_inp)->inp_mtx)
+
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
+	DeleteCriticalSection(&(_inp)->inp_create_mtx)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_INP_RLOCK(_inp)	do { 					\
+	if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+	EnterCriticalSection(&(_inp)->inp_mtx);			\
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp)	do { 					\
+	sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+	EnterCriticalSection(&(_inp)->inp_mtx);			\
+} while (0)
+#else
+
+#define SCTP_INP_RLOCK(_inp)	do { 					\
+	EnterCriticalSection(&(_inp)->inp_mtx);			\
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp)	do { 					\
+	EnterCriticalSection(&(_inp)->inp_mtx);			\
+} while (0)
+#endif
+
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
+	InitializeCriticalSection(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
+	DeleteCriticalSection(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_TCB_SEND_LOCK(_tcb) do { \
+	EnterCriticalSection(&(_tcb)->tcb_send_mtx); \
+} while (0)
+
+#define SCTP_TCB_SEND_UNLOCK(_tcb) \
+	LeaveCriticalSection(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
+#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
+	if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
+	EnterCriticalSection(&(_inp)->inp_create_mtx);		\
+} while (0)
+#else
+#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
+	EnterCriticalSection(&(_inp)->inp_create_mtx);		\
+} while (0)
+#endif
+
+#define SCTP_INP_RUNLOCK(_inp) \
+	LeaveCriticalSection(&(_inp)->inp_mtx)
+#define SCTP_INP_WUNLOCK(_inp) \
+	LeaveCriticalSection(&(_inp)->inp_mtx)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
+	LeaveCriticalSection(&(_inp)->inp_create_mtx)
+
+/*
+ * For the majority of things (once we have found the association) we will
+ * lock the actual association mutex. This will protect all the association
+ * level queues and streams and such. We will need to lock the socket layer
+ * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
+ * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
+ */
+
+#define SCTP_TCB_LOCK_INIT(_tcb) \
+	InitializeCriticalSection(&(_tcb)->tcb_mtx)
+
+#define SCTP_TCB_LOCK_DESTROY(_tcb) \
+	DeleteCriticalSection(&(_tcb)->tcb_mtx)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_TCB_LOCK(_tcb)  do {					\
+	if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
+	EnterCriticalSection(&(_tcb)->tcb_mtx);			\
+} while (0)
+
+#else
+#define SCTP_TCB_LOCK(_tcb)  do {					\
+	EnterCriticalSection(&(_tcb)->tcb_mtx);			\
+} while (0)
+#endif
+
+#define SCTP_TCB_TRYLOCK(_tcb) 	((TryEnterCriticalSection(&(_tcb)->tcb_mtx)))
+
+#define SCTP_TCB_UNLOCK(_tcb)	do {  \
+	LeaveCriticalSection(&(_tcb)->tcb_mtx);  \
+} while (0)
+
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+
+#else /* all Userspaces except Windows */
+#define SCTP_WQ_ADDR_INIT() \
+        (void)pthread_mutex_init(&SCTP_BASE_INFO(wq_addr_mtx), NULL)
+#define SCTP_WQ_ADDR_DESTROY() \
+	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_LOCK() \
+        (void)pthread_mutex_lock(&SCTP_BASE_INFO(wq_addr_mtx))
+#define SCTP_WQ_ADDR_UNLOCK() \
+        (void)pthread_mutex_unlock(&SCTP_BASE_INFO(wq_addr_mtx))
+
+
+#define SCTP_INP_INFO_LOCK_INIT() \
+	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_ep_mtx), NULL)
+#define SCTP_INP_INFO_LOCK_DESTROY() \
+	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_RLOCK() \
+	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_TRYLOCK()	\
+        (!(pthread_mutex_trylock(&SCTP_BASE_INFO(ipi_ep_mtx))))
+#define SCTP_INP_INFO_WLOCK() \
+	(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_RUNLOCK()	\
+	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
+#define SCTP_INP_INFO_WUNLOCK()	\
+	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_ep_mtx))
+
+#define SCTP_IP_PKTLOG_INIT() \
+        (void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_pktlog_mtx), NULL)
+#define SCTP_IP_PKTLOG_DESTROY() \
+	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_LOCK() \
+        (void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+#define SCTP_IP_PKTLOG_UNLOCK()	\
+        (void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_pktlog_mtx))
+
+
+
+/*
+ * The INP locks are used for locking an SCTP endpoint: for example, if
+ * we want to change something at the endpoint level, such as random_store
+ * or the cookie secrets, we take the INP lock.
+ */
+#define SCTP_INP_READ_INIT(_inp) \
+	(void)pthread_mutex_init(&(_inp)->inp_rdata_mtx, NULL)
+
+#define SCTP_INP_READ_DESTROY(_inp) \
+	(void)pthread_mutex_destroy(&(_inp)->inp_rdata_mtx)
+
+#define SCTP_INP_READ_LOCK(_inp)	do { \
+	(void)pthread_mutex_lock(&(_inp)->inp_rdata_mtx);    \
+} while (0)
+
+
+#define SCTP_INP_READ_UNLOCK(_inp) \
+	(void)pthread_mutex_unlock(&(_inp)->inp_rdata_mtx)
+
+#define SCTP_INP_LOCK_INIT(_inp) \
+	(void)pthread_mutex_init(&(_inp)->inp_mtx, NULL)
+
+#define SCTP_ASOC_CREATE_LOCK_INIT(_inp) \
+	(void)pthread_mutex_init(&(_inp)->inp_create_mtx, NULL)
+
+#define SCTP_INP_LOCK_DESTROY(_inp) \
+	(void)pthread_mutex_destroy(&(_inp)->inp_mtx)
+
+#define SCTP_ASOC_CREATE_LOCK_DESTROY(_inp) \
+	(void)pthread_mutex_destroy(&(_inp)->inp_create_mtx)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_INP_RLOCK(_inp)	do { 					\
+	if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp)	do { 					\
+	sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_INP);\
+	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
+} while (0)
+
+#else
+
+#define SCTP_INP_RLOCK(_inp)	do { 					\
+	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
+} while (0)
+
+#define SCTP_INP_WLOCK(_inp)	do { 					\
+	(void)pthread_mutex_lock(&(_inp)->inp_mtx);			\
+} while (0)
+#endif
+
+
+#define SCTP_TCB_SEND_LOCK_INIT(_tcb) \
+	(void)pthread_mutex_init(&(_tcb)->tcb_send_mtx, NULL)
+
+#define SCTP_TCB_SEND_LOCK_DESTROY(_tcb) \
+	(void)pthread_mutex_destroy(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_TCB_SEND_LOCK(_tcb) do { \
+	(void)pthread_mutex_lock(&(_tcb)->tcb_send_mtx); \
+} while (0)
+
+#define SCTP_TCB_SEND_UNLOCK(_tcb) \
+	(void)pthread_mutex_unlock(&(_tcb)->tcb_send_mtx)
+
+#define SCTP_INP_INCR_REF(_inp) atomic_add_int(&((_inp)->refcount), 1)
+#define SCTP_INP_DECR_REF(_inp) atomic_add_int(&((_inp)->refcount), -1)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
+	if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_CREATE); \
+	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);		\
+} while (0)
+#else
+#define SCTP_ASOC_CREATE_LOCK(_inp) do {				\
+	(void)pthread_mutex_lock(&(_inp)->inp_create_mtx);		\
+} while (0)
+#endif
+
+#define SCTP_INP_RUNLOCK(_inp) \
+	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
+#define SCTP_INP_WUNLOCK(_inp) \
+	(void)pthread_mutex_unlock(&(_inp)->inp_mtx)
+#define SCTP_ASOC_CREATE_UNLOCK(_inp) \
+	(void)pthread_mutex_unlock(&(_inp)->inp_create_mtx)
+
+/*
+ * For the majority of things (once we have found the association) we will
+ * lock the actual association mutex. This will protect all the association
+ * level queues and streams and such. We will need to lock the socket layer
+ * when we stuff data up into the receiving sb_mb. I.e. we will need to do an
+ * extra SOCKBUF_LOCK(&so->so_rcv) even though the association is locked.
+ */
+
+#define SCTP_TCB_LOCK_INIT(_tcb) \
+	(void)pthread_mutex_init(&(_tcb)->tcb_mtx, NULL)
+
+#define SCTP_TCB_LOCK_DESTROY(_tcb) \
+	(void)pthread_mutex_destroy(&(_tcb)->tcb_mtx)
+
+#ifdef SCTP_LOCK_LOGGING
+#define SCTP_TCB_LOCK(_tcb)  do {					\
+	if(SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) sctp_log_lock(_tcb->sctp_ep, _tcb, SCTP_LOG_LOCK_TCB);		\
+	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx);			\
+} while (0)
+
+#else
+#define SCTP_TCB_LOCK(_tcb)  do {					\
+	(void)pthread_mutex_lock(&(_tcb)->tcb_mtx);			\
+} while (0)
+#endif
+
+#define SCTP_TCB_TRYLOCK(_tcb) 	(!(pthread_mutex_trylock(&(_tcb)->tcb_mtx)))
+
+#define SCTP_TCB_UNLOCK(_tcb)	(void)pthread_mutex_unlock(&(_tcb)->tcb_mtx)
+
+#define SCTP_TCB_LOCK_ASSERT(_tcb)
+#endif
+
+#endif /* SCTP_PER_SOCKET_LOCKING */
+
+
+/*
+ * common locks
+ */
+
+/* copied over to compile */
+#define SCTP_INP_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+#define SCTP_INP_READ_CONTENDED(_inp) (0) /* Don't know if this is possible */
+#define SCTP_ASOC_CREATE_LOCK_CONTENDED(_inp) (0) /* Don't know if this is possible */
+
+
+/* socket locks */
+
+#if defined(__Userspace__)
+#if defined(__Userspace_os_Windows)
+#define SOCKBUF_LOCK_ASSERT(_so_buf)
+#define SOCKBUF_LOCK(_so_buf) EnterCriticalSection(&(_so_buf)->sb_mtx)
+#define SOCKBUF_UNLOCK(_so_buf) LeaveCriticalSection(&(_so_buf)->sb_mtx)
+#define SOCK_LOCK(_so)  SOCKBUF_LOCK(&(_so)->so_rcv)
+#define SOCK_UNLOCK(_so)  SOCKBUF_UNLOCK(&(_so)->so_rcv)
+#else
+#define SOCKBUF_LOCK_ASSERT(_so_buf) KASSERT(pthread_mutex_trylock(SOCKBUF_MTX(_so_buf)) == EBUSY, ("%s: socket buffer not locked", __func__))
+#define SOCKBUF_LOCK(_so_buf)   pthread_mutex_lock(SOCKBUF_MTX(_so_buf))
+#define SOCKBUF_UNLOCK(_so_buf) pthread_mutex_unlock(SOCKBUF_MTX(_so_buf))
+#define	SOCK_LOCK(_so)		SOCKBUF_LOCK(&(_so)->so_rcv)
+#define	SOCK_UNLOCK(_so)	SOCKBUF_UNLOCK(&(_so)->so_rcv)
+#endif
+#else
+#define SOCK_LOCK(_so)
+#define SOCK_UNLOCK(_so)
+#define SOCKBUF_LOCK(_so_buf)
+#define SOCKBUF_UNLOCK(_so_buf)
+#define SOCKBUF_LOCK_ASSERT(_so_buf)
+#endif
+
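+/*
+ * Example sketch (illustration only): the lock nesting described above,
+ * taking the association (TCB) lock first and then the socket receive
+ * buffer lock while data is appended.  The helper name and the append step
+ * are assumed for the example.
+ */
+#if 0
+static void
+example_deliver(struct sctp_tcb *stcb, struct socket *so)
+{
+	SCTP_TCB_LOCK(stcb);
+	SOCKBUF_LOCK(&so->so_rcv);
+	/* ... append data to so->so_rcv here ... */
+	SOCKBUF_UNLOCK(&so->so_rcv);
+	SCTP_TCB_UNLOCK(stcb);
+}
+#endif
+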
+#define SCTP_STATLOG_INIT_LOCK()
+#define SCTP_STATLOG_LOCK()
+#define SCTP_STATLOG_UNLOCK()
+#define SCTP_STATLOG_DESTROY()
+
+#if defined(__Userspace_os_Windows)
+/* address list locks */
+#define SCTP_IPI_ADDR_INIT() \
+	InitializeCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+#define SCTP_IPI_ADDR_DESTROY() \
+	DeleteCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+
+#define SCTP_IPI_ADDR_RLOCK() 						\
+	do { 								\
+		EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx));	\
+	} while (0)
+#define SCTP_IPI_ADDR_RUNLOCK() \
+	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+
+#define SCTP_IPI_ADDR_WLOCK() 						\
+	do { 								\
+		EnterCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx));	\
+	} while (0)
+#define SCTP_IPI_ADDR_WUNLOCK() \
+	LeaveCriticalSection(&SCTP_BASE_INFO(ipi_addr_mtx))
+
+
+/* iterator locks */
+#define SCTP_ITERATOR_LOCK_INIT() \
+	InitializeCriticalSection(&sctp_it_ctl.it_mtx)
+
+#define SCTP_ITERATOR_LOCK() 						\
+	do {								\
+		EnterCriticalSection(&sctp_it_ctl.it_mtx);		\
+	} while (0)
+
+#define SCTP_ITERATOR_UNLOCK() \
+	LeaveCriticalSection(&sctp_it_ctl.it_mtx)
+
+#define SCTP_ITERATOR_LOCK_DESTROY() \
+	DeleteCriticalSection(&sctp_it_ctl.it_mtx)
+
+
+#define SCTP_IPI_ITERATOR_WQ_INIT() \
+	InitializeCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
+
+#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
+	DeleteCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
+
+#define SCTP_IPI_ITERATOR_WQ_LOCK() \
+	do { \
+		EnterCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx); \
+	} while (0)
+
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
+	LeaveCriticalSection(&sctp_it_ctl.ipi_iterator_wq_mtx)
+
+#else /* end of __Userspace_os_Windows */
+/* address list locks */
+#define SCTP_IPI_ADDR_INIT() \
+	(void)pthread_mutex_init(&SCTP_BASE_INFO(ipi_addr_mtx), NULL)
+#define SCTP_IPI_ADDR_DESTROY() \
+	(void)pthread_mutex_destroy(&SCTP_BASE_INFO(ipi_addr_mtx))
+
+#define SCTP_IPI_ADDR_RLOCK() 						\
+	do { 								\
+		(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx));	\
+	} while (0)
+#define SCTP_IPI_ADDR_RUNLOCK() \
+	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
+
+#define SCTP_IPI_ADDR_WLOCK() 						\
+	do { 								\
+		(void)pthread_mutex_lock(&SCTP_BASE_INFO(ipi_addr_mtx));	\
+	} while (0)
+#define SCTP_IPI_ADDR_WUNLOCK() \
+	(void)pthread_mutex_unlock(&SCTP_BASE_INFO(ipi_addr_mtx))
+
+
+/* iterator locks */
+#define SCTP_ITERATOR_LOCK_INIT() \
+	(void)pthread_mutex_init(&sctp_it_ctl.it_mtx, NULL)
+
+#define SCTP_ITERATOR_LOCK() 						\
+	do {								\
+		(void)pthread_mutex_lock(&sctp_it_ctl.it_mtx);		\
+	} while (0)
+
+#define SCTP_ITERATOR_UNLOCK() \
+	(void)pthread_mutex_unlock(&sctp_it_ctl.it_mtx)
+
+#define SCTP_ITERATOR_LOCK_DESTROY() \
+	(void)pthread_mutex_destroy(&sctp_it_ctl.it_mtx)
+
+
+#define SCTP_IPI_ITERATOR_WQ_INIT() \
+	(void)pthread_mutex_init(&sctp_it_ctl.ipi_iterator_wq_mtx, NULL)
+
+#define SCTP_IPI_ITERATOR_WQ_DESTROY() \
+	(void)pthread_mutex_destroy(&sctp_it_ctl.ipi_iterator_wq_mtx)
+
+#define SCTP_IPI_ITERATOR_WQ_LOCK() \
+	do { \
+		(void)pthread_mutex_lock(&sctp_it_ctl.ipi_iterator_wq_mtx); \
+	} while (0)
+
+#define SCTP_IPI_ITERATOR_WQ_UNLOCK() \
+	(void)pthread_mutex_unlock(&sctp_it_ctl.ipi_iterator_wq_mtx)
+#endif
+
+#define SCTP_INCR_EP_COUNT() \
+	do { \
+		atomic_add_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
+	} while (0)
+
+#define SCTP_DECR_EP_COUNT() \
+	do { \
+	       atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_ep), 1); \
+	} while (0)
+
+#define SCTP_INCR_ASOC_COUNT() \
+	do { \
+	       atomic_add_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
+	} while (0)
+
+#define SCTP_DECR_ASOC_COUNT() \
+	do { \
+	       atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_asoc), 1); \
+	} while (0)
+
+#define SCTP_INCR_LADDR_COUNT() \
+	do { \
+	       atomic_add_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
+	} while (0)
+
+#define SCTP_DECR_LADDR_COUNT() \
+	do { \
+	       atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_laddr), 1); \
+	} while (0)
+
+#define SCTP_INCR_RADDR_COUNT() \
+	do { \
+ 	       atomic_add_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
+	} while (0)
+
+#define SCTP_DECR_RADDR_COUNT() \
+	do { \
+ 	       atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_raddr), 1); \
+	} while (0)
+
+#define SCTP_INCR_CHK_COUNT() \
+	do { \
+  	       atomic_add_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
+	} while (0)
+
+#define SCTP_DECR_CHK_COUNT() \
+	do { \
+  	       atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_chunk), 1); \
+	} while (0)
+
+#define SCTP_INCR_READQ_COUNT() \
+	do { \
+	       atomic_add_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
+	} while (0)
+
+#define SCTP_DECR_READQ_COUNT() \
+	do { \
+	       atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_readq), 1); \
+	} while (0)
+
+#define SCTP_INCR_STRMOQ_COUNT() \
+	do { \
+	       atomic_add_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
+	} while (0)
+
+#define SCTP_DECR_STRMOQ_COUNT() \
+	do { \
+	       atomic_subtract_int(&SCTP_BASE_INFO(ipi_count_strmoq), 1); \
+	} while (0)
+
+#endif
diff --git a/usrsctplib/netinet/sctp_sha1.c b/usrsctplib/netinet/sctp_sha1.c
new file mode 100755
index 0000000..c86517f
--- /dev/null
+++ b/usrsctplib/netinet/sctp_sha1.c
@@ -0,0 +1,327 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2013, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2013,      by Lally Singh. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <netinet/sctp_sha1.h>
+
+#if defined(SCTP_USE_NSS_SHA1)
+/* A SHA-1 Digest is 160 bits, or 20 bytes */
+#define SHA_DIGEST_LENGTH (20)
+
+void
+sctp_sha1_init(struct sctp_sha1_context *ctx)
+{
+	ctx->pk11_ctx = PK11_CreateDigestContext(SEC_OID_SHA1);
+	PK11_DigestBegin(ctx->pk11_ctx);
+}
+
+void
+sctp_sha1_update(struct sctp_sha1_context *ctx, const unsigned char *ptr, unsigned int siz)
+{
+	PK11_DigestOp(ctx->pk11_ctx, ptr, siz);
+}
+
+void
+sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx)
+{
+	unsigned int output_len = 0;
+
+	PK11_DigestFinal(ctx->pk11_ctx, digest, &output_len, SHA_DIGEST_LENGTH);
+	PK11_DestroyContext(ctx->pk11_ctx, PR_TRUE);
+}
+
+#elif defined(SCTP_USE_OPENSSL_SHA1)
+
+void
+sctp_sha1_init(struct sctp_sha1_context *ctx)
+{
+	SHA1_Init(&ctx->sha_ctx);
+}
+
+void
+sctp_sha1_update(struct sctp_sha1_context *ctx, const unsigned char *ptr, unsigned int siz)
+{
+	SHA1_Update(&ctx->sha_ctx, ptr, (unsigned long)siz);
+}
+
+void
+sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx)
+{
+	SHA1_Final(digest, &ctx->sha_ctx);
+}
+
+#else
+
+#include <string.h>
+#if defined(__Userspace_os_Windows)
+#include <winsock2.h>
+#elif !defined(__Windows__)
+#include <arpa/inet.h>
+#endif
+
+#define F1(B,C,D) (((B & C) | ((~B) & D)))	/* 0  <= t <= 19 */
+#define F2(B,C,D) (B ^ C ^ D)	/* 20 <= t <= 39 */
+#define F3(B,C,D) ((B & C) | (B & D) | (C & D))	/* 40 <= t <= 59 */
+#define F4(B,C,D) (B ^ C ^ D)	/* 60 <= t <= 79 */
+
+/* circular shift */
+#define CSHIFT(A,B) ((B << A) | (B >> (32-A)))
+
+#define K1 0x5a827999		/* 0  <= t <= 19 */
+#define K2 0x6ed9eba1		/* 20 <= t <= 39 */
+#define K3 0x8f1bbcdc		/* 40 <= t <= 59 */
+#define K4 0xca62c1d6		/* 60 <= t <= 79 */
+
+#define H0INIT 0x67452301
+#define H1INIT 0xefcdab89
+#define H2INIT 0x98badcfe
+#define H3INIT 0x10325476
+#define H4INIT 0xc3d2e1f0
+
+void
+sctp_sha1_init(struct sctp_sha1_context *ctx)
+{
+	/* Init the SHA-1 context structure */
+	ctx->A = 0;
+	ctx->B = 0;
+	ctx->C = 0;
+	ctx->D = 0;
+	ctx->E = 0;
+	ctx->H0 = H0INIT;
+	ctx->H1 = H1INIT;
+	ctx->H2 = H2INIT;
+	ctx->H3 = H3INIT;
+	ctx->H4 = H4INIT;
+	ctx->TEMP = 0;
+	memset(ctx->words, 0, sizeof(ctx->words));
+	ctx->how_many_in_block = 0;
+	ctx->running_total = 0;
+}
+
+static void
+sctp_sha1_process_a_block(struct sctp_sha1_context *ctx, unsigned int *block)
+{
+	int i;
+
+	/* init the W0-W15 to the block of words being hashed. */
+	/* step a) */
+	for (i = 0; i < 16; i++) {
+		ctx->words[i] = ntohl(block[i]);
+	}
+	/* now init the rest based on the SHA-1 formula, step b) */
+	for (i = 16; i < 80; i++) {
+		ctx->words[i] = CSHIFT(1, ((ctx->words[(i - 3)]) ^
+		    (ctx->words[(i - 8)]) ^
+		    (ctx->words[(i - 14)]) ^
+		    (ctx->words[(i - 16)])));
+	}
+	/* step c) */
+	ctx->A = ctx->H0;
+	ctx->B = ctx->H1;
+	ctx->C = ctx->H2;
+	ctx->D = ctx->H3;
+	ctx->E = ctx->H4;
+
+	/* step d) */
+	for (i = 0; i < 80; i++) {
+		if (i < 20) {
+			ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+			    (F1(ctx->B, ctx->C, ctx->D)) +
+			    (ctx->E) +
+			    ctx->words[i] +
+			    K1);
+		} else if (i < 40) {
+			ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+			    (F2(ctx->B, ctx->C, ctx->D)) +
+			    (ctx->E) +
+			    (ctx->words[i]) +
+			    K2);
+		} else if (i < 60) {
+			ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+			    (F3(ctx->B, ctx->C, ctx->D)) +
+			    (ctx->E) +
+			    (ctx->words[i]) +
+			    K3);
+		} else {
+			ctx->TEMP = ((CSHIFT(5, ctx->A)) +
+			    (F4(ctx->B, ctx->C, ctx->D)) +
+			    (ctx->E) +
+			    (ctx->words[i]) +
+			    K4);
+		}
+		ctx->E = ctx->D;
+		ctx->D = ctx->C;
+		ctx->C = CSHIFT(30, ctx->B);
+		ctx->B = ctx->A;
+		ctx->A = ctx->TEMP;
+	}
+	/* step e) */
+	ctx->H0 = (ctx->H0) + (ctx->A);
+	ctx->H1 = (ctx->H1) + (ctx->B);
+	ctx->H2 = (ctx->H2) + (ctx->C);
+	ctx->H3 = (ctx->H3) + (ctx->D);
+	ctx->H4 = (ctx->H4) + (ctx->E);
+}
+
+void
+sctp_sha1_update(struct sctp_sha1_context *ctx, const unsigned char *ptr, unsigned int siz)
+{
+	unsigned int number_left, left_to_fill;
+
+	number_left = siz;
+	while (number_left > 0) {
+		left_to_fill = sizeof(ctx->sha_block) - ctx->how_many_in_block;
+		if (left_to_fill > number_left) {
+			/* can only partially fill up this one */
+			memcpy(&ctx->sha_block[ctx->how_many_in_block],
+			    ptr, number_left);
+			ctx->how_many_in_block += number_left;
+			ctx->running_total += number_left;
+			break;
+		} else {
+			/* block is now full, process it */
+			memcpy(&ctx->sha_block[ctx->how_many_in_block],
+			    ptr, left_to_fill);
+			sctp_sha1_process_a_block(ctx,
+			    (unsigned int *)ctx->sha_block);
+			number_left -= left_to_fill;
+			ctx->running_total += left_to_fill;
+			ctx->how_many_in_block = 0;
+			ptr = (const unsigned char *)(ptr + left_to_fill);
+		}
+	}
+}
+
+void
+sctp_sha1_final(unsigned char *digest, struct sctp_sha1_context *ctx)
+{
+	/*
+	 * If anything is left in the block, fill it with padding and process
+	 * it, then transfer the digest to the pointer. Some special rules
+	 * apply to the last block: we must append a 1 bit after the message
+	 * and then pad with 0's, and the total size is encoded as a 64 bit
+	 * number at the end. If the last buffer has more than 55 octets in
+	 * it we cannot fit the 64 bit number plus the 10000000 pad at the
+	 * end, so we add the 10000000 pad, fill the rest of the block with
+	 * 0's, and then create an all 0 block with just the 64 bit size at
+	 * the end and run that block through by itself. Also, the 64 bit
+	 * length must be in network byte order.
+	 */
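+	/*
+	 * Worked example (added for illustration, values assumed): after a
+	 * 10 octet message, how_many_in_block and running_total are both 10.
+	 * Octet 10 of the block becomes 0x80, octets 11..59 become 0x00, and
+	 * octets 60..63 carry htonl(10 * 8) = 80, the message length in
+	 * bits.  Only when more than 55 octets are pending does the length
+	 * spill into a second, otherwise all zero, block.
+	 */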
+	int left_to_fill;
+	unsigned int i, *ptr;
+
+	if (ctx->how_many_in_block > 55) {
+		/*
+		 * special case, we need to process two blocks here. One for
+		 * the current stuff plus possibly the pad. The other for
+		 * the size.
+		 */
+		left_to_fill = sizeof(ctx->sha_block) - ctx->how_many_in_block;
+		if (left_to_fill == 0) {
+			/* Should not really happen but I am paranoid */
+			sctp_sha1_process_a_block(ctx,
+			    (unsigned int *)ctx->sha_block);
+			/* init last block, a bit different than the rest */
+			ctx->sha_block[0] = '\x80';
+			for (i = 1; i < sizeof(ctx->sha_block); i++) {
+				ctx->sha_block[i] = 0x0;
+			}
+		} else if (left_to_fill == 1) {
+			ctx->sha_block[ctx->how_many_in_block] = '\x80';
+			sctp_sha1_process_a_block(ctx,
+			    (unsigned int *)ctx->sha_block);
+			/* init last block */
+			memset(ctx->sha_block, 0, sizeof(ctx->sha_block));
+		} else {
+			ctx->sha_block[ctx->how_many_in_block] = '\x80';
+			for (i = (ctx->how_many_in_block + 1);
+			    i < sizeof(ctx->sha_block);
+			    i++) {
+				ctx->sha_block[i] = 0x0;
+			}
+			sctp_sha1_process_a_block(ctx,
+			    (unsigned int *)ctx->sha_block);
+			/* init last block */
+			memset(ctx->sha_block, 0, sizeof(ctx->sha_block));
+		}
+		/* This is in bits so multiply by 8 */
+		ctx->running_total *= 8;
+		ptr = (unsigned int *)&ctx->sha_block[60];
+		*ptr = htonl(ctx->running_total);
+		sctp_sha1_process_a_block(ctx, (unsigned int *)ctx->sha_block);
+	} else {
+		/*
+		 * easy case, we just pad this message to size - end with 0
+		 * add the magic 0x80 to the next word and then put the
+		 * network byte order size in the last spot and process the
+		 * block.
+		 */
+		ctx->sha_block[ctx->how_many_in_block] = '\x80';
+		for (i = (ctx->how_many_in_block + 1);
+		    i < sizeof(ctx->sha_block);
+		    i++) {
+			ctx->sha_block[i] = 0x0;
+		}
+		/* get last int spot */
+		ctx->running_total *= 8;
+		ptr = (unsigned int *)&ctx->sha_block[60];
+		*ptr = htonl(ctx->running_total);
+		sctp_sha1_process_a_block(ctx, (unsigned int *)ctx->sha_block);
+	}
+	/* transfer the digest back to the user */
+	digest[3] = (ctx->H0 & 0xff);
+	digest[2] = ((ctx->H0 >> 8) & 0xff);
+	digest[1] = ((ctx->H0 >> 16) & 0xff);
+	digest[0] = ((ctx->H0 >> 24) & 0xff);
+
+	digest[7] = (ctx->H1 & 0xff);
+	digest[6] = ((ctx->H1 >> 8) & 0xff);
+	digest[5] = ((ctx->H1 >> 16) & 0xff);
+	digest[4] = ((ctx->H1 >> 24) & 0xff);
+
+	digest[11] = (ctx->H2 & 0xff);
+	digest[10] = ((ctx->H2 >> 8) & 0xff);
+	digest[9] = ((ctx->H2 >> 16) & 0xff);
+	digest[8] = ((ctx->H2 >> 24) & 0xff);
+
+	digest[15] = (ctx->H3 & 0xff);
+	digest[14] = ((ctx->H3 >> 8) & 0xff);
+	digest[13] = ((ctx->H3 >> 16) & 0xff);
+	digest[12] = ((ctx->H3 >> 24) & 0xff);
+
+	digest[19] = (ctx->H4 & 0xff);
+	digest[18] = ((ctx->H4 >> 8) & 0xff);
+	digest[17] = ((ctx->H4 >> 16) & 0xff);
+	digest[16] = ((ctx->H4 >> 24) & 0xff);
+}
+
+#endif
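+
+/*
+ * Example sketch (illustration only): computing a digest with the API
+ * above.  The buffer and its length are assumed for the example; the
+ * 20 octet digest size follows from SHA-1.
+ */
+#if 0
+static void
+example_digest(const unsigned char *data, unsigned int length,
+               unsigned char digest[20])
+{
+	struct sctp_sha1_context ctx;
+
+	sctp_sha1_init(&ctx);
+	sctp_sha1_update(&ctx, data, length);
+	sctp_sha1_final(digest, &ctx);
+}
+#endif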
diff --git a/usrsctplib/netinet/sctp_sha1.h b/usrsctplib/netinet/sctp_sha1.h
new file mode 100755
index 0000000..01e3c2e
--- /dev/null
+++ b/usrsctplib/netinet/sctp_sha1.h
@@ -0,0 +1,97 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#endif
+
+
+#ifndef __NETINET_SCTP_SHA1_H__
+#define __NETINET_SCTP_SHA1_H__
+
+#include <sys/types.h>
+#if defined(SCTP_USE_NSS_SHA1)
+#if defined(__Userspace_os_Darwin)
+/* The NSS sources require __APPLE__ to be defined.
+ * XXX: Remove this ugly hack once the platform defines have been cleaned up.
+ */
+#define __APPLE__
+#endif
+#include <pk11pub.h>
+#if defined(__Userspace_os_Darwin)
+#undef __APPLE__
+#endif
+#elif defined(SCTP_USE_OPENSSL_SHA1)
+#include <openssl/sha.h>
+#endif
+
+struct sctp_sha1_context {
+#if defined(SCTP_USE_NSS_SHA1)
+	struct PK11Context *pk11_ctx;
+#elif defined(SCTP_USE_OPENSSL_SHA1)
+	SHA_CTX sha_ctx;
+#else
+	unsigned int A;
+	unsigned int B;
+	unsigned int C;
+	unsigned int D;
+	unsigned int E;
+	unsigned int H0;
+	unsigned int H1;
+	unsigned int H2;
+	unsigned int H3;
+	unsigned int H4;
+	unsigned int words[80];
+	unsigned int TEMP;
+	/* block I am collecting to process */
+	char sha_block[64];
+	/* collected so far */
+	int how_many_in_block;
+	unsigned int running_total;
+#endif
+};
+
+#if (defined(__APPLE__) && defined(KERNEL))
+#ifndef _KERNEL
+#define _KERNEL
+#endif
+#endif
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+void sctp_sha1_init(struct sctp_sha1_context *);
+void sctp_sha1_update(struct sctp_sha1_context *, const unsigned char *, unsigned int);
+void sctp_sha1_final(unsigned char *, struct sctp_sha1_context *);
+
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_ss_functions.c b/usrsctplib/netinet/sctp_ss_functions.c
new file mode 100755
index 0000000..ded79f6
--- /dev/null
+++ b/usrsctplib/netinet/sctp_ss_functions.c
@@ -0,0 +1,1078 @@
+/*-
+ * Copyright (c) 2010-2012, by Michael Tuexen. All rights reserved.
+ * Copyright (c) 2010-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2010-2012, by Robin Seggelmann. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_ss_functions.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_pcb.h>
+#if defined(__Userspace__)
+#include <netinet/sctp_os_userspace.h>
+#endif
+
+/*
+ * Default simple round-robin algorithm.
+ * Just iterates the streams in the order they appear.
+ */
+
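+/*
+ * For illustration: with pending data on streams 3, 1 and 5, added to the
+ * wheel in that order, this scheduler serves them 3, 1, 5, 3, 1, 5, ...
+ * (insertion order), whereas the "real" round-robin scheduler further down
+ * keeps the wheel sorted by stream id and serves them 1, 3, 5, 1, 3, 5, ...
+ */
+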
+static void
+sctp_ss_default_add(struct sctp_tcb *, struct sctp_association *,
+                    struct sctp_stream_out *,
+                    struct sctp_stream_queue_pending *, int);
+
+static void
+sctp_ss_default_remove(struct sctp_tcb *, struct sctp_association *,
+                       struct sctp_stream_out *,
+                       struct sctp_stream_queue_pending *, int);
+
+static void
+sctp_ss_default_init(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                     int holds_lock)
+{
+	uint16_t i;
+
+	asoc->ss_data.locked_on_sending = NULL;
+	asoc->ss_data.last_out_stream = NULL;
+	TAILQ_INIT(&asoc->ss_data.out.wheel);
+	/*
+	 * If there is data in the stream queues already,
+	 * the scheduler of an existing association has
+	 * been changed. We need to add all stream queues
+	 * to the wheel.
+	 */
+	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+		stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, &stcb->asoc,
+		                                              &stcb->asoc.strmout[i],
+		                                              NULL, holds_lock);
+	}
+	return;
+}
+
+static void
+sctp_ss_default_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                      int clear_values SCTP_UNUSED, int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+		struct sctp_stream_out *strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+		TAILQ_REMOVE(&asoc->ss_data.out.wheel, TAILQ_FIRST(&asoc->ss_data.out.wheel), ss_params.rr.next_spoke);
+		strq->ss_params.rr.next_spoke.tqe_next = NULL;
+		strq->ss_params.rr.next_spoke.tqe_prev = NULL;
+	}
+	asoc->ss_data.last_out_stream = NULL;
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static void
+sctp_ss_default_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+	if (with_strq != NULL) {
+		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+			stcb->asoc.ss_data.locked_on_sending = strq;
+		}
+		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+			stcb->asoc.ss_data.last_out_stream = strq;
+		}
+	}
+	strq->ss_params.rr.next_spoke.tqe_next = NULL;
+	strq->ss_params.rr.next_spoke.tqe_prev = NULL;
+	return;
+}
+
+static void
+sctp_ss_default_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                    struct sctp_stream_out *strq,
+                    struct sctp_stream_queue_pending *sp SCTP_UNUSED, int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	/* Add to wheel if not already on it and stream queue not empty */
+	if (!TAILQ_EMPTY(&strq->outqueue) &&
+	    (strq->ss_params.rr.next_spoke.tqe_next == NULL) &&
+	    (strq->ss_params.rr.next_spoke.tqe_prev == NULL)) {
+		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel,
+		                  strq, ss_params.rr.next_spoke);
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static int
+sctp_ss_default_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
+{
+	if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+		return (1);
+	} else {
+		return (0);
+	}
+}
+
+static void
+sctp_ss_default_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                       struct sctp_stream_out *strq,
+                       struct sctp_stream_queue_pending *sp SCTP_UNUSED, int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	/* Remove from wheel if stream queue is empty and actually is on the wheel */
+	if (TAILQ_EMPTY(&strq->outqueue) &&
+	    (strq->ss_params.rr.next_spoke.tqe_next != NULL ||
+	    strq->ss_params.rr.next_spoke.tqe_prev != NULL)) {
+		if (asoc->ss_data.last_out_stream == strq) {
+			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream,
+			                                   sctpwheel_listhead,
+			                                   ss_params.rr.next_spoke);
+			if (asoc->ss_data.last_out_stream == NULL) {
+				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
+				                                   sctpwheel_listhead);
+			}
+			if (asoc->ss_data.last_out_stream == strq) {
+				asoc->ss_data.last_out_stream = NULL;
+			}
+		}
+		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.rr.next_spoke);
+		strq->ss_params.rr.next_spoke.tqe_next = NULL;
+		strq->ss_params.rr.next_spoke.tqe_prev = NULL;
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+
+static struct sctp_stream_out *
+sctp_ss_default_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+                       struct sctp_association *asoc)
+{
+	struct sctp_stream_out *strq, *strqt;
+
+	if (asoc->ss_data.locked_on_sending) {
+		return (asoc->ss_data.locked_on_sending);
+	}
+	strqt = asoc->ss_data.last_out_stream;
+default_again:
+	/* Find the next stream to use */
+	if (strqt == NULL) {
+		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+	} else {
+		strq = TAILQ_NEXT(strqt, ss_params.rr.next_spoke);
+		if (strq == NULL) {
+			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+		}
+	}
+
+	/* If CMT is off, we must validate that
+	 * the first item on the stream in question
+	 * is pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+	if (net != NULL && strq != NULL &&
+	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+		if (TAILQ_FIRST(&strq->outqueue) &&
+		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+		    TAILQ_FIRST(&strq->outqueue)->net != net) {
+			if (strq == asoc->ss_data.last_out_stream) {
+				return (NULL);
+			} else {
+				strqt = strq;
+				goto default_again;
+			}
+		}
+	}
+	return (strq);
+}
+
+static void
+sctp_ss_default_scheduled(struct sctp_tcb *stcb,
+                          struct sctp_nets *net SCTP_UNUSED,
+                          struct sctp_association *asoc,
+                          struct sctp_stream_out *strq,
+                          int moved_how_much SCTP_UNUSED)
+{
+	struct sctp_stream_queue_pending *sp;
+
+	asoc->ss_data.last_out_stream = strq;
+	if (stcb->asoc.idata_supported == 0) {
+		sp = TAILQ_FIRST(&strq->outqueue);
+		if ((sp != NULL) && (sp->some_taken == 1)) {
+			stcb->asoc.ss_data.locked_on_sending = strq;
+		} else {
+			stcb->asoc.ss_data.locked_on_sending = NULL;
+		}
+	} else {
+		stcb->asoc.ss_data.locked_on_sending = NULL;
+	}
+	return;
+}
+
+static void
+sctp_ss_default_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
+                            struct sctp_association *asoc SCTP_UNUSED)
+{
+	/* Nothing to be done here */
+	return;
+}
+
+static int
+sctp_ss_default_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
+                          struct sctp_stream_out *strq SCTP_UNUSED, uint16_t *value SCTP_UNUSED)
+{
+	/* Nothing to be done here */
+	return (-1);
+}
+
+static int
+sctp_ss_default_set_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
+                          struct sctp_stream_out *strq SCTP_UNUSED, uint16_t value SCTP_UNUSED)
+{
+	/* Nothing to be done here */
+	return (-1);
+}
+
+static int
+sctp_ss_default_is_user_msgs_incomplete(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED)
+{
+	return (0);
+}
+
+/*
+ * Real round-robin algorithm.
+ * Always iterates the streams in ascending order.
+ */
+static void
+sctp_ss_rr_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+               struct sctp_stream_out *strq,
+               struct sctp_stream_queue_pending *sp SCTP_UNUSED, int holds_lock)
+{
+	struct sctp_stream_out *strqt;
+
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	if (!TAILQ_EMPTY(&strq->outqueue) &&
+	    (strq->ss_params.rr.next_spoke.tqe_next == NULL) &&
+	    (strq->ss_params.rr.next_spoke.tqe_prev == NULL)) {
+		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.rr.next_spoke);
+		} else {
+			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+			while (strqt != NULL && (strqt->sid < strq->sid)) {
+				strqt = TAILQ_NEXT(strqt, ss_params.rr.next_spoke);
+			}
+			if (strqt != NULL) {
+				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.rr.next_spoke);
+			} else {
+				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.rr.next_spoke);
+			}
+		}
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+/*
+ * Real round-robin per packet algorithm.
+ * Always iterates the streams in ascending order and
+ * only puts messages from the same stream into a packet.
+ */
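+/*
+ * For illustration: rrp_select simply keeps returning last_out_stream, so
+ * every chunk bundled into the current packet comes from the same stream;
+ * the wheel is only advanced in rrp_packet_done once the packet has been
+ * finished. With data queued on streams 1 and 2, one packet is filled from
+ * stream 1, the next from stream 2, and so on.
+ */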
+static struct sctp_stream_out *
+sctp_ss_rrp_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net SCTP_UNUSED,
+                   struct sctp_association *asoc)
+{
+	return (asoc->ss_data.last_out_stream);
+}
+
+static void
+sctp_ss_rrp_packet_done(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+                        struct sctp_association *asoc)
+{
+	struct sctp_stream_out *strq, *strqt;
+
+	strqt = asoc->ss_data.last_out_stream;
+rrp_again:
+	/* Find the next stream to use */
+	if (strqt == NULL) {
+		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+	} else {
+		strq = TAILQ_NEXT(strqt, ss_params.rr.next_spoke);
+		if (strq == NULL) {
+			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+		}
+	}
+
+	/* If CMT is off, we must validate that
+	 * the first item on the stream in question
+	 * is pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+	if (net != NULL && strq != NULL &&
+	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+		if (TAILQ_FIRST(&strq->outqueue) &&
+		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+		    TAILQ_FIRST(&strq->outqueue)->net != net) {
+			if (strq == asoc->ss_data.last_out_stream) {
+				strq = NULL;
+			} else {
+				strqt = strq;
+				goto rrp_again;
+			}
+		}
+	}
+	asoc->ss_data.last_out_stream = strq;
+	return;
+}
+
+
+/*
+ * Priority algorithm.
+ * Always prefers streams based on their priority id.
+ */
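+/*
+ * For illustration, a sketch of how an application could assign a priority
+ * to one outgoing stream; it assumes the SCTP_SS_VALUE socket option and
+ * struct sctp_stream_value from the socket API headers (check the names
+ * against sctp.h / usrsctp.h for the version in use). Lower values are
+ * preferred by this scheduler:
+ *
+ *	struct sctp_stream_value sv;
+ *
+ *	memset(&sv, 0, sizeof(sv));
+ *	sv.assoc_id = assoc_id;
+ *	sv.stream_id = 1;
+ *	sv.stream_value = 5;
+ *	usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_SS_VALUE, &sv, sizeof(sv));
+ */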
+static void
+sctp_ss_prio_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                   int clear_values, int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+		struct sctp_stream_out *strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+		if (clear_values) {
+			strq->ss_params.prio.priority = 0;
+		}
+		TAILQ_REMOVE(&asoc->ss_data.out.wheel, TAILQ_FIRST(&asoc->ss_data.out.wheel), ss_params.prio.next_spoke);
+		strq->ss_params.prio.next_spoke.tqe_next = NULL;
+		strq->ss_params.prio.next_spoke.tqe_prev = NULL;
+
+	}
+	asoc->ss_data.last_out_stream = NULL;
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static void
+sctp_ss_prio_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+	if (with_strq != NULL) {
+		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+			stcb->asoc.ss_data.locked_on_sending = strq;
+		}
+		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+			stcb->asoc.ss_data.last_out_stream = strq;
+		}
+	}
+	strq->ss_params.prio.next_spoke.tqe_next = NULL;
+	strq->ss_params.prio.next_spoke.tqe_prev = NULL;
+	if (with_strq != NULL) {
+		strq->ss_params.prio.priority = with_strq->ss_params.prio.priority;
+	} else {
+		strq->ss_params.prio.priority = 0;
+	}
+	return;
+}
+
+static void
+sctp_ss_prio_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+                 int holds_lock)
+{
+	struct sctp_stream_out *strqt;
+
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	/* Add to wheel if not already on it and stream queue not empty */
+	if (!TAILQ_EMPTY(&strq->outqueue) &&
+	    (strq->ss_params.prio.next_spoke.tqe_next == NULL) &&
+	    (strq->ss_params.prio.next_spoke.tqe_prev == NULL)) {
+		if (TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+			TAILQ_INSERT_HEAD(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke);
+		} else {
+			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+			while (strqt != NULL && strqt->ss_params.prio.priority < strq->ss_params.prio.priority) {
+				strqt = TAILQ_NEXT(strqt, ss_params.prio.next_spoke);
+			}
+			if (strqt != NULL) {
+				TAILQ_INSERT_BEFORE(strqt, strq, ss_params.prio.next_spoke);
+			} else {
+				TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke);
+			}
+		}
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static void
+sctp_ss_prio_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                    struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+                    int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	/* Remove from wheel if stream queue is empty and actually is on the wheel */
+	if (TAILQ_EMPTY(&strq->outqueue) &&
+	    (strq->ss_params.prio.next_spoke.tqe_next != NULL ||
+	    strq->ss_params.prio.next_spoke.tqe_prev != NULL)) {
+		if (asoc->ss_data.last_out_stream == strq) {
+			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream, sctpwheel_listhead,
+			                                   ss_params.prio.next_spoke);
+			if (asoc->ss_data.last_out_stream == NULL) {
+				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
+				                                   sctpwheel_listhead);
+			}
+			if (asoc->ss_data.last_out_stream == strq) {
+				asoc->ss_data.last_out_stream = NULL;
+			}
+		}
+		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.prio.next_spoke);
+		strq->ss_params.prio.next_spoke.tqe_next = NULL;
+		strq->ss_params.prio.next_spoke.tqe_prev = NULL;
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static struct sctp_stream_out*
+sctp_ss_prio_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+                    struct sctp_association *asoc)
+{
+	struct sctp_stream_out *strq, *strqt, *strqn;
+
+	strqt = asoc->ss_data.last_out_stream;
+prio_again:
+	/* Find the next stream to use */
+	if (strqt == NULL) {
+		strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+	} else {
+		strqn = TAILQ_NEXT(strqt, ss_params.prio.next_spoke);
+		if (strqn != NULL &&
+		    strqn->ss_params.prio.priority == strqt->ss_params.prio.priority) {
+			strq = strqn;
+		} else {
+			strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+		}
+	}
+
+	/* If CMT is off, we must validate that
+	 * the first item on the stream in question
+	 * is pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+	if (net != NULL && strq != NULL &&
+	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+		if (TAILQ_FIRST(&strq->outqueue) &&
+		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+		    TAILQ_FIRST(&strq->outqueue)->net != net) {
+			if (strq == asoc->ss_data.last_out_stream) {
+				return (NULL);
+			} else {
+				strqt = strq;
+				goto prio_again;
+			}
+		}
+	}
+	return (strq);
+}
+
+static int
+sctp_ss_prio_get_value(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc SCTP_UNUSED,
+                       struct sctp_stream_out *strq, uint16_t *value)
+{
+	if (strq == NULL) {
+		return (-1);
+	}
+	*value = strq->ss_params.prio.priority;
+	return (1);
+}
+
+static int
+sctp_ss_prio_set_value(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                       struct sctp_stream_out *strq, uint16_t value)
+{
+	if (strq == NULL) {
+		return (-1);
+	}
+	strq->ss_params.prio.priority = value;
+	sctp_ss_prio_remove(stcb, asoc, strq, NULL, 1);
+	sctp_ss_prio_add(stcb, asoc, strq, NULL, 1);
+	return (1);
+}
+
+/*
+ * Fair bandwidth algorithm.
+ * Maintains an equal throughput per stream.
+ */
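+/*
+ * For illustration: fb.rounds holds the size of the first message queued on
+ * a stream; the stream with the smallest non-negative rounds value is
+ * selected, and once it has been scheduled that value is subtracted from
+ * every stream on the wheel. A stream queueing 100-byte messages is thus
+ * selected roughly four times for every selection of a stream queueing
+ * 400-byte messages, giving both about the same byte rate.
+ */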
+static void
+sctp_ss_fb_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                   int clear_values, int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	while (!TAILQ_EMPTY(&asoc->ss_data.out.wheel)) {
+		struct sctp_stream_out *strq = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+		if (clear_values) {
+			strq->ss_params.fb.rounds = -1;
+		}
+		TAILQ_REMOVE(&asoc->ss_data.out.wheel, TAILQ_FIRST(&asoc->ss_data.out.wheel), ss_params.fb.next_spoke);
+		strq->ss_params.fb.next_spoke.tqe_next = NULL;
+		strq->ss_params.fb.next_spoke.tqe_prev = NULL;
+	}
+	asoc->ss_data.last_out_stream = NULL;
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static void
+sctp_ss_fb_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+	if (with_strq != NULL) {
+		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+			stcb->asoc.ss_data.locked_on_sending = strq;
+		}
+		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+			stcb->asoc.ss_data.last_out_stream = strq;
+		}
+	}
+	strq->ss_params.fb.next_spoke.tqe_next = NULL;
+	strq->ss_params.fb.next_spoke.tqe_prev = NULL;
+	if (with_strq != NULL) {
+		strq->ss_params.fb.rounds = with_strq->ss_params.fb.rounds;
+	} else {
+		strq->ss_params.fb.rounds = -1;
+	}
+	return;
+}
+
+static void
+sctp_ss_fb_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+               struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+               int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	if (!TAILQ_EMPTY(&strq->outqueue) &&
+	    (strq->ss_params.fb.next_spoke.tqe_next == NULL) &&
+	    (strq->ss_params.fb.next_spoke.tqe_prev == NULL)) {
+		if (strq->ss_params.fb.rounds < 0)
+			strq->ss_params.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
+		TAILQ_INSERT_TAIL(&asoc->ss_data.out.wheel, strq, ss_params.fb.next_spoke);
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static void
+sctp_ss_fb_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                  struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp SCTP_UNUSED,
+                  int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	/* Remove from wheel if stream queue is empty and actually is on the wheel */
+	if (TAILQ_EMPTY(&strq->outqueue) &&
+	    (strq->ss_params.fb.next_spoke.tqe_next != NULL ||
+	    strq->ss_params.fb.next_spoke.tqe_prev != NULL)) {
+		if (asoc->ss_data.last_out_stream == strq) {
+			asoc->ss_data.last_out_stream = TAILQ_PREV(asoc->ss_data.last_out_stream, sctpwheel_listhead,
+			                                   ss_params.fb.next_spoke);
+			if (asoc->ss_data.last_out_stream == NULL) {
+				asoc->ss_data.last_out_stream = TAILQ_LAST(&asoc->ss_data.out.wheel,
+				                                   sctpwheel_listhead);
+			}
+			if (asoc->ss_data.last_out_stream == strq) {
+				asoc->ss_data.last_out_stream = NULL;
+			}
+		}
+		TAILQ_REMOVE(&asoc->ss_data.out.wheel, strq, ss_params.fb.next_spoke);
+		strq->ss_params.fb.next_spoke.tqe_next = NULL;
+		strq->ss_params.fb.next_spoke.tqe_prev = NULL;
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static struct sctp_stream_out*
+sctp_ss_fb_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+                  struct sctp_association *asoc)
+{
+	struct sctp_stream_out *strq = NULL, *strqt;
+
+	if (asoc->ss_data.last_out_stream == NULL ||
+	    TAILQ_FIRST(&asoc->ss_data.out.wheel) == TAILQ_LAST(&asoc->ss_data.out.wheel, sctpwheel_listhead)) {
+		strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+	} else {
+		strqt = TAILQ_NEXT(asoc->ss_data.last_out_stream, ss_params.fb.next_spoke);
+	}
+	do {
+		if ((strqt != NULL) &&
+		    ((SCTP_BASE_SYSCTL(sctp_cmt_on_off) > 0) ||
+		     (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0 &&
+		      (net == NULL || (TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net == NULL) ||
+		       (net != NULL && TAILQ_FIRST(&strqt->outqueue) && TAILQ_FIRST(&strqt->outqueue)->net != NULL &&
+		        TAILQ_FIRST(&strqt->outqueue)->net == net))))) {
+			if ((strqt->ss_params.fb.rounds >= 0) && (strq == NULL ||
+				strqt->ss_params.fb.rounds < strq->ss_params.fb.rounds)) {
+				strq = strqt;
+			}
+		}
+		if (strqt != NULL) {
+			strqt = TAILQ_NEXT(strqt, ss_params.fb.next_spoke);
+		} else {
+			strqt = TAILQ_FIRST(&asoc->ss_data.out.wheel);
+		}
+	} while (strqt != strq);
+	return (strq);
+}
+
+static void
+sctp_ss_fb_scheduled(struct sctp_tcb *stcb, struct sctp_nets *net SCTP_UNUSED,
+                     struct sctp_association *asoc, struct sctp_stream_out *strq,
+                     int moved_how_much SCTP_UNUSED)
+{
+	struct sctp_stream_queue_pending *sp;
+	struct sctp_stream_out *strqt;
+	int subtract;
+
+	if (stcb->asoc.idata_supported == 0) {
+		sp = TAILQ_FIRST(&strq->outqueue);
+		if ((sp != NULL) && (sp->some_taken == 1)) {
+			stcb->asoc.ss_data.locked_on_sending = strq;
+		} else {
+			stcb->asoc.ss_data.locked_on_sending = NULL;
+		}
+	} else {
+		stcb->asoc.ss_data.locked_on_sending = NULL;
+	}
+	subtract = strq->ss_params.fb.rounds;
+	TAILQ_FOREACH(strqt, &asoc->ss_data.out.wheel, ss_params.fb.next_spoke) {
+		strqt->ss_params.fb.rounds -= subtract;
+		if (strqt->ss_params.fb.rounds < 0)
+			strqt->ss_params.fb.rounds = 0;
+	}
+	if (TAILQ_FIRST(&strq->outqueue)) {
+		strq->ss_params.fb.rounds = TAILQ_FIRST(&strq->outqueue)->length;
+	} else {
+		strq->ss_params.fb.rounds = -1;
+	}
+	asoc->ss_data.last_out_stream = strq;
+	return;
+}
+
+/*
+ * First-come, first-serve algorithm.
+ * Maintains the order provided by the application.
+ */
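+/*
+ * For illustration: the scheduling unit here is the individual message, not
+ * the stream. Messages queued by the application as (sid 5, sid 1, sid 5)
+ * are sent in exactly that order, regardless of their stream numbers.
+ */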
+static void
+sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                 struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp,
+                 int holds_lock);
+
+static void
+sctp_ss_fcfs_init(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                  int holds_lock)
+{
+	uint32_t x, n = 0, add_more = 1;
+	struct sctp_stream_queue_pending *sp;
+	uint16_t i;
+
+	TAILQ_INIT(&asoc->ss_data.out.list);
+	/*
+	 * If there is data in the stream queues already,
+	 * the scheduler of an existing association has
+	 * been changed. We can only cycle through the
+	 * stream queues and add everything to the FCFS
+	 * queue.
+	 */
+	while (add_more) {
+		add_more = 0;
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
+			x = 0;
+			/* Find the n-th message in the current stream queue */
+			while (sp != NULL && x < n) {
+				sp = TAILQ_NEXT(sp, next);
+				x++;
+			}
+			if (sp != NULL) {
+				sctp_ss_fcfs_add(stcb, &stcb->asoc, &stcb->asoc.strmout[i], sp, holds_lock);
+				add_more = 1;
+			}
+		}
+		n++;
+	}
+	return;
+}
+
+static void
+sctp_ss_fcfs_clear(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                   int clear_values, int holds_lock)
+{
+	if (clear_values) {
+		if (holds_lock == 0) {
+			SCTP_TCB_SEND_LOCK(stcb);
+		}
+		while (!TAILQ_EMPTY(&asoc->ss_data.out.list)) {
+			TAILQ_REMOVE(&asoc->ss_data.out.list, TAILQ_FIRST(&asoc->ss_data.out.list), ss_next);
+		}
+		if (holds_lock == 0) {
+			SCTP_TCB_SEND_UNLOCK(stcb);
+		}
+	}
+	return;
+}
+
+static void
+sctp_ss_fcfs_init_stream(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq)
+{
+	if (with_strq != NULL) {
+		if (stcb->asoc.ss_data.locked_on_sending == with_strq) {
+			stcb->asoc.ss_data.locked_on_sending = strq;
+		}
+		if (stcb->asoc.ss_data.last_out_stream == with_strq) {
+			stcb->asoc.ss_data.last_out_stream = strq;
+		}
+	}
+	return;
+}
+
+static void
+sctp_ss_fcfs_add(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                 struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp,
+                 int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	if (sp && (sp->ss_next.tqe_next == NULL) &&
+	    (sp->ss_next.tqe_prev == NULL)) {
+		TAILQ_INSERT_TAIL(&asoc->ss_data.out.list, sp, ss_next);
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+static int
+sctp_ss_fcfs_is_empty(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_association *asoc)
+{
+	if (TAILQ_EMPTY(&asoc->ss_data.out.list)) {
+		return (1);
+	} else {
+		return (0);
+	}
+}
+
+static void
+sctp_ss_fcfs_remove(struct sctp_tcb *stcb, struct sctp_association *asoc,
+                    struct sctp_stream_out *strq SCTP_UNUSED, struct sctp_stream_queue_pending *sp,
+                    int holds_lock)
+{
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	if (sp &&
+	    ((sp->ss_next.tqe_next != NULL) ||
+	     (sp->ss_next.tqe_prev != NULL))) {
+		TAILQ_REMOVE(&asoc->ss_data.out.list, sp, ss_next);
+	}
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	return;
+}
+
+
+static struct sctp_stream_out *
+sctp_ss_fcfs_select(struct sctp_tcb *stcb SCTP_UNUSED, struct sctp_nets *net,
+                    struct sctp_association *asoc)
+{
+	struct sctp_stream_out *strq;
+	struct sctp_stream_queue_pending *sp;
+
+	sp = TAILQ_FIRST(&asoc->ss_data.out.list);
+default_again:
+	if (sp != NULL) {
+		strq = &asoc->strmout[sp->sid];
+	} else {
+		strq = NULL;
+	}
+
+	/*
+	 * If CMT is off, we must validate that
+	 * the first item on the stream in question
+	 * is pointed towards the network destination
+	 * requested by the caller. Note that if we
+	 * turn out to be locked to a stream (assigning
+	 * TSNs), then we must stop, since we cannot
+	 * look for another stream with data to send
+	 * to that destination. In CMT's case, by
+	 * skipping this check, we will send one
+	 * data packet towards the requested net.
+	 */
+	if (net != NULL && strq != NULL &&
+	    SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
+		if (TAILQ_FIRST(&strq->outqueue) &&
+		    TAILQ_FIRST(&strq->outqueue)->net != NULL &&
+		    TAILQ_FIRST(&strq->outqueue)->net != net) {
+			sp = TAILQ_NEXT(sp, ss_next);
+			goto default_again;
+		}
+	}
+	return (strq);
+}
+
+const struct sctp_ss_functions sctp_ss_functions[] = {
+/* SCTP_SS_DEFAULT */
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_ss_default_init,
+	sctp_ss_default_clear,
+	sctp_ss_default_init_stream,
+	sctp_ss_default_add,
+	sctp_ss_default_is_empty,
+	sctp_ss_default_remove,
+	sctp_ss_default_select,
+	sctp_ss_default_scheduled,
+	sctp_ss_default_packet_done,
+	sctp_ss_default_get_value,
+	sctp_ss_default_set_value,
+	sctp_ss_default_is_user_msgs_incomplete
+#else
+	.sctp_ss_init = sctp_ss_default_init,
+	.sctp_ss_clear = sctp_ss_default_clear,
+	.sctp_ss_init_stream = sctp_ss_default_init_stream,
+	.sctp_ss_add_to_stream = sctp_ss_default_add,
+	.sctp_ss_is_empty = sctp_ss_default_is_empty,
+	.sctp_ss_remove_from_stream = sctp_ss_default_remove,
+	.sctp_ss_select_stream = sctp_ss_default_select,
+	.sctp_ss_scheduled = sctp_ss_default_scheduled,
+	.sctp_ss_packet_done = sctp_ss_default_packet_done,
+	.sctp_ss_get_value = sctp_ss_default_get_value,
+	.sctp_ss_set_value = sctp_ss_default_set_value,
+	.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_ROUND_ROBIN */
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_ss_default_init,
+	sctp_ss_default_clear,
+	sctp_ss_default_init_stream,
+	sctp_ss_rr_add,
+	sctp_ss_default_is_empty,
+	sctp_ss_default_remove,
+	sctp_ss_default_select,
+	sctp_ss_default_scheduled,
+	sctp_ss_default_packet_done,
+	sctp_ss_default_get_value,
+	sctp_ss_default_set_value,
+	sctp_ss_default_is_user_msgs_incomplete
+#else
+	.sctp_ss_init = sctp_ss_default_init,
+	.sctp_ss_clear = sctp_ss_default_clear,
+	.sctp_ss_init_stream = sctp_ss_default_init_stream,
+	.sctp_ss_add_to_stream = sctp_ss_rr_add,
+	.sctp_ss_is_empty = sctp_ss_default_is_empty,
+	.sctp_ss_remove_from_stream = sctp_ss_default_remove,
+	.sctp_ss_select_stream = sctp_ss_default_select,
+	.sctp_ss_scheduled = sctp_ss_default_scheduled,
+	.sctp_ss_packet_done = sctp_ss_default_packet_done,
+	.sctp_ss_get_value = sctp_ss_default_get_value,
+	.sctp_ss_set_value = sctp_ss_default_set_value,
+	.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_ROUND_ROBIN_PACKET */
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_ss_default_init,
+	sctp_ss_default_clear,
+	sctp_ss_default_init_stream,
+	sctp_ss_rr_add,
+	sctp_ss_default_is_empty,
+	sctp_ss_default_remove,
+	sctp_ss_rrp_select,
+	sctp_ss_default_scheduled,
+	sctp_ss_rrp_packet_done,
+	sctp_ss_default_get_value,
+	sctp_ss_default_set_value,
+	sctp_ss_default_is_user_msgs_incomplete
+#else
+	.sctp_ss_init = sctp_ss_default_init,
+	.sctp_ss_clear = sctp_ss_default_clear,
+	.sctp_ss_init_stream = sctp_ss_default_init_stream,
+	.sctp_ss_add_to_stream = sctp_ss_rr_add,
+	.sctp_ss_is_empty = sctp_ss_default_is_empty,
+	.sctp_ss_remove_from_stream = sctp_ss_default_remove,
+	.sctp_ss_select_stream = sctp_ss_rrp_select,
+	.sctp_ss_scheduled = sctp_ss_default_scheduled,
+	.sctp_ss_packet_done = sctp_ss_rrp_packet_done,
+	.sctp_ss_get_value = sctp_ss_default_get_value,
+	.sctp_ss_set_value = sctp_ss_default_set_value,
+	.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_PRIORITY */
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_ss_default_init,
+	sctp_ss_prio_clear,
+	sctp_ss_prio_init_stream,
+	sctp_ss_prio_add,
+	sctp_ss_default_is_empty,
+	sctp_ss_prio_remove,
+	sctp_ss_prio_select,
+	sctp_ss_default_scheduled,
+	sctp_ss_default_packet_done,
+	sctp_ss_prio_get_value,
+	sctp_ss_prio_set_value,
+	sctp_ss_default_is_user_msgs_incomplete
+#else
+	.sctp_ss_init = sctp_ss_default_init,
+	.sctp_ss_clear = sctp_ss_prio_clear,
+	.sctp_ss_init_stream = sctp_ss_prio_init_stream,
+	.sctp_ss_add_to_stream = sctp_ss_prio_add,
+	.sctp_ss_is_empty = sctp_ss_default_is_empty,
+	.sctp_ss_remove_from_stream = sctp_ss_prio_remove,
+	.sctp_ss_select_stream = sctp_ss_prio_select,
+	.sctp_ss_scheduled = sctp_ss_default_scheduled,
+	.sctp_ss_packet_done = sctp_ss_default_packet_done,
+	.sctp_ss_get_value = sctp_ss_prio_get_value,
+	.sctp_ss_set_value = sctp_ss_prio_set_value,
+	.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_FAIR_BANDWITH */
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_ss_default_init,
+	sctp_ss_fb_clear,
+	sctp_ss_fb_init_stream,
+	sctp_ss_fb_add,
+	sctp_ss_default_is_empty,
+	sctp_ss_fb_remove,
+	sctp_ss_fb_select,
+	sctp_ss_fb_scheduled,
+	sctp_ss_default_packet_done,
+	sctp_ss_default_get_value,
+	sctp_ss_default_set_value,
+	sctp_ss_default_is_user_msgs_incomplete
+#else
+	.sctp_ss_init = sctp_ss_default_init,
+	.sctp_ss_clear = sctp_ss_fb_clear,
+	.sctp_ss_init_stream = sctp_ss_fb_init_stream,
+	.sctp_ss_add_to_stream = sctp_ss_fb_add,
+	.sctp_ss_is_empty = sctp_ss_default_is_empty,
+	.sctp_ss_remove_from_stream = sctp_ss_fb_remove,
+	.sctp_ss_select_stream = sctp_ss_fb_select,
+	.sctp_ss_scheduled = sctp_ss_fb_scheduled,
+	.sctp_ss_packet_done = sctp_ss_default_packet_done,
+	.sctp_ss_get_value = sctp_ss_default_get_value,
+	.sctp_ss_set_value = sctp_ss_default_set_value,
+	.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+},
+/* SCTP_SS_FIRST_COME */
+{
+#if defined(__Windows__) || defined(__Userspace_os_Windows)
+	sctp_ss_fcfs_init,
+	sctp_ss_fcfs_clear,
+	sctp_ss_fcfs_init_stream,
+	sctp_ss_fcfs_add,
+	sctp_ss_fcfs_is_empty,
+	sctp_ss_fcfs_remove,
+	sctp_ss_fcfs_select,
+	sctp_ss_default_scheduled,
+	sctp_ss_default_packet_done,
+	sctp_ss_default_get_value,
+	sctp_ss_default_set_value,
+	sctp_ss_default_is_user_msgs_incomplete
+#else
+	.sctp_ss_init = sctp_ss_fcfs_init,
+	.sctp_ss_clear = sctp_ss_fcfs_clear,
+	.sctp_ss_init_stream = sctp_ss_fcfs_init_stream,
+	.sctp_ss_add_to_stream = sctp_ss_fcfs_add,
+	.sctp_ss_is_empty = sctp_ss_fcfs_is_empty,
+	.sctp_ss_remove_from_stream = sctp_ss_fcfs_remove,
+	.sctp_ss_select_stream = sctp_ss_fcfs_select,
+	.sctp_ss_scheduled = sctp_ss_default_scheduled,
+	.sctp_ss_packet_done = sctp_ss_default_packet_done,
+	.sctp_ss_get_value = sctp_ss_default_get_value,
+	.sctp_ss_set_value = sctp_ss_default_set_value,
+	.sctp_ss_is_user_msgs_incomplete = sctp_ss_default_is_user_msgs_incomplete
+#endif
+}
+};
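+
+/*
+ * For illustration, a sketch of how an application could pick one of the
+ * schedulers above; it assumes the SCTP_PLUGGABLE_SS socket option and the
+ * SCTP_SS_* constants from the socket API headers (check the names against
+ * sctp.h / usrsctp.h for the version in use). The chosen value selects the
+ * corresponding entry of sctp_ss_functions[]:
+ *
+ *	struct sctp_assoc_value av;
+ *
+ *	memset(&av, 0, sizeof(av));
+ *	av.assoc_id = SCTP_FUTURE_ASSOC;
+ *	av.assoc_value = SCTP_SS_ROUND_ROBIN;
+ *	usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_PLUGGABLE_SS, &av, sizeof(av));
+ */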
diff --git a/usrsctplib/netinet/sctp_structs.h b/usrsctplib/netinet/sctp_structs.h
new file mode 100755
index 0000000..b5f3ef2
--- /dev/null
+++ b/usrsctplib/netinet/sctp_structs.h
@@ -0,0 +1,1304 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_structs.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_STRUCTS_H_
+#define _NETINET_SCTP_STRUCTS_H_
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_auth.h>
+
+struct sctp_timer {
+	sctp_os_timer_t timer;
+
+	int type;
+	/*
+	 * Depending on the timer type these will be setup and cast with the
+	 * appropriate entity.
+	 */
+	void *ep;
+	void *tcb;
+	void *net;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+	void *vnet;
+#endif
+
+	/* for sanity checking */
+	void *self;
+	uint32_t ticks;
+	uint32_t stopped_from;
+};
+
+
+struct sctp_foo_stuff {
+	struct sctp_inpcb *inp;
+	uint32_t        lineno;
+	uint32_t        ticks;
+	int             updown;
+};
+
+
+/*
+ * This is the information we track on each interface that we know about from
+ * the distant end.
+ */
+TAILQ_HEAD(sctpnetlisthead, sctp_nets);
+
+struct sctp_stream_reset_list {
+	TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
+	uint32_t seq;
+	uint32_t tsn;
+	uint32_t number_entries;
+	uint16_t list_of_streams[];
+};
+
+TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);
+
+/*
+ * Users of the iterator need to malloc an iterator with a call to
+ * sctp_initiate_iterator(inp_func, assoc_func, inp_func, pcb_flags, pcb_features,
+ *     asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
+ *
+ * Use the following two defines if you don't care what pcb flags are on the EP
+ * and/or you don't care what state the association is in.
+ *
+ * Note that if you specify an INP as the last argument then ONLY each
+ * association of that single INP will be executed upon. Note that the pcb
+ * flags STILL apply, so if the inp you specify has pcb_flags different from
+ * what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS to
+ * assure the inp you specify gets treated.
+ */
+#define SCTP_PCB_ANY_FLAGS	0x00000000
+#define SCTP_PCB_ANY_FEATURES	0x00000000
+#define SCTP_ASOC_ANY_STATE	0x00000000
+
+typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
+         uint32_t val);
+typedef int (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
+typedef void (*end_func) (void *ptr, uint32_t val);
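+
+/*
+ * For illustration, callbacks matching the typedefs above (the names used
+ * here are hypothetical). The first one would run once per matching
+ * association, the second once when the iteration has finished; both would
+ * be handed to sctp_initiate_iterator() as described in the comment above:
+ *
+ *	static void
+ *	count_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
+ *	            uint32_t val)
+ *	{
+ *		(*(uint32_t *)ptr)++;
+ *	}
+ *
+ *	static void
+ *	count_done(void *ptr, uint32_t val)
+ *	{
+ *		SCTP_PRINTF("%u associations visited\n", *(uint32_t *)ptr);
+ *	}
+ */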
+
+#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
+/* whats on the mcore control struct */
+struct sctp_mcore_queue {
+	TAILQ_ENTRY(sctp_mcore_queue) next;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	struct vnet *vn;
+#endif
+	struct mbuf *m;
+	int off;
+	int v6;
+};
+
+TAILQ_HEAD(sctp_mcore_qhead, sctp_mcore_queue);
+
+struct sctp_mcore_ctrl {
+	SCTP_PROCESS_STRUCT thread_proc;
+	struct sctp_mcore_qhead que;
+	struct mtx core_mtx;
+	struct mtx que_mtx;
+	int running;
+	int cpuid;
+};
+
+
+#endif
+
+
+struct sctp_iterator {
+	TAILQ_ENTRY(sctp_iterator) sctp_nxt_itr;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	struct vnet *vn;
+#endif
+	struct sctp_timer tmr;
+	struct sctp_inpcb *inp;		/* current endpoint */
+	struct sctp_tcb *stcb;		/* current assoc */
+	struct sctp_inpcb *next_inp;    /* special hook to skip to */
+	asoc_func function_assoc;	/* per assoc function */
+	inp_func function_inp;		/* per endpoint function */
+	inp_func function_inp_end;	/* end INP function */
+	end_func function_atend;	/* iterator completion function */
+	void *pointer;			/* pointer for apply func to use */
+	uint32_t val;			/* value for apply func to use */
+	uint32_t pcb_flags;		/* endpoint flags being checked */
+	uint32_t pcb_features;		/* endpoint features being checked */
+	uint32_t asoc_state;		/* assoc state being checked */
+	uint32_t iterator_flags;
+	uint8_t  no_chunk_output;
+	uint8_t  done_current_ep;
+};
+/* iterator_flags values */
+#define SCTP_ITERATOR_DO_ALL_INP	0x00000001
+#define SCTP_ITERATOR_DO_SINGLE_INP	0x00000002
+
+
+TAILQ_HEAD(sctpiterators, sctp_iterator);
+
+struct sctp_copy_all {
+	struct sctp_inpcb *inp;	/* ep */
+	struct mbuf *m;
+	struct sctp_sndrcvinfo sndrcv;
+	int sndlen;
+	int cnt_sent;
+	int cnt_failed;
+};
+
+struct sctp_asconf_iterator {
+	struct sctpladdr list_of_work;
+	int cnt;
+};
+
+struct iterator_control {
+#if defined(__FreeBSD__)
+	struct mtx ipi_iterator_wq_mtx;
+	struct mtx it_mtx;
+#elif defined(__APPLE__)
+	lck_mtx_t *ipi_iterator_wq_mtx;
+	lck_mtx_t *it_mtx;
+#elif defined(SCTP_PROCESS_LEVEL_LOCKS)
+#if defined(__Userspace__)
+	userland_mutex_t ipi_iterator_wq_mtx;
+	userland_mutex_t it_mtx;
+	userland_cond_t iterator_wakeup;
+#else
+	pthread_mutex_t ipi_iterator_wq_mtx;
+	pthread_mutex_t it_mtx;
+	pthread_cond_t iterator_wakeup;
+#endif
+#elif defined(__Windows__)
+	struct spinlock it_lock;
+	struct spinlock ipi_iterator_wq_lock;
+	KEVENT iterator_wakeup[2];
+	PFILE_OBJECT iterator_thread_obj;
+#else
+	void *it_mtx;
+#endif
+#if !defined(__Windows__)
+#if !defined(__Userspace__)
+	SCTP_PROCESS_STRUCT thread_proc;
+#else
+	userland_thread_t thread_proc;
+#endif
+#endif
+	struct sctpiterators iteratorhead;
+	struct sctp_iterator *cur_it;
+	uint32_t iterator_running;
+	uint32_t iterator_flags;
+};
+#if !defined(__FreeBSD__)
+#define SCTP_ITERATOR_MUST_EXIT		0x00000001
+#define SCTP_ITERATOR_EXITED		0x00000002
+#endif
+#define SCTP_ITERATOR_STOP_CUR_IT	0x00000004
+#define SCTP_ITERATOR_STOP_CUR_INP	0x00000008
+
+struct sctp_net_route {
+	sctp_rtentry_t *ro_rt;
+#if defined(__FreeBSD__)
+#if __FreeBSD_version < 1100093
+#if __FreeBSD_version >= 800000
+	void *ro_lle;
+#endif
+#if __FreeBSD_version >= 900000
+	void *ro_ia;
+	int ro_flags;
+#endif
+#else
+#if __FreeBSD_version >= 1100116
+	struct llentry *ro_lle;
+#endif
+	char		*ro_prepend;
+	uint16_t	ro_plen;
+	uint16_t	ro_flags;
+	uint16_t	ro_mtu;
+	uint16_t	spare;
+#endif
+#endif
+#if defined(__APPLE__)
+#if !defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) && !defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION)
+	struct ifaddr *ro_srcia;
+#endif
+#if !defined(APPLE_LEOPARD)
+	uint32_t ro_flags;
+#endif
+#endif
+	union sctp_sockstore _l_addr;	/* remote peer addr */
+	struct sctp_ifa *_s_addr;	/* our selected src addr */
+};
+
+struct htcp {
+	uint16_t	alpha;		/* Fixed point arith, << 7 */
+	uint8_t		beta;           /* Fixed point arith, << 7 */
+	uint8_t		modeswitch;     /* Delay modeswitch until we had at least one congestion event */
+	uint32_t	last_cong;	/* Time since last congestion event end */
+	uint32_t	undo_last_cong;
+	uint16_t	bytes_acked;
+	uint32_t	bytecount;
+	uint32_t	minRTT;
+	uint32_t	maxRTT;
+
+	uint32_t	undo_maxRTT;
+	uint32_t	undo_old_maxB;
+
+	/* Bandwidth estimation */
+	uint32_t	minB;
+	uint32_t	maxB;
+	uint32_t	old_maxB;
+	uint32_t	Bi;
+	uint32_t	lasttime;
+};
+
+struct rtcc_cc {
+	struct timeval tls;   /* The time we started the sending  */
+	uint64_t lbw;         /* Our last estimated bw */
+	uint64_t lbw_rtt;     /* RTT at bw estimate */
+	uint64_t bw_bytes;    /* The total bytes since this sending began */
+	uint64_t bw_tot_time; /* The total time since sending began */
+	uint64_t new_tot_time;  /* temp holding the new value */
+	uint64_t bw_bytes_at_last_rttc; /* What bw_bytes was at last rtt calc */
+	uint32_t cwnd_at_bw_set; /* Cwnd at last bw saved - lbw */
+	uint32_t vol_reduce;  /* cnt of voluntary reductions */
+	uint16_t steady_step; /* The number required to be in steady state*/
+	uint16_t step_cnt;    /* The current number */
+	uint8_t  ret_from_eq;  /* When all things are equal what do I return 0/1 - 1 no cc advance */
+	uint8_t  use_dccc_ecn;  /* Flag to enable DCCC ECN */
+	uint8_t  tls_needs_set; /* Flag to indicate we need to set tls: 0 or 1 means set at send, 2 not */
+	uint8_t  last_step_state; /* Last state if steady state stepdown is on */
+	uint8_t  rtt_set_this_sack; /* Flag saying this sack had RTT calc on it */
+	uint8_t  last_inst_ind; /* Last saved inst indication */
+};
+
+
+struct sctp_nets {
+	TAILQ_ENTRY(sctp_nets) sctp_next;	/* next link */
+
+	/*
+	 * Things on the top half may be able to be split into a common
+	 * structure shared by all.
+	 */
+	struct sctp_timer pmtu_timer;
+	struct sctp_timer hb_timer;
+
+	/*
+	 * The following two in combination equate to a route entry for v6
+	 * or v4.
+	 */
+	struct sctp_net_route ro;
+
+	/* mtu discovered so far */
+	uint32_t mtu;
+	uint32_t ssthresh;	/* not sure about this one for split */
+	uint32_t last_cwr_tsn;
+	uint32_t cwr_window_tsn;
+	uint32_t ecn_ce_pkt_cnt;
+	uint32_t lost_cnt;
+	/* smoothed average things for RTT and RTO itself */
+	int lastsa;
+	int lastsv;
+	uint64_t rtt; /* last measured rtt value in us */
+	unsigned int RTO;
+
+	/* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
+	struct sctp_timer rxt_timer;
+
+	/* last time in seconds I sent to it */
+	struct timeval last_sent_time;
+	union cc_control_data {
+		struct htcp htcp_ca; 	/* JRS - struct used in HTCP algorithm */
+		struct rtcc_cc rtcc;    /* rtcc module cc stuff  */
+	} cc_mod;
+	int ref_count;
+
+	/* Congestion stats per destination */
+	/*
+	 * flight size variables and such, sorry Vern, I could not avoid
+	 * this if I wanted performance :>
+	 */
+	uint32_t flight_size;
+	uint32_t cwnd;		/* actual cwnd */
+	uint32_t prev_cwnd;	/* cwnd before any processing */
+	uint32_t ecn_prev_cwnd;	/* ECN prev cwnd at first ecn_echo seen in new window */
+	uint32_t partial_bytes_acked;	/* in CA tracks when to incr a MTU */
+	/* tracking variables to avoid the aloc/free in sack processing */
+	unsigned int net_ack;
+	unsigned int net_ack2;
+
+	/*
+	 * JRS - 5/8/07 - Variable to track last time
+	 *  a destination was active for CMT PF
+	 */
+	uint32_t last_active;
+
+	/*
+	 * CMT variables (iyengar@cis.udel.edu)
+	 */
+	uint32_t this_sack_highest_newack;	/* tracks highest TSN newly
+						 * acked for a given dest in
+						 * the current SACK. Used in
+						 * SFR and HTNA algos */
+	uint32_t pseudo_cumack;	/* CMT CUC algorithm. Maintains next expected
+				 * pseudo-cumack for this destination */
+	uint32_t rtx_pseudo_cumack;	/* CMT CUC algorithm. Maintains next
+					 * expected pseudo-cumack for this
+					 * destination */
+
+	/* CMT fast recovery variables */
+	uint32_t fast_recovery_tsn;
+	uint32_t heartbeat_random1;
+	uint32_t heartbeat_random2;
+#ifdef INET6
+	uint32_t flowlabel;
+#endif
+	uint8_t dscp;
+
+	struct timeval start_time;      /* time when this net was created */
+	uint32_t marked_retrans;        /* number of DATA chunks marked for
+	                                   timer-based retransmissions */
+	uint32_t marked_fastretrans;
+	uint32_t heart_beat_delay;      /* Heart Beat delay in ms */
+
+	/* if this guy is ok or not ... status */
+	uint16_t dest_state;
+	/* number of timeouts to consider the destination unreachable */
+	uint16_t failure_threshold;
+	/* number of timeouts to consider the destination potentially failed */
+	uint16_t pf_threshold;
+	/* error stats on the destination */
+	uint16_t error_count;
+	/* UDP port number in case of UDP tunneling */
+	uint16_t port;
+
+	uint8_t fast_retran_loss_recovery;
+	uint8_t will_exit_fast_recovery;
+	/* Flags that probably can be combined into dest_state */
+	uint8_t fast_retran_ip;	/* fast retransmit in progress */
+	uint8_t hb_responded;
+	uint8_t saw_newack;	/* CMT's SFR algorithm flag */
+	uint8_t src_addr_selected;	/* if we split we move */
+	uint8_t indx_of_eligible_next_to_use;
+	uint8_t addr_is_local;	/* it's a local address (if known) could move
+				 * in split */
+
+	/*
+	 * CMT variables (iyengar@cis.udel.edu)
+	 */
+	uint8_t find_pseudo_cumack;	/* CMT CUC algorithm. Flag used to
+					 * find a new pseudocumack. This flag
+					 * is set after a new pseudo-cumack
+					 * has been received and indicates
+					 * that the sender should find the
+					 * next pseudo-cumack expected for
+					 * this destination */
+	uint8_t find_rtx_pseudo_cumack;	/* CMT CUCv2 algorithm. Flag used to
+					 * find a new rtx-pseudocumack. This
+					 * flag is set after a new
+					 * rtx-pseudo-cumack has been received
+					 * and indicates that the sender
+					 * should find the next
+					 * rtx-pseudo-cumack expected for this
+					 * destination */
+	uint8_t new_pseudo_cumack;	/* CMT CUC algorithm. Flag used to
+					 * indicate if a new pseudo-cumack or
+					 * rtx-pseudo-cumack has been received */
+	uint8_t window_probe;		/* Doing a window probe? */
+	uint8_t RTO_measured;		/* Have we done the first measure */
+	uint8_t last_hs_used;	/* index into the last HS table entry we used */
+	uint8_t lan_type;
+	uint8_t rto_needed;
+#if defined(__FreeBSD__)
+	uint32_t flowid;
+	uint8_t flowtype;
+#endif
+};
+
+
+struct sctp_data_chunkrec {
+	uint32_t tsn;		/* the TSN of this transmit */
+	uint32_t mid;		/* the message identifier of this transmit */
+	uint16_t sid;		/* the stream number of this guy */
+	uint32_t ppid;
+	uint32_t context;	/* from send */
+	uint32_t cwnd_at_send;
+	/*
+	 * part of the Highest sacked algorithm to be able to stroke counts
+	 * on ones that are FR'd.
+	 */
+	uint32_t fast_retran_tsn;	/* sending_seq at the time of FR */
+	struct timeval timetodrop;	/* time we drop it from queue */
+	uint32_t fsn;			/* Fragment Sequence Number */
+	uint8_t doing_fast_retransmit;
+	uint8_t rcv_flags;	/* flags pulled from data chunk on inbound for
+				 * outbound holds sending flags for PR-SCTP.
+				 */
+	uint8_t state_flags;
+	uint8_t chunk_was_revoked;
+	uint8_t fwd_tsn_cnt;
+};
+
+TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);
+
+/* The lower byte is used to enumerate PR_SCTP policies */
+#define CHUNK_FLAGS_PR_SCTP_TTL	        SCTP_PR_SCTP_TTL
+#define CHUNK_FLAGS_PR_SCTP_BUF	        SCTP_PR_SCTP_BUF
+#define CHUNK_FLAGS_PR_SCTP_RTX         SCTP_PR_SCTP_RTX
+
+/* The upper byte is used as a bit mask */
+#define CHUNK_FLAGS_FRAGMENT_OK	        0x0100
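+/*
+ * For illustration: a chunk's flags word can carry both parts at once, e.g.
+ * (CHUNK_FLAGS_PR_SCTP_TTL | CHUNK_FLAGS_FRAGMENT_OK); masking with 0x00ff
+ * recovers the PR-SCTP policy, while the upper byte is tested bit by bit.
+ */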
+
+struct chk_id {
+	uint8_t id;
+	uint8_t can_take_data;
+};
+
+
+struct sctp_tmit_chunk {
+	union {
+		struct sctp_data_chunkrec data;
+		struct chk_id chunk_id;
+	}     rec;
+	struct sctp_association *asoc;	/* bp to asoc this belongs to */
+	struct timeval sent_rcv_time;	/* filled in if RTT being calculated */
+	struct mbuf *data;	/* pointer to mbuf chain of data */
+	struct mbuf *last_mbuf;	/* pointer to last mbuf in chain */
+	struct sctp_nets *whoTo;
+	TAILQ_ENTRY(sctp_tmit_chunk) sctp_next;	/* next link */
+	int32_t sent;		/* the send status */
+	uint16_t snd_count;	/* number of times I sent */
+	uint16_t flags;		/* flags, such as FRAGMENT_OK */
+	uint16_t send_size;
+	uint16_t book_size;
+	uint16_t mbcnt;
+	uint16_t auth_keyid;
+	uint8_t holds_key_ref;	/* flag if auth keyid refcount is held */
+	uint8_t pad_inplace;
+	uint8_t do_rtt;
+	uint8_t book_size_scale;
+	uint8_t no_fr_allowed;
+	uint8_t copy_by_ref;
+	uint8_t window_probe;
+};
+
+struct sctp_queued_to_read {	/* sinfo structure plus more */
+	uint16_t sinfo_stream;	/* off the wire */
+	uint16_t sinfo_flags;	/* SCTP_UNORDERED from wire use SCTP_EOF for
+				 * EOR */
+	uint32_t sinfo_ppid;	/* off the wire */
+	uint32_t sinfo_context;	/* pick this up from assoc def context? */
+	uint32_t sinfo_timetolive;	/* not used by kernel */
+	uint32_t sinfo_tsn;	/* Use this in reassembly as first TSN */
+	uint32_t sinfo_cumtsn;	/* Use this in reassembly as last TSN */
+	sctp_assoc_t sinfo_assoc_id;	/* our assoc id */
+	/* Non sinfo stuff */
+	uint32_t mid;		/* Fragment Index */
+	uint32_t length;	/* length of data */
+	uint32_t held_length;	/* length held in sb */
+	uint32_t top_fsn;	/* Highest FSN in queue */
+	uint32_t fsn_included;  /* Highest FSN in *data portion */
+	struct sctp_nets *whoFrom;	/* where it came from */
+	struct mbuf *data;	/* front of the mbuf chain of data with
+				 * PKT_HDR */
+	struct mbuf *tail_mbuf;	/* used for multi-part data */
+	struct mbuf *aux_data;  /* used to hold/cache  control if o/s does not take it from us */
+	struct sctp_tcb *stcb;	/* assoc, used for window update */
+	TAILQ_ENTRY(sctp_queued_to_read) next;
+	TAILQ_ENTRY(sctp_queued_to_read) next_instrm;
+	struct sctpchunk_listhead reasm;
+	uint16_t port_from;
+	uint16_t spec_flags;	/* Flags to hold the notification field */
+	uint8_t  do_not_ref_stcb;
+	uint8_t  end_added;
+	uint8_t  pdapi_aborted;
+	uint8_t  pdapi_started;
+	uint8_t  some_taken;
+	uint8_t  last_frag_seen;
+	uint8_t  first_frag_seen;
+	uint8_t  on_read_q;
+	uint8_t  on_strm_q;
+};
+
+#define SCTP_ON_ORDERED 1
+#define SCTP_ON_UNORDERED 2
+
+/* This data structure will be on the outbound
+ * stream queues. Data will be pulled off from
+ * the front of the mbuf data and chunk-ified
+ * by the output routines. We will custom
+ * fit every chunk we pull to the send/sent
+ * queue to make up the next full packet
+ * if we can. An entry cannot be removed
+ * from the stream_out queue until
+ * the msg_is_complete flag is set. This
+ * means at times data/tail_mbuf MIGHT
+ * be NULL. If that occurs, it happens
+ * for one of two reasons: either the user
+ * is blocked on a send() call and has not
+ * awoken to copy more data down, OR
+ * the user is in explicit MSG_EOR mode
+ * and wrote some data, but has not completed
+ * sending.
+ */
+struct sctp_stream_queue_pending {
+	struct mbuf *data;
+	struct mbuf *tail_mbuf;
+	struct timeval ts;
+	struct sctp_nets *net;
+	TAILQ_ENTRY (sctp_stream_queue_pending) next;
+	TAILQ_ENTRY (sctp_stream_queue_pending) ss_next;
+	uint32_t fsn;
+	uint32_t length;
+	uint32_t timetolive;
+	uint32_t ppid;
+	uint32_t context;
+	uint16_t sinfo_flags;
+	uint16_t sid;
+	uint16_t act_flags;
+	uint16_t auth_keyid;
+	uint8_t  holds_key_ref;
+	uint8_t  msg_is_complete;
+	uint8_t  some_taken;
+	uint8_t  sender_all_done;
+	uint8_t  put_last_out;
+	uint8_t  discard_rest;
+};
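+
+/*
+ * For illustration of the explicit MSG_EOR case mentioned above: with the
+ * SCTP_EXPLICIT_EOR socket option enabled, an application may push a single
+ * message down in several send calls and set SCTP_EOR in sinfo_flags only on
+ * the last one; until that last send arrives, msg_is_complete stays 0 and
+ * the entry stays on the stream_out queue (option and flag names are taken
+ * from the socket API and should be checked against sctp.h).
+ */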
+
+/*
+ * this struct contains info that is used to track inbound stream data and
+ * help with ordering.
+ */
+TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
+struct sctp_stream_in {
+	struct sctp_readhead inqueue;
+	struct sctp_readhead uno_inqueue;
+	uint32_t last_mid_delivered;	/* used for re-order */
+	uint16_t sid;
+	uint8_t  delivery_started;
+	uint8_t  pd_api_started;
+};
+
+TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
+TAILQ_HEAD(sctplist_listhead, sctp_stream_queue_pending);
+
+
+/* Round-robin schedulers */
+struct ss_rr {
+	/* next link in wheel */
+	TAILQ_ENTRY(sctp_stream_out) next_spoke;
+};
+
+/* Priority scheduler */
+struct ss_prio {
+	/* next link in wheel */
+	TAILQ_ENTRY(sctp_stream_out) next_spoke;
+	/* priority id */
+	uint16_t priority;
+};
+
+/* Fair Bandwidth scheduler */
+struct ss_fb {
+	/* next link in wheel */
+	TAILQ_ENTRY(sctp_stream_out) next_spoke;
+	/* stores message size */
+	int32_t rounds;
+};
+
+/*
+ * This union holds all data necessary for
+ * different stream schedulers.
+ */
+struct scheduling_data {
+	struct sctp_stream_out *locked_on_sending;
+	/* circular looking for output selection */
+	struct sctp_stream_out *last_out_stream;
+	union {
+		struct sctpwheel_listhead wheel;
+		struct sctplist_listhead list;
+	} out;
+};
+
+/*
+ * This union holds all parameters per stream
+ * necessary for different stream schedulers.
+ */
+union scheduling_parameters {
+	struct ss_rr rr;
+	struct ss_prio prio;
+	struct ss_fb fb;
+};
+
+/* States for outgoing streams */
+#define SCTP_STREAM_CLOSED           0x00
+#define SCTP_STREAM_OPENING          0x01
+#define SCTP_STREAM_OPEN             0x02
+#define SCTP_STREAM_RESET_PENDING    0x03
+#define SCTP_STREAM_RESET_IN_FLIGHT  0x04
+
+#define SCTP_MAX_STREAMS_AT_ONCE_RESET 200
+
+/* This struct is used to track the traffic on outbound streams */
+struct sctp_stream_out {
+	struct sctp_streamhead outqueue;
+	union scheduling_parameters ss_params;
+	uint32_t chunks_on_queues;      /* send queue and sent queue */
+#if defined(SCTP_DETAILED_STR_STATS)
+	uint32_t abandoned_unsent[SCTP_PR_SCTP_MAX + 1];
+	uint32_t abandoned_sent[SCTP_PR_SCTP_MAX + 1];
+#else
+	/* Only the aggregation */
+	uint32_t abandoned_unsent[1];
+	uint32_t abandoned_sent[1];
+#endif
+	/* For associations using DATA chunks, the lower 16-bit of
+	 * next_mid_ordered are used as the next SSN.
+	 */
+	uint32_t next_mid_ordered;
+	uint32_t next_mid_unordered;
+	uint16_t sid;
+	uint8_t last_msg_incomplete;
+	uint8_t state;
+};
+
+/* used to keep track of the addresses yet to try to add/delete */
+TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
+struct sctp_asconf_addr {
+	TAILQ_ENTRY(sctp_asconf_addr) next;
+	struct sctp_asconf_addr_param ap;
+	struct sctp_ifa *ifa;	/* save the ifa for add/del ip */
+	uint8_t sent;		/* has this been sent yet? */
+	uint8_t special_del;	/* not to be used in lookup */
+};
+
+struct sctp_scoping {
+	uint8_t ipv4_addr_legal;
+	uint8_t ipv6_addr_legal;
+#if defined(__Userspace__)
+	uint8_t conn_addr_legal;
+#endif
+	uint8_t loopback_scope;
+	uint8_t ipv4_local_scope;
+	uint8_t local_scope;
+	uint8_t site_scope;
+};
+
+#define SCTP_TSN_LOG_SIZE 40
+
+struct sctp_tsn_log {
+	void     *stcb;
+	uint32_t tsn;
+	uint32_t seq;
+	uint16_t strm;
+	uint16_t sz;
+	uint16_t flgs;
+	uint16_t in_pos;
+	uint16_t in_out;
+	uint16_t resv;
+};
+
+#define SCTP_FS_SPEC_LOG_SIZE 200
+struct sctp_fs_spec_log {
+	uint32_t sent;
+	uint32_t total_flight;
+	uint32_t tsn;
+	uint16_t book;
+	uint8_t incr;
+	uint8_t decr;
+};
+
+/* This struct is here to cut out the compatibility
+ * pad that bulks up both the inp and stcb. The non-pad
+ * portion MUST stay in complete sync with
+ * sctp_sndrcvinfo... i.e. if sinfo_xxxx is added
+ * this must be done here too.
+ */
+struct sctp_nonpad_sndrcvinfo {
+	uint16_t sinfo_stream;
+	uint16_t sinfo_ssn;
+	uint16_t sinfo_flags;
+	uint32_t sinfo_ppid;
+	uint32_t sinfo_context;
+	uint32_t sinfo_timetolive;
+	uint32_t sinfo_tsn;
+	uint32_t sinfo_cumtsn;
+	sctp_assoc_t sinfo_assoc_id;
+	uint16_t sinfo_keynumber;
+	uint16_t sinfo_keynumber_valid;
+};
+
+/*
+ * JRS - Structure to hold function pointers to the functions responsible
+ * for congestion control.
+ */
+
+struct sctp_cc_functions {
+	void (*sctp_set_initial_cc_param)(struct sctp_tcb *stcb, struct sctp_nets *net);
+	void (*sctp_cwnd_update_after_sack)(struct sctp_tcb *stcb,
+	                                    struct sctp_association *asoc,
+	                                    int accum_moved, int reneged_all, int will_exit);
+	void (*sctp_cwnd_update_exit_pf)(struct sctp_tcb *stcb, struct sctp_nets *net);
+	void (*sctp_cwnd_update_after_fr)(struct sctp_tcb *stcb,
+			struct sctp_association *asoc);
+	void (*sctp_cwnd_update_after_timeout)(struct sctp_tcb *stcb,
+			struct sctp_nets *net);
+	void (*sctp_cwnd_update_after_ecn_echo)(struct sctp_tcb *stcb,
+			struct sctp_nets *net, int in_window, int num_pkt_lost);
+	void (*sctp_cwnd_update_after_packet_dropped)(struct sctp_tcb *stcb,
+			struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
+			uint32_t *bottle_bw, uint32_t *on_queue);
+	void (*sctp_cwnd_update_after_output)(struct sctp_tcb *stcb,
+			struct sctp_nets *net, int burst_limit);
+	void (*sctp_cwnd_update_packet_transmitted)(struct sctp_tcb *stcb,
+			struct sctp_nets *net);
+	void (*sctp_cwnd_update_tsn_acknowledged)(struct sctp_nets *net,
+			struct sctp_tmit_chunk *);
+	void (*sctp_cwnd_new_transmission_begins)(struct sctp_tcb *stcb,
+			struct sctp_nets *net);
+	void (*sctp_cwnd_prepare_net_for_sack)(struct sctp_tcb *stcb,
+			struct sctp_nets *net);
+	int (*sctp_cwnd_socket_option)(struct sctp_tcb *stcb, int set, struct sctp_cc_option *);
+	void (*sctp_rtt_calculated)(struct sctp_tcb *, struct sctp_nets *, struct timeval *);
+};
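+
+/*
+ * Illustrative sketch (hypothetical names, not part of the original source):
+ * a congestion control module plugs in by providing an instance of this
+ * table, roughly
+ *
+ *	static const struct sctp_cc_functions my_cc = {
+ *		.sctp_set_initial_cc_param   = my_set_initial_cc_param,
+ *		.sctp_cwnd_update_after_sack = my_cwnd_update_after_sack,
+ *		.sctp_cwnd_update_after_fr   = my_cwnd_update_after_fr,
+ *		// ... remaining callbacks filled in similarly
+ *	};
+ *
+ * The module in use is recorded per association in the
+ * congestion_control_module field below and defaults to the
+ * sctp_default_cc_module sysctl value.
+ */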
+
+/*
+ * RS - Structure to hold function pointers to the functions responsible
+ * for stream scheduling.
+ */
+struct sctp_ss_functions {
+	void (*sctp_ss_init)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+		int holds_lock);
+	void (*sctp_ss_clear)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+		int clear_values, int holds_lock);
+	void (*sctp_ss_init_stream)(struct sctp_tcb *stcb, struct sctp_stream_out *strq, struct sctp_stream_out *with_strq);
+	void (*sctp_ss_add_to_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+		struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
+	int (*sctp_ss_is_empty)(struct sctp_tcb *stcb, struct sctp_association *asoc);
+	void (*sctp_ss_remove_from_stream)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+		struct sctp_stream_out *strq, struct sctp_stream_queue_pending *sp, int holds_lock);
+	struct sctp_stream_out* (*sctp_ss_select_stream)(struct sctp_tcb *stcb,
+		struct sctp_nets *net, struct sctp_association *asoc);
+	void (*sctp_ss_scheduled)(struct sctp_tcb *stcb, struct sctp_nets *net,
+		struct sctp_association *asoc, struct sctp_stream_out *strq, int moved_how_much);
+	void (*sctp_ss_packet_done)(struct sctp_tcb *stcb, struct sctp_nets *net,
+		struct sctp_association *asoc);
+	int (*sctp_ss_get_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+		struct sctp_stream_out *strq, uint16_t *value);
+	int (*sctp_ss_set_value)(struct sctp_tcb *stcb, struct sctp_association *asoc,
+		struct sctp_stream_out *strq, uint16_t value);
+	int (*sctp_ss_is_user_msgs_incomplete)(struct sctp_tcb *stcb, struct sctp_association *asoc);
+};
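+
+/*
+ * Illustrative sketch (a hedged outline of the call pattern, not a verbatim
+ * excerpt): the round-robin, priority and fair-bandwidth schedulers above
+ * each supply an instance of this table; the one in use is recorded in the
+ * association's stream_scheduling_module field and defaults to the
+ * sctp_default_ss_module sysctl value.  The output path is expected to
+ * drive it roughly as
+ *
+ *	strq = asoc->ss_functions.sctp_ss_select_stream(stcb, net, asoc);
+ *	// ... move data from strq->outqueue towards the send queue ...
+ *	asoc->ss_functions.sctp_ss_scheduled(stcb, net, asoc, strq, moved);
+ */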
+
+/* used to save ASCONF chunks for retransmission */
+TAILQ_HEAD(sctp_asconf_head, sctp_asconf);
+struct sctp_asconf {
+	TAILQ_ENTRY(sctp_asconf) next;
+	uint32_t serial_number;
+	uint16_t snd_count;
+	struct mbuf *data;
+	uint16_t len;
+};
+
+/* used to save ASCONF-ACK chunks for retransmission */
+TAILQ_HEAD(sctp_asconf_ackhead, sctp_asconf_ack);
+struct sctp_asconf_ack {
+	TAILQ_ENTRY(sctp_asconf_ack) next;
+	uint32_t serial_number;
+	struct sctp_nets *last_sent_to;
+	struct mbuf *data;
+	uint16_t len;
+};
+
+/*
+ * Here we have information about each individual association that we track.
+ * In production we would probably be more dynamic, but for ease of
+ * implementation we will have a fixed array that we hunt through in a linear
+ * fashion.
+ */
+struct sctp_association {
+	/* association state */
+	int state;
+
+	/* queue of pending addrs to add/delete */
+	struct sctp_asconf_addrhead asconf_queue;
+
+	struct timeval time_entered;	/* time we entered state */
+	struct timeval time_last_rcvd;
+	struct timeval time_last_sent;
+	struct timeval time_last_sat_advance;
+	struct sctp_nonpad_sndrcvinfo def_send;
+
+	/* timers and such */
+	struct sctp_timer dack_timer;		/* Delayed ack timer */
+	struct sctp_timer asconf_timer;		/* asconf */
+	struct sctp_timer strreset_timer;	/* stream reset */
+	struct sctp_timer shut_guard_timer;	/* shutdown guard */
+	struct sctp_timer autoclose_timer;	/* automatic close timer */
+	struct sctp_timer delayed_event_timer;	/* timer for delayed events */
+	struct sctp_timer delete_prim_timer;	/* deleting primary dst */
+
+	/* list of restricted local addresses */
+	struct sctpladdr sctp_restricted_addrs;
+
+	/* last local address pending deletion (waiting for an address add) */
+	struct sctp_ifa *asconf_addr_del_pending;
+	/* Deleted primary destination (used to stop timer) */
+	struct sctp_nets *deleted_primary;
+
+	struct sctpnetlisthead nets;		/* remote address list */
+
+	/* Free chunk list */
+	struct sctpchunk_listhead free_chunks;
+
+	/* Control chunk queue */
+	struct sctpchunk_listhead control_send_queue;
+
+	/* ASCONF chunk queue */
+	struct sctpchunk_listhead asconf_send_queue;
+
+	/*
+	 * Once a TSN hits the wire it is moved to the sent_queue. We
+	 * maintain two counts here (don't know if any but retran_cnt is
+	 * needed). The idea is that the sent_queue_retran_cnt reflects how
+	 * many chunks have been marked for retransmission by either T3-rxt
+	 * or FR.
+	 */
+	struct sctpchunk_listhead sent_queue;
+	struct sctpchunk_listhead send_queue;
+
+	/* Scheduling queues */
+	struct scheduling_data ss_data;
+
+	/* If an iterator is looking at me, this is it */
+	struct sctp_iterator *stcb_starting_point_for_iterator;
+
+	/* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
+	struct sctp_asconf_ackhead asconf_ack_sent;
+
+	/*
+	 * pointer to last stream reset queued to control queue by us with
+	 * requests.
+	 */
+	struct sctp_tmit_chunk *str_reset;
+	/*
+	 * If Source Address Selection is happening, this will rotate through
+	 * the linked list.
+	 */
+	struct sctp_laddr *last_used_address;
+
+	/* stream arrays */
+	struct sctp_stream_in *strmin;
+	struct sctp_stream_out *strmout;
+	uint8_t *mapping_array;
+	/* primary destination to use */
+	struct sctp_nets *primary_destination;
+	struct sctp_nets *alternate; /* If primary is down or PF */
+	/* For CMT */
+	struct sctp_nets *last_net_cmt_send_started;
+	/* last place I got a data chunk from */
+	struct sctp_nets *last_data_chunk_from;
+	/* last place I got a control from */
+	struct sctp_nets *last_control_chunk_from;
+
+
+	/*
+	 * wait to the point the cum-ack passes req->send_reset_at_tsn for
+	 * any req on the list.
+	 */
+	struct sctp_resethead resetHead;
+
+	/* queue of chunks waiting to be sent into the local stack */
+	struct sctp_readhead pending_reply_queue;
+
+	/* JRS - the congestion control functions are in this struct */
+	struct sctp_cc_functions cc_functions;
+	/* JRS - value to store the currently loaded congestion control module */
+	uint32_t congestion_control_module;
+	/* RS - the stream scheduling functions are in this struct */
+	struct sctp_ss_functions ss_functions;
+	/* RS - value to store the currently loaded stream scheduling module */
+	uint32_t stream_scheduling_module;
+
+	uint32_t vrf_id;
+	uint32_t cookie_preserve_req;
+	/* ASCONF next seq I am sending out, inits at init-tsn */
+	uint32_t asconf_seq_out;
+	uint32_t asconf_seq_out_acked;
+	/* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
+	uint32_t asconf_seq_in;
+
+	/* next seq I am sending in str reset messages */
+	uint32_t str_reset_seq_out;
+	/* next seq I am expecting in str reset messages */
+	uint32_t str_reset_seq_in;
+
+	/* various verification tag information */
+	uint32_t my_vtag;	/* The tag to be used. If the assoc is re-initiated
+				 * by the remote end and I have unlocked, this
+				 * will be regenerated to a new random value. */
+	uint32_t peer_vtag;	/* The peer's last tag */
+
+	uint32_t my_vtag_nonce;
+	uint32_t peer_vtag_nonce;
+
+	uint32_t assoc_id;
+
+	/* This is the SCTP fragmentation threshold */
+	uint32_t smallest_mtu;
+
+	/*
+	 * Special hook for Fast retransmit, allows us to track the highest
+	 * TSN that is NEW in this SACK if gap ack blocks are present.
+	 */
+	uint32_t this_sack_highest_gap;
+
+	/*
+	 * The highest consecutive TSN that has been acked by peer on my
+	 * sends
+	 */
+	uint32_t last_acked_seq;
+
+	/* The next TSN that I will use in sending. */
+	uint32_t sending_seq;
+
+	/* Original seq number I used ??questionable to keep?? */
+	uint32_t init_seq_number;
+
+
+	/* The Advanced Peer Ack Point, as required by the PR-SCTP */
+	/* (A1 in Section 4.2) */
+	uint32_t advanced_peer_ack_point;
+
+	/*
+	 * The highest consecutive TSN at the bottom of the mapping array
+	 * (for the peer's sends).
+	 */
+	uint32_t cumulative_tsn;
+	/*
+	 * Used to track the mapping array and its offset bits. This MAY be
+	 * lower than cumulative_tsn.
+	 */
+	uint32_t mapping_array_base_tsn;
+	/*
+	 * Used to track the highest TSN we have received that is listed in
+	 * the mapping array.
+	 */
+	uint32_t highest_tsn_inside_map;
+
+	/* EY - new NR variables used for nr_sack based on mapping_array*/
+	uint8_t *nr_mapping_array;
+	uint32_t highest_tsn_inside_nr_map;
+
+	uint32_t fast_recovery_tsn;
+	uint32_t sat_t3_recovery_tsn;
+	uint32_t tsn_last_delivered;
+	/*
+	 * For the pd-api we should re-write this to be a bit more efficient. We
+	 * could have multiple sctp_queued_to_read's that we are building at
+	 * once. Now we only do this when we get ready to deliver to the
+	 * socket buffer. Note that we depend on the fact that the struct is
+	 * "stuck" on the read queue until we finish all the pd-api.
+	 */
+	struct sctp_queued_to_read *control_pdapi;
+
+	uint32_t tsn_of_pdapi_last_delivered;
+	uint32_t pdapi_ppid;
+	uint32_t context;
+	uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
+	uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
+	uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	/*
+	 * special log  - This adds considerable size
+	 * to the asoc, but provides a log that you
+	 * can use to detect problems via kgdb.
+	 */
+	struct sctp_tsn_log  in_tsnlog[SCTP_TSN_LOG_SIZE];
+	struct sctp_tsn_log  out_tsnlog[SCTP_TSN_LOG_SIZE];
+	uint32_t cumack_log[SCTP_TSN_LOG_SIZE];
+	uint32_t cumack_logsnt[SCTP_TSN_LOG_SIZE];
+	uint16_t tsn_in_at;
+	uint16_t tsn_out_at;
+	uint16_t tsn_in_wrapped;
+	uint16_t tsn_out_wrapped;
+	uint16_t cumack_log_at;
+	uint16_t cumack_log_atsnt;
+#endif /* SCTP_ASOCLOG_OF_TSNS */
+#ifdef SCTP_FS_SPEC_LOG
+	struct sctp_fs_spec_log fslog[SCTP_FS_SPEC_LOG_SIZE];
+	uint16_t fs_index;
+#endif
+
+	/*
+	 * window state information and smallest MTU that I use to bound
+	 * segmentation
+	 */
+	uint32_t peers_rwnd;
+	uint32_t my_rwnd;
+	uint32_t my_last_reported_rwnd;
+	uint32_t sctp_frag_point;
+
+	uint32_t total_output_queue_size;
+
+	uint32_t sb_cc;		       /* shadow of sb_cc */
+	uint32_t sb_send_resv;     /* amount reserved on a send */
+	uint32_t my_rwnd_control_len; /* shadow of sb_mbcnt used for rwnd control */
+#ifdef INET6
+	uint32_t default_flowlabel;
+#endif
+	uint32_t pr_sctp_cnt;
+	int ctrl_queue_cnt;	/* could be removed  REM - NO IT CAN'T!! RRS */
+	/*
+	 * All outbound datagrams queue into this list from the individual
+	 * stream queue. Here they get assigned a TSN and then await
+	 * sending. The stream seq comes when it is first put in the
+	 * individual str queue
+	 */
+	unsigned int stream_queue_cnt;
+	unsigned int send_queue_cnt;
+	unsigned int sent_queue_cnt;
+	unsigned int sent_queue_cnt_removeable;
+	/*
+	 * Number on the sent queue that are marked for retransmission. Until
+	 * this value is 0 we only send one packet of retransmitted data.
+	 */
+	unsigned int sent_queue_retran_cnt;
+
+	unsigned int size_on_reasm_queue;
+	unsigned int cnt_on_reasm_queue;
+	unsigned int fwd_tsn_cnt;
+	/* amount of data (bytes) currently in flight (on all destinations) */
+	unsigned int total_flight;
+	/* Total book size in flight */
+	unsigned int total_flight_count;	/* count of chunks used with
+						 * book total */
+	/* count of destination nets and list of destination nets */
+	unsigned int numnets;
+
+	/* Total error count on this association */
+	unsigned int overall_error_count;
+
+	unsigned int cnt_msg_on_sb;
+
+	/* All stream count of chunks for delivery */
+	unsigned int size_on_all_streams;
+	unsigned int cnt_on_all_streams;
+
+	/* Heart Beat delay in ms */
+	uint32_t heart_beat_delay;
+
+	/* autoclose */
+	unsigned int sctp_autoclose_ticks;
+
+	/* how many preopen streams we have */
+	unsigned int pre_open_streams;
+
+	/* How many streams I support coming into me */
+	unsigned int max_inbound_streams;
+
+	/* the cookie life I award for any cookie, in seconds */
+	unsigned int cookie_life;
+	/* time to delay acks for */
+	unsigned int delayed_ack;
+	unsigned int old_delayed_ack;
+	unsigned int sack_freq;
+	unsigned int data_pkts_seen;
+
+	unsigned int numduptsns;
+	int dup_tsns[SCTP_MAX_DUP_TSNS];
+	unsigned int initial_init_rto_max;	/* initial RTO for INIT's */
+	unsigned int initial_rto;	/* initial send RTO */
+	unsigned int minrto;	/* per assoc RTO-MIN */
+	unsigned int maxrto;	/* per assoc RTO-MAX */
+
+	/* authentication fields */
+	sctp_auth_chklist_t *local_auth_chunks;
+	sctp_auth_chklist_t *peer_auth_chunks;
+	sctp_hmaclist_t *local_hmacs;	/* local HMACs supported */
+	sctp_hmaclist_t *peer_hmacs;	/* peer HMACs supported */
+	struct sctp_keyhead shared_keys;	/* assoc's shared keys */
+	sctp_authinfo_t authinfo;	/* randoms, cached keys */
+	/*
+	 * refcnt to block freeing when a sender or receiver is off copying
+	 * user data in.
+	 */
+	uint32_t refcnt;
+	uint32_t chunks_on_out_queue;	/* total chunks floating around,
+					 * locked by send socket buffer */
+	uint32_t peers_adaptation;
+	uint16_t peer_hmac_id;	/* peer HMAC id to send */
+
+	/*
+	 * Being that we have no bag to collect stale cookies, and that we
+	 * really would not want to anyway.. we will count them in this
+	 * counter. We of course feed them to the pigeons right away (I have
+	 * always thought of pigeons as flying rats).
+	 */
+	uint16_t stale_cookie_count;
+
+	/*
+	 * For the partial delivery API: if it is invoked, this is the stream
+	 * and SSN of what I last delivered.
+	 */
+	uint16_t str_of_pdapi;
+	uint16_t ssn_of_pdapi;
+
+	/* counts of actual built streams. Allocation may be more however */
+	/* could re-arrange to optimize space here. */
+	uint16_t streamincnt;
+	uint16_t streamoutcnt;
+	uint16_t strm_realoutsize;
+	uint16_t strm_pending_add_size;
+	/* my maximum number of retrans of INIT and SEND */
+	/* copied from SCTP but should be individually settable */
+	uint16_t max_init_times;
+	uint16_t max_send_times;
+
+	uint16_t def_net_failure;
+
+	uint16_t def_net_pf_threshold;
+
+	/*
+	 * lock flag: 0 is ok to send, 1+ (doubles as a retran count) is
+	 * awaiting ACK
+	 */
+	uint16_t mapping_array_size;
+
+	uint16_t last_strm_seq_delivered;
+	uint16_t last_strm_no_delivered;
+
+	uint16_t last_revoke_count;
+	int16_t num_send_timers_up;
+
+	uint16_t stream_locked_on;
+	uint16_t ecn_echo_cnt_onq;
+
+	uint16_t free_chunk_cnt;
+	uint8_t stream_locked;
+	uint8_t authenticated;	/* packet authenticated ok */
+	/*
+	 * This flag indicates that a SACK needs to be sent.
+	 * Initially this is 1 to send the first SACK immediately.
+	 */
+	uint8_t send_sack;
+
+	/* max burst of new packets into the network */
+	uint32_t max_burst;
+	/* max burst of fast retransmit packets */
+	uint32_t fr_max_burst;
+
+	uint8_t sat_network;	/* RTT is in range of sat net or greater */
+	uint8_t sat_network_lockout;	/* lockout code */
+	uint8_t burst_limit_applied;	/* Burst limit in effect at last send? */
+	/* flag goes on when we are doing a partial delivery api */
+	uint8_t hb_random_values[4];
+	uint8_t fragmented_delivery_inprogress;
+	uint8_t fragment_flags;
+	uint8_t last_flags_delivered;
+	uint8_t hb_ect_randombit;
+	uint8_t hb_random_idx;
+	uint8_t default_dscp;
+	uint8_t asconf_del_pending;	/* asconf delete last addr pending */
+	uint8_t trigger_reset;
+	/*
+	 * This value, plus all others ack'd above the cum-ack, is added
+	 * together to cross check against the bit that we have yet to
+	 * define (probably in the SACK). When the cum-ack is updated, this
+	 * sum is updated as well.
+	 */
+
+	/* Flags whether an extension is supported or not */
+	uint8_t ecn_supported;
+	uint8_t prsctp_supported;
+	uint8_t auth_supported;
+	uint8_t asconf_supported;
+	uint8_t reconfig_supported;
+	uint8_t nrsack_supported;
+	uint8_t pktdrop_supported;
+	uint8_t idata_supported;
+
+	/* Did the peer make the stream config (add out) request */
+	uint8_t peer_req_out;
+
+	uint8_t local_strreset_support;
+	uint8_t peer_supports_nat;
+
+	struct sctp_scoping scope;
+	/* flags to handle send alternate net tracking */
+	uint8_t used_alt_asconfack;
+	uint8_t fast_retran_loss_recovery;
+	uint8_t sat_t3_loss_recovery;
+	uint8_t dropped_special_cnt;
+	uint8_t seen_a_sack_this_pkt;
+	uint8_t stream_reset_outstanding;
+	uint8_t stream_reset_out_is_outstanding;
+	uint8_t delayed_connection;
+	uint8_t ifp_had_enobuf;
+	uint8_t saw_sack_with_frags;
+	uint8_t saw_sack_with_nr_frags;
+	uint8_t in_asocid_hash;
+	uint8_t assoc_up_sent;
+	uint8_t adaptation_needed;
+	uint8_t adaptation_sent;
+	/* CMT variables */
+	uint8_t cmt_dac_pkts_rcvd;
+	uint8_t sctp_cmt_on_off;
+	uint8_t iam_blocking;
+	uint8_t cookie_how[8];
+	/* JRS 5/21/07 - CMT PF variable */
+	uint8_t sctp_cmt_pf;
+	uint8_t use_precise_time;
+	uint64_t sctp_features;
+	uint32_t max_cwnd;
+	uint16_t port; /* remote UDP encapsulation port */
+	/*
+	 * The mapping array is used to track out of order sequences above
+	 * last_acked_seq. 0 indicates packet missing, 1 indicates packet
+	 * rec'd. We slide it up every time we raise last_acked_seq and zero
+	 * trailing locations out. If I get a TSN above the array
+	 * mappingArraySz, I discard the datagram and let retransmit happen.
+	 */
+	uint32_t marked_retrans;
+	uint32_t timoinit;
+	uint32_t timodata;
+	uint32_t timosack;
+	uint32_t timoshutdown;
+	uint32_t timoheartbeat;
+	uint32_t timocookie;
+	uint32_t timoshutdownack;
+	struct timeval start_time;
+	struct timeval discontinuity_time;
+	uint64_t abandoned_unsent[SCTP_PR_SCTP_MAX + 1];
+	uint64_t abandoned_sent[SCTP_PR_SCTP_MAX + 1];
+};
+
+#endif
diff --git a/usrsctplib/netinet/sctp_sysctl.c b/usrsctplib/netinet/sctp_sysctl.c
new file mode 100755
index 0000000..8f81b0a
--- /dev/null
+++ b/usrsctplib/netinet/sctp_sysctl.c
@@ -0,0 +1,1687 @@
+/*-
+ * Copyright (c) 2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_constants.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#ifdef __FreeBSD__
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#endif
+#if defined(__APPLE__)
+#include <netinet/sctp_bsd_addr.h>
+#endif
+
+#ifdef __FreeBSD__
+FEATURE(sctp, "Stream Control Transmission Protocol");
+#endif
+
+/*
+ * sysctl tunable variables
+ */
+
+void
+sctp_init_sysctls()
+{
+	SCTP_BASE_SYSCTL(sctp_sendspace) = SCTPCTL_MAXDGRAM_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTPCTL_RECVSPACE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_auto_asconf) = SCTPCTL_AUTOASCONF_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_multiple_asconfs) = SCTPCTL_MULTIPLEASCONFS_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_ecn_enable) = SCTPCTL_ECN_ENABLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_pr_enable) = SCTPCTL_PR_ENABLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_auth_enable) = SCTPCTL_AUTH_ENABLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_asconf_enable) = SCTPCTL_ASCONF_ENABLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_reconfig_enable) = SCTPCTL_RECONFIG_ENABLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_nrsack_enable) = SCTPCTL_NRSACK_ENABLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_pktdrop_enable) = SCTPCTL_PKTDROP_ENABLE_DEFAULT;
+#if !(defined(__FreeBSD__) && __FreeBSD_version >= 800000)
+#if !defined(SCTP_WITH_NO_CSUM)
+	SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback) = SCTPCTL_LOOPBACK_NOCSUM_DEFAULT;
+#endif
+#endif
+	SCTP_BASE_SYSCTL(sctp_peer_chunk_oh) = SCTPCTL_PEER_CHKOH_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_max_burst_default) = SCTPCTL_MAXBURST_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_fr_max_burst_default) = SCTPCTL_FRMAXBURST_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = SCTPCTL_MAXCHUNKS_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_hashtblsize) = SCTPCTL_TCBHASHSIZE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_pcbtblsize) = SCTPCTL_PCBHASHSIZE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_min_split_point) = SCTPCTL_MIN_SPLIT_POINT_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_chunkscale) = SCTPCTL_CHUNKSCALE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default) = SCTPCTL_DELAYED_SACK_TIME_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_sack_freq_default) = SCTPCTL_SACK_FREQ_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_system_free_resc_limit) = SCTPCTL_SYS_RESOURCE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit) = SCTPCTL_ASOC_RESOURCE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default) = SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default) = SCTPCTL_PMTU_RAISE_TIME_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default) = SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_secret_lifetime_default) = SCTPCTL_SECRET_LIFETIME_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_rto_max_default) = SCTPCTL_RTO_MAX_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_rto_min_default) = SCTPCTL_RTO_MIN_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_rto_initial_default) = SCTPCTL_RTO_INITIAL_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_init_rto_max_default) = SCTPCTL_INIT_RTO_MAX_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default) = SCTPCTL_VALID_COOKIE_LIFE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_init_rtx_max_default) = SCTPCTL_INIT_RTX_MAX_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default) = SCTPCTL_ASSOC_RTX_MAX_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_path_rtx_max_default) = SCTPCTL_PATH_RTX_MAX_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_path_pf_threshold) = SCTPCTL_PATH_PF_THRESHOLD_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_add_more_threshold) = SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default) = SCTPCTL_INCOMING_STREAMS_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default) = SCTPCTL_OUTGOING_STREAMS_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_cmt_on_off) = SCTPCTL_CMT_ON_OFF_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_cmt_use_dac) = SCTPCTL_CMT_USE_DAC_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst) = SCTPCTL_CWND_MAXBURST_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_nat_friendly) = SCTPCTL_NAT_FRIENDLY_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_L2_abc_variable) = SCTPCTL_ABC_L_VAR_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count) = SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_do_drain) = SCTPCTL_DO_SCTP_DRAIN_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_hb_maxburst) = SCTPCTL_HB_MAX_BURST_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit) = SCTPCTL_ABORT_AT_LIMIT_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_min_residual) = SCTPCTL_MIN_RESIDUAL_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_max_retran_chunk) = SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_logging_level) = SCTPCTL_LOGGING_LEVEL_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_default_cc_module) = SCTPCTL_DEFAULT_CC_MODULE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_default_ss_module) = SCTPCTL_DEFAULT_SS_MODULE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_default_frag_interleave) = SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_mobility_base) = SCTPCTL_MOBILITY_BASE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff) = SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_vtag_time_wait) = SCTPCTL_TIME_WAIT_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_buffer_splitting) = SCTPCTL_BUFFER_SPLITTING_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_initial_cwnd) = SCTPCTL_INITIAL_CWND_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_rttvar_bw) = SCTPCTL_RTTVAR_BW_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_rttvar_rtt) = SCTPCTL_RTTVAR_RTT_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_rttvar_eqret) = SCTPCTL_RTTVAR_EQRET_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_steady_step) = SCTPCTL_RTTVAR_STEADYS_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_use_dccc_ecn) = SCTPCTL_RTTVAR_DCCCECN_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_blackhole) = SCTPCTL_BLACKHOLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_diag_info_code) = SCTPCTL_DIAG_INFO_CODE_DEFAULT;
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(__Windows__)
+	/* On Windows, the resource for global variables is limited. */
+	MALLOC(SCTP_BASE_SYSCTL(sctp_log), struct sctp_log *, sizeof(struct sctp_log), M_SYSCTL, M_ZERO);
+#else
+	memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+#endif
+#endif
+	SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = SCTPCTL_UDP_TUNNELING_PORT_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_enable_sack_immediately) = SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly) = SCTPCTL_NAT_FRIENDLY_INITS_DEFAULT;
+#if defined(SCTP_DEBUG)
+	SCTP_BASE_SYSCTL(sctp_debug_on) = SCTPCTL_DEBUG_DEFAULT;
+#endif
+#if defined(__APPLE__)
+	SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) = SCTPCTL_IGNORE_VMWARE_INTERFACES_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_main_timer) = SCTPCTL_MAIN_TIMER_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_addr_watchdog_limit) = SCTPCTL_ADDR_WATCHDOG_LIMIT_DEFAULT;
+	SCTP_BASE_SYSCTL(sctp_vtag_watchdog_limit) = SCTPCTL_VTAG_WATCHDOG_LIMIT_DEFAULT;
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	SCTP_BASE_SYSCTL(sctp_output_unlocked) = SCTPCTL_OUTPUT_UNLOCKED_DEFAULT;
+#endif
+}
+
+#if defined(__Windows__)
+void
+sctp_finish_sysctls()
+{
+#if defined(SCTP_LOCAL_TRACE_BUF)
+	if (SCTP_BASE_SYSCTL(sctp_log) != NULL) {
+		FREE(SCTP_BASE_SYSCTL(sctp_log), M_SYSCTL);
+		SCTP_BASE_SYSCTL(sctp_log) = NULL;
+	}
+#endif
+}
+#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__Windows__)
+/* It returns an upper limit. No filtering is done here */
+static unsigned int
+sctp_sysctl_number_of_addresses(struct sctp_inpcb *inp)
+{
+	unsigned int cnt;
+	struct sctp_vrf *vrf;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+	struct sctp_laddr *laddr;
+
+	cnt = 0;
+	/* neither Mac OS X nor FreeBSD support multiple routing functions */
+	if ((vrf = sctp_find_vrf(inp->def_vrf_id)) == NULL) {
+		return (0);
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+				switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+#endif
+#ifdef INET6
+				case AF_INET6:
+#endif
+					cnt++;
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	} else {
+		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+			switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+#endif
+#ifdef INET6
+			case AF_INET6:
+#endif
+				cnt++;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+	return (cnt);
+}
+
+static int
+sctp_sysctl_copy_out_local_addresses(struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sysctl_req *req)
+{
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
+	int ipv4_addr_legal, ipv6_addr_legal;
+#if defined(__Userspace__)
+	int conn_addr_legal;
+#endif
+	struct sctp_vrf *vrf;
+	struct xsctp_laddr xladdr;
+	struct sctp_laddr *laddr;
+	int error;
+
+	/* Turn on all the appropriate scope */
+	if (stcb) {
+		/* use association specific values */
+		loopback_scope = stcb->asoc.scope.loopback_scope;
+		ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+		local_scope = stcb->asoc.scope.local_scope;
+		site_scope = stcb->asoc.scope.site_scope;
+		ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+		ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#if defined(__Userspace__)
+		conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+	} else {
+		/* Use generic values for endpoints. */
+		loopback_scope = 1;
+		ipv4_local_scope = 1;
+		local_scope = 1;
+		site_scope = 1;
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+			ipv6_addr_legal = 1;
+			if (SCTP_IPV6_V6ONLY(inp)) {
+				ipv4_addr_legal = 0;
+			} else {
+				ipv4_addr_legal = 1;
+			}
+#if defined(__Userspace__)
+			conn_addr_legal = 0;
+#endif
+		} else {
+			ipv6_addr_legal = 0;
+#if defined(__Userspace__)
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+				conn_addr_legal = 1;
+				ipv4_addr_legal = 0;
+			} else {
+				conn_addr_legal = 0;
+				ipv4_addr_legal = 1;
+			}
+#else
+			ipv4_addr_legal = 1;
+#endif
+		}
+	}
+
+	/* neither Mac OS X nor FreeBSD support multiple routing functions */
+	if ((vrf = sctp_find_vrf(inp->def_vrf_id)) == NULL) {
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_INP_INFO_RUNLOCK();
+		return (-1);
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+			if ((loopback_scope == 0) && SCTP_IFN_IS_IFT_LOOP(sctp_ifn))
+				/* Skip loopback if loopback_scope not set */
+				continue;
+			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+				if (stcb) {
+					/*
+					 * ignore if blacklisted at
+					 * association level
+					 */
+					if (sctp_is_addr_restricted(stcb, sctp_ifa))
+						continue;
+				}
+				switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					if (ipv4_addr_legal) {
+						struct sockaddr_in *sin;
+
+						sin = &sctp_ifa->address.sin;
+						if (sin->sin_addr.s_addr == 0)
+							continue;
+#if defined(__FreeBSD__)
+						if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+						                     &sin->sin_addr) != 0) {
+							continue;
+						}
+#endif
+						if ((ipv4_local_scope == 0) && (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)))
+							continue;
+					} else {
+						continue;
+					}
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					if (ipv6_addr_legal) {
+						struct sockaddr_in6 *sin6;
+
+						sin6 = &sctp_ifa->address.sin6;
+						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+							continue;
+#if defined(__FreeBSD__)
+						if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+						                     &sin6->sin6_addr) != 0) {
+							continue;
+						}
+#endif
+						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+							if (local_scope == 0)
+								continue;
+						}
+						if ((site_scope == 0) && (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)))
+							continue;
+					} else {
+						continue;
+					}
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					if (!conn_addr_legal) {
+						continue;
+					}
+					break;
+#endif
+				default:
+					continue;
+				}
+				memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+				memcpy((void *)&xladdr.address, (const void *)&sctp_ifa->address, sizeof(union sctp_sockstore));
+				SCTP_INP_RUNLOCK(inp);
+				SCTP_INP_INFO_RUNLOCK();
+				error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+				if (error) {
+					return (error);
+				} else {
+					SCTP_INP_INFO_RLOCK();
+					SCTP_INP_RLOCK(inp);
+				}
+			}
+		}
+	} else {
+		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+			/* ignore if blacklisted at association level */
+			if (stcb && sctp_is_addr_restricted(stcb, laddr->ifa))
+				continue;
+			memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+			memcpy((void *)&xladdr.address, (const void *)&laddr->ifa->address, sizeof(union sctp_sockstore));
+			xladdr.start_time.tv_sec = (uint32_t)laddr->start_time.tv_sec;
+			xladdr.start_time.tv_usec = (uint32_t)laddr->start_time.tv_usec;
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_INP_INFO_RUNLOCK();
+			error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+			if (error) {
+				return (error);
+			} else {
+				SCTP_INP_INFO_RLOCK();
+				SCTP_INP_RLOCK(inp);
+			}
+		}
+	}
+	memset((void *)&xladdr, 0, sizeof(struct xsctp_laddr));
+	xladdr.last = 1;
+	SCTP_INP_RUNLOCK(inp);
+	SCTP_INP_INFO_RUNLOCK();
+	error = SYSCTL_OUT(req, &xladdr, sizeof(struct xsctp_laddr));
+
+	if (error) {
+		return (error);
+	} else {
+		SCTP_INP_INFO_RLOCK();
+		SCTP_INP_RLOCK(inp);
+		return (0);
+	}
+}
+
+/*
+ * sysctl functions
+ */
+#if defined(__APPLE__)
+static int
+sctp_sysctl_handle_assoclist SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS)
+{
+#endif
+	unsigned int number_of_endpoints;
+	unsigned int number_of_local_addresses;
+	unsigned int number_of_associations;
+	unsigned int number_of_remote_addresses;
+	unsigned int n;
+	int error;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+	struct xsctp_inpcb xinpcb;
+	struct xsctp_tcb xstcb;
+	struct xsctp_raddr xraddr;
+	struct socket *so;
+
+	number_of_endpoints = 0;
+	number_of_local_addresses = 0;
+	number_of_associations = 0;
+	number_of_remote_addresses = 0;
+
+	SCTP_INP_INFO_RLOCK();
+#if defined(__APPLE__)
+	if (req->oldptr == USER_ADDR_NULL) {
+#else
+	if (req->oldptr == NULL) {
+#endif
+		LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+			SCTP_INP_RLOCK(inp);
+			number_of_endpoints++;
+			number_of_local_addresses += sctp_sysctl_number_of_addresses(inp);
+			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+				number_of_associations++;
+				number_of_local_addresses += sctp_sysctl_number_of_addresses(inp);
+				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+					number_of_remote_addresses++;
+				}
+			}
+			SCTP_INP_RUNLOCK(inp);
+		}
+		SCTP_INP_INFO_RUNLOCK();
+		n = (number_of_endpoints + 1) * sizeof(struct xsctp_inpcb) +
+		    (number_of_local_addresses + number_of_endpoints + number_of_associations) * sizeof(struct xsctp_laddr) +
+		    (number_of_associations + number_of_endpoints) * sizeof(struct xsctp_tcb) +
+		    (number_of_remote_addresses + number_of_associations) * sizeof(struct xsctp_raddr);
+
+		/* request some more memory than needed */
+#if !defined(__Windows__)
+		req->oldidx = (n + n / 8);
+#else
+		req->dataidx = (n + n / 8);
+#endif
+		return (0);
+	}
+#if defined(__APPLE__)
+	if (req->newptr != USER_ADDR_NULL) {
+#else
+	if (req->newptr != NULL) {
+#endif
+		SCTP_INP_INFO_RUNLOCK();
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_SYSCTL, EPERM);
+		return (EPERM);
+	}
+	LIST_FOREACH(inp, &SCTP_BASE_INFO(listhead), sctp_list) {
+		SCTP_INP_RLOCK(inp);
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
+			/* if it's all gone it is being freed - skip it */
+			goto skip;
+		}
+		xinpcb.last = 0;
+		xinpcb.local_port = ntohs(inp->sctp_lport);
+		xinpcb.flags = inp->sctp_flags;
+#if defined(__FreeBSD__) && __FreeBSD_version < 1000048
+		xinpcb.features = (uint32_t)inp->sctp_features;
+#else
+		xinpcb.features = inp->sctp_features;
+#endif
+		xinpcb.total_sends = inp->total_sends;
+		xinpcb.total_recvs = inp->total_recvs;
+		xinpcb.total_nospaces = inp->total_nospaces;
+		xinpcb.fragmentation_point = inp->sctp_frag_point;
+#if !(defined(__FreeBSD__) && (__FreeBSD_version < 1001517))
+		xinpcb.socket = inp->sctp_socket;
+#endif
+		so = inp->sctp_socket;
+		if ((so == NULL) ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+			xinpcb.qlen = 0;
+			xinpcb.maxqlen = 0;
+		} else {
+			xinpcb.qlen = so->so_qlen;
+#if defined(__FreeBSD__) && __FreeBSD_version > 1100096
+			xinpcb.qlen_old = so->so_qlen > USHRT_MAX ?
+			    USHRT_MAX : (uint16_t) so->so_qlen;
+#endif
+			xinpcb.maxqlen = so->so_qlimit;
+#if defined(__FreeBSD__) && __FreeBSD_version > 1100096
+			xinpcb.maxqlen_old = so->so_qlimit > USHRT_MAX ?
+			    USHRT_MAX : (uint16_t) so->so_qlimit;
+#endif
+		}
+		SCTP_INP_INCR_REF(inp);
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_INP_INFO_RUNLOCK();
+		error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb));
+		if (error) {
+			SCTP_INP_DECR_REF(inp);
+			return (error);
+		}
+		SCTP_INP_INFO_RLOCK();
+		SCTP_INP_RLOCK(inp);
+		error = sctp_sysctl_copy_out_local_addresses(inp, NULL, req);
+		if (error) {
+			SCTP_INP_DECR_REF(inp);
+			return (error);
+		}
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			SCTP_TCB_LOCK(stcb);
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			xstcb.last = 0;
+			xstcb.local_port = ntohs(inp->sctp_lport);
+			xstcb.remote_port = ntohs(stcb->rport);
+			if (stcb->asoc.primary_destination != NULL)
+				xstcb.primary_addr = stcb->asoc.primary_destination->ro._l_addr;
+			xstcb.heartbeat_interval = stcb->asoc.heart_beat_delay;
+			xstcb.state = (uint32_t)sctp_map_assoc_state(stcb->asoc.state);
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 800000
+			/* 7.0 does not support these */
+			xstcb.assoc_id = sctp_get_associd(stcb);
+			xstcb.peers_rwnd = stcb->asoc.peers_rwnd;
+#endif
+#else
+			xstcb.assoc_id = sctp_get_associd(stcb);
+			xstcb.peers_rwnd = stcb->asoc.peers_rwnd;
+#endif
+			xstcb.in_streams = stcb->asoc.streamincnt;
+			xstcb.out_streams = stcb->asoc.streamoutcnt;
+			xstcb.max_nr_retrans = stcb->asoc.overall_error_count;
+			xstcb.primary_process = 0; /* not really supported yet */
+			xstcb.T1_expireries = stcb->asoc.timoinit + stcb->asoc.timocookie;
+			xstcb.T2_expireries = stcb->asoc.timoshutdown + stcb->asoc.timoshutdownack;
+			xstcb.retransmitted_tsns = stcb->asoc.marked_retrans;
+			xstcb.start_time.tv_sec = (uint32_t)stcb->asoc.start_time.tv_sec;
+			xstcb.start_time.tv_usec = (uint32_t)stcb->asoc.start_time.tv_usec;
+			xstcb.discontinuity_time.tv_sec = (uint32_t)stcb->asoc.discontinuity_time.tv_sec;
+			xstcb.discontinuity_time.tv_usec = (uint32_t)stcb->asoc.discontinuity_time.tv_usec;
+			xstcb.total_sends = stcb->total_sends;
+			xstcb.total_recvs = stcb->total_recvs;
+			xstcb.local_tag = stcb->asoc.my_vtag;
+			xstcb.remote_tag = stcb->asoc.peer_vtag;
+			xstcb.initial_tsn = stcb->asoc.init_seq_number;
+			xstcb.highest_tsn = stcb->asoc.sending_seq - 1;
+			xstcb.cumulative_tsn = stcb->asoc.last_acked_seq;
+			xstcb.cumulative_tsn_ack = stcb->asoc.cumulative_tsn;
+			xstcb.mtu = stcb->asoc.smallest_mtu;
+			xstcb.refcnt = stcb->asoc.refcnt;
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_INP_INFO_RUNLOCK();
+			error = SYSCTL_OUT(req, &xstcb, sizeof(struct xsctp_tcb));
+			if (error) {
+				SCTP_INP_DECR_REF(inp);
+				atomic_subtract_int(&stcb->asoc.refcnt, 1);
+				return (error);
+			}
+			SCTP_INP_INFO_RLOCK();
+			SCTP_INP_RLOCK(inp);
+			error = sctp_sysctl_copy_out_local_addresses(inp, stcb, req);
+			if (error) {
+				SCTP_INP_DECR_REF(inp);
+				atomic_subtract_int(&stcb->asoc.refcnt, 1);
+				return (error);
+			}
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				xraddr.last = 0;
+				xraddr.address = net->ro._l_addr;
+				xraddr.active = ((net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE);
+				xraddr.confirmed = ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0);
+				xraddr.heartbeat_enabled = ((net->dest_state & SCTP_ADDR_NOHB) == 0);
+				xraddr.potentially_failed = ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF);
+				xraddr.rto = net->RTO;
+				xraddr.max_path_rtx = net->failure_threshold;
+				xraddr.rtx = net->marked_retrans;
+				xraddr.error_counter = net->error_count;
+				xraddr.cwnd = net->cwnd;
+				xraddr.flight_size = net->flight_size;
+				xraddr.mtu = net->mtu;
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 800000
+				xraddr.rtt = net->rtt / 1000;
+				xraddr.heartbeat_interval = net->heart_beat_delay;
+				xraddr.ssthresh = net->ssthresh;
+#endif
+#else
+				xraddr.rtt = net->rtt / 1000;
+				xraddr.heartbeat_interval = net->heart_beat_delay;
+				xraddr.ssthresh = net->ssthresh;
+#endif
+				xraddr.start_time.tv_sec = (uint32_t)net->start_time.tv_sec;
+				xraddr.start_time.tv_usec = (uint32_t)net->start_time.tv_usec;
+				SCTP_INP_RUNLOCK(inp);
+				SCTP_INP_INFO_RUNLOCK();
+				error = SYSCTL_OUT(req, &xraddr, sizeof(struct xsctp_raddr));
+				if (error) {
+					SCTP_INP_DECR_REF(inp);
+					atomic_subtract_int(&stcb->asoc.refcnt, 1);
+					return (error);
+				}
+				SCTP_INP_INFO_RLOCK();
+				SCTP_INP_RLOCK(inp);
+			}
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			memset((void *)&xraddr, 0, sizeof(struct xsctp_raddr));
+			xraddr.last = 1;
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_INP_INFO_RUNLOCK();
+			error = SYSCTL_OUT(req, &xraddr, sizeof(struct xsctp_raddr));
+			if (error) {
+				SCTP_INP_DECR_REF(inp);
+				return (error);
+			}
+			SCTP_INP_INFO_RLOCK();
+			SCTP_INP_RLOCK(inp);
+		}
+		SCTP_INP_DECR_REF(inp);
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_INP_INFO_RUNLOCK();
+		memset((void *)&xstcb, 0, sizeof(struct xsctp_tcb));
+		xstcb.last = 1;
+		error = SYSCTL_OUT(req, &xstcb, sizeof(struct xsctp_tcb));
+		if (error) {
+			return (error);
+		}
+skip:
+		SCTP_INP_INFO_RLOCK();
+	}
+	SCTP_INP_INFO_RUNLOCK();
+
+	memset((void *)&xinpcb, 0, sizeof(struct xsctp_inpcb));
+	xinpcb.last = 1;
+	error = SYSCTL_OUT(req, &xinpcb, sizeof(struct xsctp_inpcb));
+	return (error);
+}
+
+#if defined(__APPLE__)
+static int
+sctp_sysctl_handle_udp_tunneling SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_udp_tunneling(SYSCTL_HANDLER_ARGS)
+{
+#endif
+	int error;
+	uint32_t old, new;
+
+	SCTP_INP_INFO_RLOCK();
+	old = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
+	SCTP_INP_INFO_RUNLOCK();
+	new = old;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100
+#ifdef VIMAGE
+	error = vnet_sysctl_handle_int(oidp, &new, 0, req);
+#else
+	error = sysctl_handle_int(oidp, &new, 0, req);
+#endif
+#else
+	error = sysctl_handle_int(oidp, &new, 0, req);
+#endif
+	if ((error == 0) &&
+#if defined (__APPLE__)
+	    (req->newptr != USER_ADDR_NULL)) {
+#else
+	    (req->newptr != NULL)) {
+#endif
+#if defined(__Windows__)
+		SCTP_INP_INFO_WLOCK();
+		sctp_over_udp_restart();
+		SCTP_INP_INFO_WUNLOCK();
+#else
+#if (SCTPCTL_UDP_TUNNELING_PORT_MIN == 0)
+		if (new > SCTPCTL_UDP_TUNNELING_PORT_MAX) {
+#else
+		if ((new < SCTPCTL_UDP_TUNNELING_PORT_MIN) ||
+		    (new > SCTPCTL_UDP_TUNNELING_PORT_MAX)) {
+#endif
+			error = EINVAL;
+		} else {
+			SCTP_INP_INFO_WLOCK();
+			SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = new;
+			if (old != 0) {
+				sctp_over_udp_stop();
+			}
+			if (new != 0) {
+				error = sctp_over_udp_start();
+			}
+			SCTP_INP_INFO_WUNLOCK();
+		}
+#endif
+	}
+	return (error);
+}
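+
+/*
+ * Illustrative note (assumed OID name, not stated in this hunk): this handler
+ * is expected to back the net.inet.sctp.udp_tunneling_port sysctl on FreeBSD,
+ * so changing the UDP encapsulation port at runtime would look like
+ *
+ *	sysctl net.inet.sctp.udp_tunneling_port=9899
+ *
+ * which stops any existing SCTP-over-UDP socket and starts a new one on the
+ * given port (9899 is the port registered for SCTP/UDP encapsulation in
+ * RFC 6951).
+ */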
+
+#if defined(__APPLE__)
+int sctp_is_vmware_interface(struct ifnet *);
+
+static int
+sctp_sysctl_handle_vmware_interfaces SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+	int error;
+	uint32_t old, new;
+
+	old = SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces);
+	new = old;
+	error = sysctl_handle_int(oidp, &new, 0, req);
+	if ((error == 0) && (req->newptr != USER_ADDR_NULL)) {
+		if ((new < SCTPCTL_IGNORE_VMWARE_INTERFACES_MIN) ||
+		    (new > SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX)) {
+			error = EINVAL;
+		} else {
+			if ((old == 1) && (new == 0)) {
+				sctp_add_or_del_interfaces(sctp_is_vmware_interface, 1);
+			}
+			if ((old == 0) && (new == 1)) {
+				sctp_add_or_del_interfaces(sctp_is_vmware_interface, 0);
+			}
+			if (old != new) {
+				SCTP_BASE_SYSCTL(sctp_ignore_vmware_interfaces) = new;
+			}
+		}
+	}
+	return (error);
+}
+#endif
+
+#if defined(__APPLE__)
+static int
+sctp_sysctl_handle_auth SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_auth(SYSCTL_HANDLER_ARGS)
+{
+#endif
+	int error;
+	uint32_t new;
+
+	new = SCTP_BASE_SYSCTL(sctp_auth_enable);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100
+#ifdef VIMAGE
+	error = vnet_sysctl_handle_int(oidp, &new, 0, req);
+#else
+	error = sysctl_handle_int(oidp, &new, 0, req);
+#endif
+#else
+	error = sysctl_handle_int(oidp, &new, 0, req);
+#endif
+	if ((error == 0) &&
+#if defined (__APPLE__)
+	    (req->newptr != USER_ADDR_NULL)) {
+#else
+	    (req->newptr != NULL)) {
+#endif
+#if (SCTPCTL_AUTH_ENABLE_MIN == 0)
+		if ((new > SCTPCTL_AUTH_ENABLE_MAX) ||
+		    ((new == 0) && (SCTP_BASE_SYSCTL(sctp_asconf_enable) == 1))) {
+#else
+		if ((new < SCTPCTL_AUTH_ENABLE_MIN) ||
+		    (new > SCTPCTL_AUTH_ENABLE_MAX) ||
+		    ((new == 0) && (SCTP_BASE_SYSCTL(sctp_asconf_enable) == 1))) {
+#endif
+			error = EINVAL;
+		} else {
+			SCTP_BASE_SYSCTL(sctp_auth_enable) = new;
+		}
+	}
+	return (error);
+}
+
+#if defined(__APPLE__)
+static int
+sctp_sysctl_handle_asconf SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_asconf(SYSCTL_HANDLER_ARGS)
+{
+#endif
+	int error;
+	uint32_t new;
+
+	new = SCTP_BASE_SYSCTL(sctp_asconf_enable);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100
+#ifdef VIMAGE
+	error = vnet_sysctl_handle_int(oidp, &new, 0, req);
+#else
+	error = sysctl_handle_int(oidp, &new, 0, req);
+#endif
+#else
+	error = sysctl_handle_int(oidp, &new, 0, req);
+#endif
+	if ((error == 0) &&
+#if defined (__APPLE__)
+	    (req->newptr != USER_ADDR_NULL)) {
+#else
+	    (req->newptr != NULL)) {
+#endif
+#if (SCTPCTL_ASCONF_ENABLE_MIN == 0)
+		if ((new > SCTPCTL_ASCONF_ENABLE_MAX) ||
+		    ((new == 1) && (SCTP_BASE_SYSCTL(sctp_auth_enable) == 0))) {
+#else
+		if ((new < SCTPCTL_ASCONF_ENABLE_MIN) ||
+		    (new > SCTPCTL_ASCONF_ENABLE_MAX) ||
+		    ((new == 1) && (SCTP_BASE_SYSCTL(sctp_auth_enable) == 0))) {
+#endif
+			error = EINVAL;
+		} else {
+			SCTP_BASE_SYSCTL(sctp_asconf_enable) = new;
+		}
+	}
+	return (error);
+}
+
+#if defined(__APPLE__)
+static int
+sctp_sysctl_handle_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+#else
+static int
+sctp_sysctl_handle_stats(SYSCTL_HANDLER_ARGS)
+{
+#endif
+	int error;
+#if defined(__FreeBSD__)
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+	struct sctpstat *sarry;
+	struct sctpstat sb;
+	int cpu;
+#endif
+	struct sctpstat sb_temp;
+#endif
+
+#if defined (__APPLE__)
+	if ((req->newptr != USER_ADDR_NULL) &&
+#else
+	if ((req->newptr != NULL) &&
+#endif
+	    (req->newlen != sizeof(struct sctpstat))) {
+		return (EINVAL);
+	}
+#if defined(__FreeBSD__)
+	memset(&sb_temp, 0, sizeof(struct sctpstat));
+
+	if (req->newptr != NULL) {
+		error = SYSCTL_IN(req, &sb_temp, sizeof(struct sctpstat));
+		if (error != 0) {
+			return (error);
+		}
+	}
+#if defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+	memset(&sb, 0, sizeof(sb));
+	for (cpu = 0; cpu < mp_maxid; cpu++) {
+		sarry = &SCTP_BASE_STATS[cpu];
+		if (sarry->sctps_discontinuitytime.tv_sec > sb.sctps_discontinuitytime.tv_sec) {
+			sb.sctps_discontinuitytime.tv_sec = sarry->sctps_discontinuitytime.tv_sec;
+			sb.sctps_discontinuitytime.tv_usec = sarry->sctps_discontinuitytime.tv_usec;
+		}
+		sb.sctps_currestab += sarry->sctps_currestab;
+		sb.sctps_activeestab += sarry->sctps_activeestab;
+		sb.sctps_restartestab += sarry->sctps_restartestab;
+		sb.sctps_collisionestab += sarry->sctps_collisionestab;
+		sb.sctps_passiveestab += sarry->sctps_passiveestab;
+		sb.sctps_aborted += sarry->sctps_aborted;
+		sb.sctps_shutdown += sarry->sctps_shutdown;
+		sb.sctps_outoftheblue += sarry->sctps_outoftheblue;
+		sb.sctps_checksumerrors += sarry->sctps_checksumerrors;
+		sb.sctps_outcontrolchunks += sarry->sctps_outcontrolchunks;
+		sb.sctps_outorderchunks += sarry->sctps_outorderchunks;
+		sb.sctps_outunorderchunks += sarry->sctps_outunorderchunks;
+		sb.sctps_incontrolchunks += sarry->sctps_incontrolchunks;
+		sb.sctps_inorderchunks += sarry->sctps_inorderchunks;
+		sb.sctps_inunorderchunks += sarry->sctps_inunorderchunks;
+		sb.sctps_fragusrmsgs += sarry->sctps_fragusrmsgs;
+		sb.sctps_reasmusrmsgs += sarry->sctps_reasmusrmsgs;
+		sb.sctps_outpackets += sarry->sctps_outpackets;
+		sb.sctps_inpackets += sarry->sctps_inpackets;
+		sb.sctps_recvpackets += sarry->sctps_recvpackets;
+		sb.sctps_recvdatagrams += sarry->sctps_recvdatagrams;
+		sb.sctps_recvpktwithdata += sarry->sctps_recvpktwithdata;
+		sb.sctps_recvsacks += sarry->sctps_recvsacks;
+		sb.sctps_recvdata += sarry->sctps_recvdata;
+		sb.sctps_recvdupdata += sarry->sctps_recvdupdata;
+		sb.sctps_recvheartbeat += sarry->sctps_recvheartbeat;
+		sb.sctps_recvheartbeatack += sarry->sctps_recvheartbeatack;
+		sb.sctps_recvecne += sarry->sctps_recvecne;
+		sb.sctps_recvauth += sarry->sctps_recvauth;
+		sb.sctps_recvauthmissing += sarry->sctps_recvauthmissing;
+		sb.sctps_recvivalhmacid += sarry->sctps_recvivalhmacid;
+		sb.sctps_recvivalkeyid += sarry->sctps_recvivalkeyid;
+		sb.sctps_recvauthfailed += sarry->sctps_recvauthfailed;
+		sb.sctps_recvexpress += sarry->sctps_recvexpress;
+		sb.sctps_recvexpressm += sarry->sctps_recvexpressm;
+		sb.sctps_recvnocrc += sarry->sctps_recvnocrc;
+		sb.sctps_recvswcrc += sarry->sctps_recvswcrc;
+		sb.sctps_recvhwcrc += sarry->sctps_recvhwcrc;
+		sb.sctps_sendpackets += sarry->sctps_sendpackets;
+		sb.sctps_sendsacks += sarry->sctps_sendsacks;
+		sb.sctps_senddata += sarry->sctps_senddata;
+		sb.sctps_sendretransdata += sarry->sctps_sendretransdata;
+		sb.sctps_sendfastretrans += sarry->sctps_sendfastretrans;
+		sb.sctps_sendmultfastretrans += sarry->sctps_sendmultfastretrans;
+		sb.sctps_sendheartbeat += sarry->sctps_sendheartbeat;
+		sb.sctps_sendecne += sarry->sctps_sendecne;
+		sb.sctps_sendauth += sarry->sctps_sendauth;
+		sb.sctps_senderrors += sarry->sctps_senderrors;
+		sb.sctps_sendnocrc += sarry->sctps_sendnocrc;
+		sb.sctps_sendswcrc += sarry->sctps_sendswcrc;
+		sb.sctps_sendhwcrc += sarry->sctps_sendhwcrc;
+		sb.sctps_pdrpfmbox += sarry->sctps_pdrpfmbox;
+		sb.sctps_pdrpfehos += sarry->sctps_pdrpfehos;
+		sb.sctps_pdrpmbda += sarry->sctps_pdrpmbda;
+		sb.sctps_pdrpmbct += sarry->sctps_pdrpmbct;
+		sb.sctps_pdrpbwrpt += sarry->sctps_pdrpbwrpt;
+		sb.sctps_pdrpcrupt += sarry->sctps_pdrpcrupt;
+		sb.sctps_pdrpnedat += sarry->sctps_pdrpnedat;
+		sb.sctps_pdrppdbrk += sarry->sctps_pdrppdbrk;
+		sb.sctps_pdrptsnnf += sarry->sctps_pdrptsnnf;
+		sb.sctps_pdrpdnfnd += sarry->sctps_pdrpdnfnd;
+		sb.sctps_pdrpdiwnp += sarry->sctps_pdrpdiwnp;
+		sb.sctps_pdrpdizrw += sarry->sctps_pdrpdizrw;
+		sb.sctps_pdrpbadd += sarry->sctps_pdrpbadd;
+		sb.sctps_pdrpmark += sarry->sctps_pdrpmark;
+		sb.sctps_timoiterator += sarry->sctps_timoiterator;
+		sb.sctps_timodata += sarry->sctps_timodata;
+		sb.sctps_timowindowprobe += sarry->sctps_timowindowprobe;
+		sb.sctps_timoinit += sarry->sctps_timoinit;
+		sb.sctps_timosack += sarry->sctps_timosack;
+		sb.sctps_timoshutdown += sarry->sctps_timoshutdown;
+		sb.sctps_timoheartbeat += sarry->sctps_timoheartbeat;
+		sb.sctps_timocookie += sarry->sctps_timocookie;
+		sb.sctps_timosecret += sarry->sctps_timosecret;
+		sb.sctps_timopathmtu += sarry->sctps_timopathmtu;
+		sb.sctps_timoshutdownack += sarry->sctps_timoshutdownack;
+		sb.sctps_timoshutdownguard += sarry->sctps_timoshutdownguard;
+		sb.sctps_timostrmrst += sarry->sctps_timostrmrst;
+		sb.sctps_timoearlyfr += sarry->sctps_timoearlyfr;
+		sb.sctps_timoasconf += sarry->sctps_timoasconf;
+		sb.sctps_timodelprim += sarry->sctps_timodelprim;
+		sb.sctps_timoautoclose += sarry->sctps_timoautoclose;
+		sb.sctps_timoassockill += sarry->sctps_timoassockill;
+		sb.sctps_timoinpkill += sarry->sctps_timoinpkill;
+		sb.sctps_hdrops += sarry->sctps_hdrops;
+		sb.sctps_badsum += sarry->sctps_badsum;
+		sb.sctps_noport += sarry->sctps_noport;
+		sb.sctps_badvtag += sarry->sctps_badvtag;
+		sb.sctps_badsid += sarry->sctps_badsid;
+		sb.sctps_nomem += sarry->sctps_nomem;
+		sb.sctps_fastretransinrtt += sarry->sctps_fastretransinrtt;
+		sb.sctps_markedretrans += sarry->sctps_markedretrans;
+		sb.sctps_naglesent += sarry->sctps_naglesent;
+		sb.sctps_naglequeued += sarry->sctps_naglequeued;
+		sb.sctps_maxburstqueued += sarry->sctps_maxburstqueued;
+		sb.sctps_ifnomemqueued += sarry->sctps_ifnomemqueued;
+		sb.sctps_windowprobed += sarry->sctps_windowprobed;
+		sb.sctps_lowlevelerr += sarry->sctps_lowlevelerr;
+		sb.sctps_lowlevelerrusr += sarry->sctps_lowlevelerrusr;
+		sb.sctps_datadropchklmt += sarry->sctps_datadropchklmt;
+		sb.sctps_datadroprwnd += sarry->sctps_datadroprwnd;
+		sb.sctps_ecnereducedcwnd += sarry->sctps_ecnereducedcwnd;
+		sb.sctps_vtagexpress += sarry->sctps_vtagexpress;
+		sb.sctps_vtagbogus += sarry->sctps_vtagbogus;
+		sb.sctps_primary_randry += sarry->sctps_primary_randry;
+		sb.sctps_cmt_randry += sarry->sctps_cmt_randry;
+		sb.sctps_slowpath_sack += sarry->sctps_slowpath_sack;
+		sb.sctps_wu_sacks_sent += sarry->sctps_wu_sacks_sent;
+		sb.sctps_sends_with_flags += sarry->sctps_sends_with_flags;
+		sb.sctps_sends_with_unord += sarry->sctps_sends_with_unord;
+		sb.sctps_sends_with_eof += sarry->sctps_sends_with_eof;
+		sb.sctps_sends_with_abort += sarry->sctps_sends_with_abort;
+		sb.sctps_protocol_drain_calls += sarry->sctps_protocol_drain_calls;
+		sb.sctps_protocol_drains_done += sarry->sctps_protocol_drains_done;
+		sb.sctps_read_peeks += sarry->sctps_read_peeks;
+		sb.sctps_cached_chk += sarry->sctps_cached_chk;
+		sb.sctps_cached_strmoq += sarry->sctps_cached_strmoq;
+		sb.sctps_left_abandon += sarry->sctps_left_abandon;
+		sb.sctps_send_burst_avoid += sarry->sctps_send_burst_avoid;
+		sb.sctps_send_cwnd_avoid += sarry->sctps_send_cwnd_avoid;
+		sb.sctps_fwdtsn_map_over += sarry->sctps_fwdtsn_map_over;
+		if (req->newptr != NULL) {
+			memcpy(sarry, &sb_temp, sizeof(struct sctpstat));
+		}
+	}
+	error = SYSCTL_OUT(req, &sb, sizeof(struct sctpstat));
+#else
+	error = SYSCTL_OUT(req, &SCTP_BASE_STATS, sizeof(struct sctpstat));
+	if (error != 0) {
+		return (error);
+	}
+	if (req->newptr != NULL) {
+		memcpy(&SCTP_BASE_STATS, &sb_temp, sizeof(struct sctpstat));
+	}
+#endif
+#else
+	error = SYSCTL_OUT(req, &SCTP_BASE_STATS, sizeof(struct sctpstat));
+#endif
+	return (error);
+}
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(__APPLE__)
+static int
+sctp_sysctl_handle_trace_log SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+#else
+static int
+sctp_sysctl_handle_trace_log(SYSCTL_HANDLER_ARGS)
+{
+#endif
+	int error;
+
+#if defined(__Windows__)
+	error = SYSCTL_OUT(req, SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
+#else
+	error = SYSCTL_OUT(req, &SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log));
+#endif
+	return (error);
+}
+
+#if defined(__APPLE__)
+static int
+sctp_sysctl_handle_trace_log_clear SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, req, oidp)
+#else
+static int
+sctp_sysctl_handle_trace_log_clear(SYSCTL_HANDLER_ARGS)
+{
+#endif
+	int error = 0;
+#if defined(__Windows__)
+	int value = 0;
+
+	if (req->new_data == NULL) {
+		return (error);
+	}
+	error = SYSCTL_IN(req, &value, sizeof(int));
+	if (error == 0 && value != 0 && SCTP_BASE_SYSCTL(sctp_log) != NULL) {
+		memset(SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+	}
+#else
+
+	memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
+#endif
+	return (error);
+}
+#endif
+
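+/*
+ * On FreeBSD and macOS builds, SCTP_UINT_SYSCTL(mib_name, var_name, prefix)
+ * expands to a sysctl handler named sctp_sysctl_handle_<mib_name> plus its
+ * SYSCTL_PROC registration.  Reads return SCTP_BASE_SYSCTL(var_name); writes
+ * are accepted only if the new value lies within [prefix##_MIN, prefix##_MAX],
+ * otherwise EINVAL is returned.
+ */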
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 800056 && __FreeBSD_version < 1000100
+#ifdef VIMAGE
+#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix)			\
+	static int							\
+	sctp_sysctl_handle_##mib_name(SYSCTL_HANDLER_ARGS)		\
+	{								\
+		int error;						\
+		uint32_t new;						\
+									\
+		new = SCTP_BASE_SYSCTL(var_name);			\
+		error = vnet_sysctl_handle_int(oidp, &new, 0, req);	\
+		if ((error == 0) && (req->newptr != NULL)) {		\
+			if ((new < prefix##_MIN) ||			\
+			    (new > prefix##_MAX)) {			\
+				error = EINVAL;				\
+			} else {					\
+				SCTP_BASE_SYSCTL(var_name) = new;	\
+			}						\
+		}							\
+		return (error);						\
+	}								\
+	SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name,			\
+	                 CTLTYPE_UINT|CTLFLAG_RW, NULL, 0,		\
+	                 sctp_sysctl_handle_##mib_name, "UI", prefix##_DESC);
+#else
+#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix)			\
+	static int							\
+	sctp_sysctl_handle_##mib_name(SYSCTL_HANDLER_ARGS)		\
+	{								\
+		int error;						\
+		uint32_t new;						\
+									\
+		new = SCTP_BASE_SYSCTL(var_name);			\
+		error = sysctl_handle_int(oidp, &new, 0, req);		\
+		if ((error == 0) && (req->newptr != NULL)) {		\
+			if ((new < prefix##_MIN) ||			\
+			    (new > prefix##_MAX)) {			\
+				error = EINVAL;				\
+			} else {					\
+				SCTP_BASE_SYSCTL(var_name) = new;	\
+			}						\
+		}							\
+		return (error);						\
+	}								\
+	SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name,			\
+	                 CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW, NULL, 0,	\
+	                 sctp_sysctl_handle_##mib_name, "UI", prefix##_DESC);
+#endif
+#else
+#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix)			\
+	static int							\
+	sctp_sysctl_handle_##mib_name(SYSCTL_HANDLER_ARGS)		\
+	{								\
+		int error;						\
+		uint32_t new;						\
+									\
+		new = SCTP_BASE_SYSCTL(var_name);			\
+		error = sysctl_handle_int(oidp, &new, 0, req);		\
+		if ((error == 0) && (req->newptr != NULL)) {		\
+			if ((new < prefix##_MIN) ||			\
+			    (new > prefix##_MAX)) {			\
+				error = EINVAL;				\
+			} else {					\
+				SCTP_BASE_SYSCTL(var_name) = new;	\
+			}						\
+		}							\
+		return (error);						\
+	}								\
+	SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name,			\
+	                 CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW, NULL, 0,	\
+	                 sctp_sysctl_handle_##mib_name, "UI", prefix##_DESC);
+#endif
+#else
+#define SCTP_UINT_SYSCTL(mib_name, var_name, prefix)			\
+	static int							\
+	sctp_sysctl_handle_##mib_name(struct sysctl_oid *oidp,		\
+	                          void *arg1 __attribute__((unused)),	\
+	                          int arg2 __attribute__((unused)),	\
+	                          struct sysctl_req *req)		\
+	{								\
+		int error;						\
+		uint32_t new;						\
+									\
+		new = SCTP_BASE_SYSCTL(var_name);			\
+		error = sysctl_handle_int(oidp, &new, 0, req);		\
+		if ((error == 0) && (req->newptr != USER_ADDR_NULL)) {	\
+			if ((new < prefix##_MIN) ||			\
+			    (new > prefix##_MAX)) {			\
+				error = EINVAL;				\
+			} else {					\
+				SCTP_BASE_SYSCTL(var_name) = new;	\
+			}						\
+		}							\
+		return (error);						\
+	}								\
+	SYSCTL_PROC(_net_inet_sctp, OID_AUTO, mib_name,			\
+	            CTLTYPE_INT | CTLFLAG_RW, NULL, 0,			\
+	            sctp_sysctl_handle_##mib_name, "I", prefix##_DESC);
+#define CTLTYPE_UINT CTLTYPE_INT
+#define CTLFLAG_VNET 0
+#endif
+
+/*
+ * sysctl definitions
+ */
+
+SCTP_UINT_SYSCTL(sendspace, sctp_sendspace, SCTPCTL_MAXDGRAM)
+SCTP_UINT_SYSCTL(recvspace, sctp_recvspace, SCTPCTL_RECVSPACE)
+SCTP_UINT_SYSCTL(auto_asconf, sctp_auto_asconf, SCTPCTL_AUTOASCONF)
+SCTP_UINT_SYSCTL(ecn_enable, sctp_ecn_enable, SCTPCTL_ECN_ENABLE)
+SCTP_UINT_SYSCTL(pr_enable, sctp_pr_enable, SCTPCTL_PR_ENABLE)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, auth_enable, CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW,
+            NULL, 0, sctp_sysctl_handle_auth, "IU", SCTPCTL_AUTH_ENABLE_DESC);
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, asconf_enable, CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW,
+            NULL, 0, sctp_sysctl_handle_asconf, "IU", SCTPCTL_ASCONF_ENABLE_DESC);
+SCTP_UINT_SYSCTL(reconfig_enable, sctp_reconfig_enable, SCTPCTL_RECONFIG_ENABLE)
+SCTP_UINT_SYSCTL(nrsack_enable, sctp_nrsack_enable, SCTPCTL_NRSACK_ENABLE)
+SCTP_UINT_SYSCTL(pktdrop_enable, sctp_pktdrop_enable, SCTPCTL_PKTDROP_ENABLE)
+#if defined(__APPLE__)
+#if !defined(SCTP_WITH_NO_CSUM)
+SCTP_UINT_SYSCTL(loopback_nocsum, sctp_no_csum_on_loopback, SCTPCTL_LOOPBACK_NOCSUM)
+#endif
+#endif
+SCTP_UINT_SYSCTL(peer_chkoh, sctp_peer_chunk_oh, SCTPCTL_PEER_CHKOH)
+SCTP_UINT_SYSCTL(maxburst, sctp_max_burst_default, SCTPCTL_MAXBURST)
+SCTP_UINT_SYSCTL(fr_maxburst, sctp_fr_max_burst_default, SCTPCTL_FRMAXBURST)
+SCTP_UINT_SYSCTL(maxchunks, sctp_max_chunks_on_queue, SCTPCTL_MAXCHUNKS)
+SCTP_UINT_SYSCTL(tcbhashsize, sctp_hashtblsize, SCTPCTL_TCBHASHSIZE)
+SCTP_UINT_SYSCTL(pcbhashsize, sctp_pcbtblsize, SCTPCTL_PCBHASHSIZE)
+SCTP_UINT_SYSCTL(min_split_point, sctp_min_split_point, SCTPCTL_MIN_SPLIT_POINT)
+SCTP_UINT_SYSCTL(chunkscale, sctp_chunkscale, SCTPCTL_CHUNKSCALE)
+SCTP_UINT_SYSCTL(delayed_sack_time, sctp_delayed_sack_time_default, SCTPCTL_DELAYED_SACK_TIME)
+SCTP_UINT_SYSCTL(sack_freq, sctp_sack_freq_default, SCTPCTL_SACK_FREQ)
+SCTP_UINT_SYSCTL(sys_resource, sctp_system_free_resc_limit, SCTPCTL_SYS_RESOURCE)
+SCTP_UINT_SYSCTL(asoc_resource, sctp_asoc_free_resc_limit, SCTPCTL_ASOC_RESOURCE)
+SCTP_UINT_SYSCTL(heartbeat_interval, sctp_heartbeat_interval_default, SCTPCTL_HEARTBEAT_INTERVAL)
+SCTP_UINT_SYSCTL(pmtu_raise_time, sctp_pmtu_raise_time_default, SCTPCTL_PMTU_RAISE_TIME)
+SCTP_UINT_SYSCTL(shutdown_guard_time, sctp_shutdown_guard_time_default, SCTPCTL_SHUTDOWN_GUARD_TIME)
+SCTP_UINT_SYSCTL(secret_lifetime, sctp_secret_lifetime_default, SCTPCTL_SECRET_LIFETIME)
+SCTP_UINT_SYSCTL(rto_max, sctp_rto_max_default, SCTPCTL_RTO_MAX)
+SCTP_UINT_SYSCTL(rto_min, sctp_rto_min_default, SCTPCTL_RTO_MIN)
+SCTP_UINT_SYSCTL(rto_initial, sctp_rto_initial_default, SCTPCTL_RTO_INITIAL)
+SCTP_UINT_SYSCTL(init_rto_max, sctp_init_rto_max_default, SCTPCTL_INIT_RTO_MAX)
+SCTP_UINT_SYSCTL(valid_cookie_life, sctp_valid_cookie_life_default, SCTPCTL_VALID_COOKIE_LIFE)
+SCTP_UINT_SYSCTL(init_rtx_max, sctp_init_rtx_max_default, SCTPCTL_INIT_RTX_MAX)
+SCTP_UINT_SYSCTL(assoc_rtx_max, sctp_assoc_rtx_max_default, SCTPCTL_ASSOC_RTX_MAX)
+SCTP_UINT_SYSCTL(path_rtx_max, sctp_path_rtx_max_default, SCTPCTL_PATH_RTX_MAX)
+SCTP_UINT_SYSCTL(path_pf_threshold, sctp_path_pf_threshold, SCTPCTL_PATH_PF_THRESHOLD)
+SCTP_UINT_SYSCTL(add_more_on_output, sctp_add_more_threshold, SCTPCTL_ADD_MORE_ON_OUTPUT)
+SCTP_UINT_SYSCTL(incoming_streams, sctp_nr_incoming_streams_default, SCTPCTL_INCOMING_STREAMS)
+SCTP_UINT_SYSCTL(outgoing_streams, sctp_nr_outgoing_streams_default, SCTPCTL_OUTGOING_STREAMS)
+SCTP_UINT_SYSCTL(cmt_on_off, sctp_cmt_on_off, SCTPCTL_CMT_ON_OFF)
+SCTP_UINT_SYSCTL(cmt_use_dac, sctp_cmt_use_dac, SCTPCTL_CMT_USE_DAC)
+SCTP_UINT_SYSCTL(cwnd_maxburst, sctp_use_cwnd_based_maxburst, SCTPCTL_CWND_MAXBURST)
+SCTP_UINT_SYSCTL(nat_friendly, sctp_nat_friendly, SCTPCTL_NAT_FRIENDLY)
+SCTP_UINT_SYSCTL(abc_l_var, sctp_L2_abc_variable, SCTPCTL_ABC_L_VAR)
+SCTP_UINT_SYSCTL(max_chained_mbufs, sctp_mbuf_threshold_count, SCTPCTL_MAX_CHAINED_MBUFS)
+SCTP_UINT_SYSCTL(do_sctp_drain, sctp_do_drain, SCTPCTL_DO_SCTP_DRAIN)
+SCTP_UINT_SYSCTL(hb_max_burst, sctp_hb_maxburst, SCTPCTL_HB_MAX_BURST)
+SCTP_UINT_SYSCTL(abort_at_limit, sctp_abort_if_one_2_one_hits_limit, SCTPCTL_ABORT_AT_LIMIT)
+SCTP_UINT_SYSCTL(min_residual, sctp_min_residual, SCTPCTL_MIN_RESIDUAL)
+SCTP_UINT_SYSCTL(max_retran_chunk, sctp_max_retran_chunk, SCTPCTL_MAX_RETRAN_CHUNK)
+SCTP_UINT_SYSCTL(log_level, sctp_logging_level, SCTPCTL_LOGGING_LEVEL)
+SCTP_UINT_SYSCTL(default_cc_module, sctp_default_cc_module, SCTPCTL_DEFAULT_CC_MODULE)
+SCTP_UINT_SYSCTL(default_ss_module, sctp_default_ss_module, SCTPCTL_DEFAULT_SS_MODULE)
+SCTP_UINT_SYSCTL(default_frag_interleave, sctp_default_frag_interleave, SCTPCTL_DEFAULT_FRAG_INTERLEAVE)
+SCTP_UINT_SYSCTL(mobility_base, sctp_mobility_base, SCTPCTL_MOBILITY_BASE)
+SCTP_UINT_SYSCTL(mobility_fasthandoff, sctp_mobility_fasthandoff, SCTPCTL_MOBILITY_FASTHANDOFF)
+#if defined(SCTP_LOCAL_TRACE_BUF)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, log, CTLFLAG_VNET|CTLTYPE_STRUCT|CTLFLAG_RD,
+            NULL, 0, sctp_sysctl_handle_trace_log, "S,sctplog", "SCTP logging (struct sctp_log)");
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, clear_trace, CTLFLAG_VNET|CTLTYPE_UINT | CTLFLAG_RW,
+            NULL, 0, sctp_sysctl_handle_trace_log_clear, "IU", "Clear SCTP Logging buffer");
+#endif
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, udp_tunneling_port, CTLFLAG_VNET|CTLTYPE_UINT|CTLFLAG_RW,
+            NULL, 0, sctp_sysctl_handle_udp_tunneling, "IU", SCTPCTL_UDP_TUNNELING_PORT_DESC);
+SCTP_UINT_SYSCTL(enable_sack_immediately, sctp_enable_sack_immediately, SCTPCTL_SACK_IMMEDIATELY_ENABLE)
+SCTP_UINT_SYSCTL(nat_friendly_init, sctp_inits_include_nat_friendly, SCTPCTL_NAT_FRIENDLY_INITS)
+SCTP_UINT_SYSCTL(vtag_time_wait, sctp_vtag_time_wait, SCTPCTL_TIME_WAIT)
+SCTP_UINT_SYSCTL(buffer_splitting, sctp_buffer_splitting, SCTPCTL_BUFFER_SPLITTING)
+SCTP_UINT_SYSCTL(initial_cwnd, sctp_initial_cwnd, SCTPCTL_INITIAL_CWND)
+SCTP_UINT_SYSCTL(rttvar_bw, sctp_rttvar_bw, SCTPCTL_RTTVAR_BW)
+SCTP_UINT_SYSCTL(rttvar_rtt, sctp_rttvar_rtt, SCTPCTL_RTTVAR_RTT)
+SCTP_UINT_SYSCTL(rttvar_eqret, sctp_rttvar_eqret, SCTPCTL_RTTVAR_EQRET)
+SCTP_UINT_SYSCTL(rttvar_steady_step, sctp_steady_step, SCTPCTL_RTTVAR_STEADYS)
+SCTP_UINT_SYSCTL(use_dcccecn, sctp_use_dccc_ecn, SCTPCTL_RTTVAR_DCCCECN)
+SCTP_UINT_SYSCTL(blackhole, sctp_blackhole, SCTPCTL_BLACKHOLE)
+SCTP_UINT_SYSCTL(diag_info_code, sctp_diag_info_code, SCTPCTL_DIAG_INFO_CODE)
+#ifdef SCTP_DEBUG
+SCTP_UINT_SYSCTL(debug, sctp_debug_on, SCTPCTL_DEBUG)
+#endif
+#if defined(__APPLE__)
+SCTP_UINT_SYSCTL(main_timer, sctp_main_timer, SCTPCTL_MAIN_TIMER)
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, ignore_vmware_interfaces, CTLTYPE_UINT|CTLFLAG_RW,
+            NULL, 0, sctp_sysctl_handle_vmware_interfaces, "IU", SCTPCTL_IGNORE_VMWARE_INTERFACES_DESC);
+SCTP_UINT_SYSCTL(addr_watchdog_limit, sctp_addr_watchdog_limit, SCTPCTL_ADDR_WATCHDOG_LIMIT)
+SCTP_UINT_SYSCTL(vtag_watchdog_limit, sctp_vtag_watchdog_limit, SCTPCTL_VTAG_WATCHDOG_LIMIT)
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+SCTP_UINT_SYSCTL(output_unlocked, sctp_output_unlocked, SCTPCTL_OUTPUT_UNLOCKED)
+#endif
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_VNET|CTLTYPE_STRUCT|CTLFLAG_RW,
+            NULL, 0, sctp_sysctl_handle_stats, "S,sctpstat", "SCTP statistics (struct sctp_stat)");
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, assoclist, CTLFLAG_VNET|CTLTYPE_OPAQUE|CTLFLAG_RD,
+            NULL, 0, sctp_sysctl_handle_assoclist, "S,xassoc", "List of active SCTP associations");
+
+#elif defined(__Windows__)
+
+#define RANGECHK(var, min, max) \
+	if ((var) < (min)) { (var) = (min); } \
+	else if ((var) > (max)) { (var) = (max); }
+
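+/*
+ * On Windows a single handler serves every integer OID: after the generic
+ * integer read/write it re-clamps all tunables to their SCTPCTL_*_MIN/_MAX
+ * ranges via RANGECHK() instead of rejecting out-of-range writes.
+ */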
+static int
+sctp_sysctl_handle_int(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+
+	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
+	if (error == 0) {
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_sendspace), SCTPCTL_MAXDGRAM_MIN, SCTPCTL_MAXDGRAM_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_recvspace), SCTPCTL_RECVSPACE_MIN, SCTPCTL_RECVSPACE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_auto_asconf), SCTPCTL_AUTOASCONF_MIN, SCTPCTL_AUTOASCONF_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_ecn_enable), SCTPCTL_ECN_ENABLE_MIN, SCTPCTL_ECN_ENABLE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_pr_enable), SCTPCTL_PR_ENABLE_MIN, SCTPCTL_PR_ENABLE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_reconfig_enable), SCTPCTL_RECONFIG_ENABLE_MIN, SCTPCTL_RECONFIG_ENABLE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_nrsack_enable), SCTPCTL_NRSACK_ENABLE_MIN, SCTPCTL_NRSACK_ENABLE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_pktdrop_enable), SCTPCTL_PKTDROP_ENABLE_MIN, SCTPCTL_PKTDROP_ENABLE_MAX);
+#if !defined(SCTP_WITH_NO_CSUM)
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), SCTPCTL_LOOPBACK_NOCSUM_MIN, SCTPCTL_LOOPBACK_NOCSUM_MAX);
+#endif
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), SCTPCTL_PEER_CHKOH_MIN, SCTPCTL_PEER_CHKOH_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_max_burst_default), SCTPCTL_MAXBURST_MIN, SCTPCTL_MAXBURST_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_fr_max_burst_default), SCTPCTL_FRMAXBURST_MIN, SCTPCTL_FRMAXBURST_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), SCTPCTL_MAXCHUNKS_MIN, SCTPCTL_MAXCHUNKS_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_hashtblsize), SCTPCTL_TCBHASHSIZE_MIN, SCTPCTL_TCBHASHSIZE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_pcbtblsize), SCTPCTL_PCBHASHSIZE_MIN, SCTPCTL_PCBHASHSIZE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_min_split_point), SCTPCTL_MIN_SPLIT_POINT_MIN, SCTPCTL_MIN_SPLIT_POINT_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_chunkscale), SCTPCTL_CHUNKSCALE_MIN, SCTPCTL_CHUNKSCALE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), SCTPCTL_DELAYED_SACK_TIME_MIN, SCTPCTL_DELAYED_SACK_TIME_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_sack_freq_default), SCTPCTL_SACK_FREQ_MIN, SCTPCTL_SACK_FREQ_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), SCTPCTL_SYS_RESOURCE_MIN, SCTPCTL_SYS_RESOURCE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), SCTPCTL_ASOC_RESOURCE_MIN, SCTPCTL_ASOC_RESOURCE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), SCTPCTL_HEARTBEAT_INTERVAL_MIN, SCTPCTL_HEARTBEAT_INTERVAL_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), SCTPCTL_PMTU_RAISE_TIME_MIN, SCTPCTL_PMTU_RAISE_TIME_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), SCTPCTL_SHUTDOWN_GUARD_TIME_MIN, SCTPCTL_SHUTDOWN_GUARD_TIME_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), SCTPCTL_SECRET_LIFETIME_MIN, SCTPCTL_SECRET_LIFETIME_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_max_default), SCTPCTL_RTO_MAX_MIN, SCTPCTL_RTO_MAX_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_min_default), SCTPCTL_RTO_MIN_MIN, SCTPCTL_RTO_MIN_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_rto_initial_default), SCTPCTL_RTO_INITIAL_MIN, SCTPCTL_RTO_INITIAL_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rto_max_default), SCTPCTL_INIT_RTO_MAX_MIN, SCTPCTL_INIT_RTO_MAX_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), SCTPCTL_VALID_COOKIE_LIFE_MIN, SCTPCTL_VALID_COOKIE_LIFE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), SCTPCTL_INIT_RTX_MAX_MIN, SCTPCTL_INIT_RTX_MAX_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), SCTPCTL_ASSOC_RTX_MAX_MIN, SCTPCTL_ASSOC_RTX_MAX_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), SCTPCTL_PATH_RTX_MAX_MIN, SCTPCTL_PATH_RTX_MAX_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_path_pf_threshold), SCTPCTL_PATH_PF_THRESHOLD_MIN, SCTPCTL_PATH_PF_THRESHOLD_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_add_more_threshold), SCTPCTL_ADD_MORE_ON_OUTPUT_MIN, SCTPCTL_ADD_MORE_ON_OUTPUT_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default), SCTPCTL_INCOMING_STREAMS_MIN, SCTPCTL_INCOMING_STREAMS_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), SCTPCTL_OUTGOING_STREAMS_MIN, SCTPCTL_OUTGOING_STREAMS_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_on_off), SCTPCTL_CMT_ON_OFF_MIN, SCTPCTL_CMT_ON_OFF_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_cmt_use_dac), SCTPCTL_CMT_USE_DAC_MIN, SCTPCTL_CMT_USE_DAC_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), SCTPCTL_CWND_MAXBURST_MIN, SCTPCTL_CWND_MAXBURST_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_nat_friendly), SCTPCTL_NAT_FRIENDLY_MIN, SCTPCTL_NAT_FRIENDLY_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_L2_abc_variable), SCTPCTL_ABC_L_VAR_MIN, SCTPCTL_ABC_L_VAR_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), SCTPCTL_MAX_CHAINED_MBUFS_MIN, SCTPCTL_MAX_CHAINED_MBUFS_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_do_drain), SCTPCTL_DO_SCTP_DRAIN_MIN, SCTPCTL_DO_SCTP_DRAIN_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_hb_maxburst), SCTPCTL_HB_MAX_BURST_MIN, SCTPCTL_HB_MAX_BURST_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), SCTPCTL_ABORT_AT_LIMIT_MIN, SCTPCTL_ABORT_AT_LIMIT_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_min_residual), SCTPCTL_MIN_RESIDUAL_MIN, SCTPCTL_MIN_RESIDUAL_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_max_retran_chunk), SCTPCTL_MAX_RETRAN_CHUNK_MIN, SCTPCTL_MAX_RETRAN_CHUNK_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_logging_level), SCTPCTL_LOGGING_LEVEL_MIN, SCTPCTL_LOGGING_LEVEL_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_default_cc_module), SCTPCTL_DEFAULT_CC_MODULE_MIN, SCTPCTL_DEFAULT_CC_MODULE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_default_ss_module), SCTPCTL_DEFAULT_SS_MODULE_MIN, SCTPCTL_DEFAULT_SS_MODULE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_default_frag_interleave), SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN, SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_vtag_time_wait), SCTPCTL_TIME_WAIT_MIN, SCTPCTL_TIME_WAIT_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_buffer_splitting), SCTPCTL_BUFFER_SPLITTING_MIN, SCTPCTL_BUFFER_SPLITTING_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_initial_cwnd), SCTPCTL_INITIAL_CWND_MIN, SCTPCTL_INITIAL_CWND_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_bw), SCTPCTL_RTTVAR_BW_MIN, SCTPCTL_RTTVAR_BW_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_rtt), SCTPCTL_RTTVAR_RTT_MIN, SCTPCTL_RTTVAR_RTT_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_eqret), SCTPCTL_RTTVAR_EQRET_MIN, SCTPCTL_RTTVAR_EQRET_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_steady_step), SCTPCTL_RTTVAR_STEADYS_MIN, SCTPCTL_RTTVAR_STEADYS_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_use_dccc_ecn), SCTPCTL_RTTVAR_DCCCECN_MIN, SCTPCTL_RTTVAR_DCCCECN_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_base), SCTPCTL_MOBILITY_BASE_MIN, SCTPCTL_MOBILITY_BASE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), SCTPCTL_MOBILITY_FASTHANDOFF_MIN, SCTPCTL_MOBILITY_FASTHANDOFF_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN, SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), SCTPCTL_NAT_FRIENDLY_INITS_MIN, SCTPCTL_NAT_FRIENDLY_INITS_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_blackhole), SCTPCTL_BLACKHOLE_MIN, SCTPCTL_BLACKHOLE_MAX);
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_diag_info_code), SCTPCTL_DIAG_INFO_CODE_MIN, SCTPCTL_DIAG_INFO_CODE_MAX);
+#ifdef SCTP_DEBUG
+		RANGECHK(SCTP_BASE_SYSCTL(sctp_debug_on), SCTPCTL_DEBUG_MIN, SCTPCTL_DEBUG_MAX);
+#endif
+	}
+	return (error);
+}
+
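+/*
+ * Register the Windows OID tree.  Each entry points directly at the backing
+ * field in SCTP_BASE_SYSCTL and, for plain integer tunables, shares
+ * sctp_sysctl_handle_int() for range enforcement.
+ */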
+void
+sysctl_setup_sctp(void)
+{
+	sysctl_add_oid(&sysctl_oid_top, "sendspace", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_sendspace), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MAXDGRAM_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "recvspace", CTLTYPE_INT|CTLFLAG_RW,
+           &SCTP_BASE_SYSCTL(sctp_recvspace), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RECVSPACE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "auto_asconf", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_auto_asconf), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_AUTOASCONF_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "ecn_enable", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_ecn_enable), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_ECN_ENABLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "pr_enable", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_pr_enable), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_PR_ENABLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "auth_enable", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_auth_enable), 0, sctp_sysctl_handle_auth,
+	    SCTPCTL_AUTH_ENABLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "asconf_enable", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_asconf_enable), 0, sctp_sysctl_handle_asconf,
+	    SCTPCTL_ASCONF_ENABLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "reconfig_enable", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_reconfig_enable), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RECONFIG_ENABLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "nrsack_enable", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_nrsack_enable), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_NRSACK_ENABLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "pktdrop_enable", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_pktdrop_enable), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_PKTDROP_ENABLE_DESC);
+
+#if !defined(SCTP_WITH_NO_CSUM)
+	sysctl_add_oid(&sysctl_oid_top, "loopback_nocsum", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_no_csum_on_loopback), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_LOOPBACK_NOCSUM_DESC);
+#endif
+
+	sysctl_add_oid(&sysctl_oid_top, "peer_chkoh", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_peer_chunk_oh), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_PEER_CHKOH_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "maxburst", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_max_burst_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MAXBURST_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "fr_maxburst", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_fr_max_burst_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_FRMAXBURST_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "maxchunks", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MAXCHUNKS_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "tcbhashsize", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_hashtblsize), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_TCBHASHSIZE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "pcbhashsize", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_pcbtblsize), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_PCBHASHSIZE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "min_split_point", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_min_split_point), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MIN_SPLIT_POINT_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "chunkscale", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_chunkscale), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_CHUNKSCALE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "delayed_sack_time", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_delayed_sack_time_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_DELAYED_SACK_TIME_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "sack_freq", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_sack_freq_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_SACK_FREQ_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "sys_resource", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_system_free_resc_limit), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_SYS_RESOURCE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "asoc_resource", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_ASOC_RESOURCE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "heartbeat_interval", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_heartbeat_interval_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_HEARTBEAT_INTERVAL_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "pmtu_raise_time", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_pmtu_raise_time_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_PMTU_RAISE_TIME_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "shutdown_guard_time", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_shutdown_guard_time_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_SHUTDOWN_GUARD_TIME_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "secret_lifetime", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_secret_lifetime_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_SECRET_LIFETIME_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "rto_max", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_rto_max_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTO_MAX_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "rto_min", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_rto_min_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTO_MIN_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "rto_initial", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_rto_initial_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTO_INITIAL_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "init_rto_max", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_init_rto_max_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_INIT_RTO_MAX_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "valid_cookie_life", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_valid_cookie_life_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_VALID_COOKIE_LIFE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "init_rtx_max", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_init_rtx_max_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_INIT_RTX_MAX_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "assoc_rtx_max", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_assoc_rtx_max_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_ASSOC_RTX_MAX_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "path_rtx_max", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_path_rtx_max_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_PATH_RTX_MAX_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "path_pf_threshold", CTLTYPE_INT|CTLFLAG_RW,
+	    &SCTP_BASE_SYSCTL(sctp_path_pf_threshold), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_PATH_PF_THRESHOLD_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "add_more_on_output", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_add_more_threshold), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_ADD_MORE_ON_OUTPUT_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "incoming_streams", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_nr_incoming_streams_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_INCOMING_STREAMS_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "outgoing_streams", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_nr_outgoing_streams_default), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_OUTGOING_STREAMS_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "cmt_on_off", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_cmt_on_off), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_CMT_ON_OFF_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "cmt_use_dac", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_cmt_use_dac), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_CMT_USE_DAC_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "cwnd_maxburst", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_use_cwnd_based_maxburst), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_CWND_MAXBURST_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "nat_friendly", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_nat_friendly), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_NAT_FRIENDLY_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "abc_l_var", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_L2_abc_variable), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_ABC_L_VAR_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "max_chained_mbufs", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_mbuf_threshold_count), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MAX_CHAINED_MBUFS_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "do_sctp_drain", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_do_drain), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_DO_SCTP_DRAIN_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "hb_max_burst", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_hb_maxburst), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_HB_MAX_BURST_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "abort_at_limit", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_ABORT_AT_LIMIT_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "min_residual", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_min_residual), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MIN_RESIDUAL_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "max_retran_chunk", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_max_retran_chunk), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MAX_RETRAN_CHUNK_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "log_level", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_logging_level), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_LOGGING_LEVEL_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "default_cc_module", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_default_cc_module), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_DEFAULT_CC_MODULE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "default_ss_module", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_default_ss_module), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_DEFAULT_SS_MODULE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "default_frag_interleave", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_default_frag_interleave), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "mobility_base", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_mobility_base), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MOBILITY_BASE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "mobility_fasthandoff", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_MOBILITY_FASTHANDOFF_DESC);
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+	sysctl_add_oid(&sysctl_oid_top, "sctp_log", CTLTYPE_STRUCT|CTLFLAG_RD,
+	    SCTP_BASE_SYSCTL(sctp_log), sizeof(struct sctp_log), NULL,
+	    "SCTP logging (struct sctp_log)");
+
+	sysctl_add_oid(&sysctl_oid_top, "clear_trace", CTLTYPE_INT|CTLFLAG_WR,
+	    NULL, 0, sctp_sysctl_handle_trace_log_clear,
+	    "Clear SCTP Logging buffer");
+#endif
+
+	sysctl_add_oid(&sysctl_oid_top, "udp_tunneling_port", CTLTYPE_INT|CTLFLAG_RW,
+	    &SCTP_BASE_SYSCTL(sctp_udp_tunneling_port), 0, sctp_sysctl_handle_udp_tunneling,
+	    SCTPCTL_UDP_TUNNELING_PORT_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "enable_sack_immediately", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_enable_sack_immediately), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "nat_friendly_init", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_inits_include_nat_friendly), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_NAT_FRIENDLY_INITS_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "vtag_time_wait", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_vtag_time_wait), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_TIME_WAIT_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "buffer_splitting", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_buffer_splitting), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_BUFFER_SPLITTING_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "initial_cwnd", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_initial_cwnd), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_INITIAL_CWND_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "rttvar_bw", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_rttvar_bw), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTTVAR_BW_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "rttvar_rtt", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_rttvar_rtt), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTTVAR_RTT_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "rttvar_eqret", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_rttvar_eqret), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTTVAR_EQRET_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "rttvar_steady_step", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_steady_step), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTTVAR_STEADYS_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "use_dcccecn", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_use_dccc_ecn), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_RTTVAR_DCCCECN_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "blackhole", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_blackhole), 0, sctp_sysctl_handle_int,
+            SCTPCTL_BLACKHOLE_DESC);
+
+	sysctl_add_oid(&sysctl_oid_top, "diag_info_code", CTLTYPE_INT|CTLFLAG_RW,
+            &SCTP_BASE_SYSCTL(sctp_diag_info_code), 0, sctp_sysctl_handle_int,
+            SCTPCTL_DIAG_INFO_CODE_DESC);
+
+#ifdef SCTP_DEBUG
+	sysctl_add_oid(&sysctl_oid_top, "debug", CTLTYPE_INT|CTLFLAG_RW,
+	    &SCTP_BASE_SYSCTL(sctp_debug_on), 0, sctp_sysctl_handle_int,
+	    SCTPCTL_DEBUG_DESC);
+#endif
+
+	sysctl_add_oid(&sysctl_oid_top, "stats", CTLTYPE_STRUCT|CTLFLAG_RW,
+	    &SCTP_BASE_STATS, sizeof(SCTP_BASE_STATS), NULL,
+	    "SCTP statistics (struct sctp_stat)");
+
+	sysctl_add_oid(&sysctl_oid_top, "assoclist", CTLTYPE_STRUCT|CTLFLAG_RD,
+	    NULL, 0, sctp_assoclist,
+	    "List of active SCTP associations");
+}
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_sysctl.h b/usrsctplib/netinet/sctp_sysctl.h
new file mode 100755
index 0000000..cbd29e3
--- /dev/null
+++ b/usrsctplib/netinet/sctp_sysctl.h
@@ -0,0 +1,625 @@
+/*-
+ * Copyright (c) 2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_sysctl.h 299543 2016-05-12 16:34:59Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_SYSCTL_H_
+#define _NETINET_SCTP_SYSCTL_H_
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_constants.h>
+
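+/*
+ * Every run-time tunable lives in a single instance of this structure; the
+ * code reads and writes the fields through the SCTP_BASE_SYSCTL() macro, and
+ * the SCTPCTL_*_DEFAULT values defined below supply the initial settings.
+ */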
+struct sctp_sysctl {
+	uint32_t sctp_sendspace;
+	uint32_t sctp_recvspace;
+	uint32_t sctp_auto_asconf;
+	uint32_t sctp_multiple_asconfs;
+	uint32_t sctp_ecn_enable;
+	uint32_t sctp_pr_enable;
+	uint32_t sctp_auth_enable;
+	uint32_t sctp_asconf_enable;
+	uint32_t sctp_reconfig_enable;
+	uint32_t sctp_nrsack_enable;
+	uint32_t sctp_pktdrop_enable;
+	uint32_t sctp_fr_max_burst_default;
+#if !(defined(__FreeBSD__) && __FreeBSD_version >= 800000)
+#if !defined(SCTP_WITH_NO_CSUM)
+	uint32_t sctp_no_csum_on_loopback;
+#endif
+#endif
+	uint32_t sctp_peer_chunk_oh;
+	uint32_t sctp_max_burst_default;
+	uint32_t sctp_max_chunks_on_queue;
+	uint32_t sctp_hashtblsize;
+	uint32_t sctp_pcbtblsize;
+	uint32_t sctp_min_split_point;
+	uint32_t sctp_chunkscale;
+	uint32_t sctp_delayed_sack_time_default;
+	uint32_t sctp_sack_freq_default;
+	uint32_t sctp_system_free_resc_limit;
+	uint32_t sctp_asoc_free_resc_limit;
+	uint32_t sctp_heartbeat_interval_default;
+	uint32_t sctp_pmtu_raise_time_default;
+	uint32_t sctp_shutdown_guard_time_default;
+	uint32_t sctp_secret_lifetime_default;
+	uint32_t sctp_rto_max_default;
+	uint32_t sctp_rto_min_default;
+	uint32_t sctp_rto_initial_default;
+	uint32_t sctp_init_rto_max_default;
+	uint32_t sctp_valid_cookie_life_default;
+	uint32_t sctp_init_rtx_max_default;
+	uint32_t sctp_assoc_rtx_max_default;
+	uint32_t sctp_path_rtx_max_default;
+	uint32_t sctp_path_pf_threshold;
+	uint32_t sctp_add_more_threshold;
+	uint32_t sctp_nr_incoming_streams_default;
+	uint32_t sctp_nr_outgoing_streams_default;
+	uint32_t sctp_cmt_on_off;
+	uint32_t sctp_cmt_use_dac;
+	uint32_t sctp_use_cwnd_based_maxburst;
+	uint32_t sctp_nat_friendly;
+	uint32_t sctp_L2_abc_variable;
+	uint32_t sctp_mbuf_threshold_count;
+	uint32_t sctp_do_drain;
+	uint32_t sctp_hb_maxburst;
+	uint32_t sctp_abort_if_one_2_one_hits_limit;
+	uint32_t sctp_min_residual;
+	uint32_t sctp_max_retran_chunk;
+	uint32_t sctp_logging_level;
+	/* JRS - Variable for default congestion control module */
+	uint32_t sctp_default_cc_module;
+	/* RS - Variable for default stream scheduling module */
+	uint32_t sctp_default_ss_module;
+	uint32_t sctp_default_frag_interleave;
+	uint32_t sctp_mobility_base;
+	uint32_t sctp_mobility_fasthandoff;
+	uint32_t sctp_inits_include_nat_friendly;
+	uint32_t sctp_rttvar_bw;
+	uint32_t sctp_rttvar_rtt;
+	uint32_t sctp_rttvar_eqret;
+	uint32_t sctp_steady_step;
+	uint32_t sctp_use_dccc_ecn;
+	uint32_t sctp_diag_info_code;
+#if defined(SCTP_LOCAL_TRACE_BUF)
+#if defined(__Windows__)
+	struct sctp_log *sctp_log;
+#else
+	struct sctp_log sctp_log;
+#endif
+#endif
+	uint32_t sctp_udp_tunneling_port;
+	uint32_t sctp_enable_sack_immediately;
+	uint32_t sctp_vtag_time_wait;
+	uint32_t sctp_buffer_splitting;
+	uint32_t sctp_initial_cwnd;
+	uint32_t sctp_blackhole;
+#if defined(SCTP_DEBUG)
+	uint32_t sctp_debug_on;
+#endif
+#if defined(__APPLE__)
+	uint32_t sctp_ignore_vmware_interfaces;
+	uint32_t sctp_main_timer;
+	uint32_t sctp_addr_watchdog_limit;
+	uint32_t sctp_vtag_watchdog_limit;
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	uint32_t sctp_output_unlocked;
+#endif
+};
+
+/*
+ * limits for the sysctl variables
+ */
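+/*
+ * Each tunable defines four macros: _DESC (description string), _MIN and
+ * _MAX (the range enforced by SCTP_UINT_SYSCTL and RANGECHK) and _DEFAULT
+ * (the value installed by sctp_init_sysctls()).
+ */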
+/* maxdgram: Maximum outgoing SCTP buffer size */
+#define SCTPCTL_MAXDGRAM_DESC		"Maximum outgoing SCTP buffer size"
+#define SCTPCTL_MAXDGRAM_MIN		0
+#define SCTPCTL_MAXDGRAM_MAX		0xFFFFFFFF
+#if defined(__Userspace__)
+#define SCTPCTL_MAXDGRAM_DEFAULT	SB_MAX
+#else
+#define SCTPCTL_MAXDGRAM_DEFAULT	262144	/* 256k */
+#endif
+
+/* recvspace: Maximum incoming SCTP buffer size */
+#define SCTPCTL_RECVSPACE_DESC		"Maximum incoming SCTP buffer size"
+#define SCTPCTL_RECVSPACE_MIN		0
+#define SCTPCTL_RECVSPACE_MAX		0xFFFFFFFF
+#if defined(__Userspace__)
+#define SCTPCTL_RECVSPACE_DEFAULT	SB_RAW
+#else
+#define SCTPCTL_RECVSPACE_DEFAULT	262144	/* 256k */
+#endif
+
+/* autoasconf: Enable SCTP Auto-ASCONF */
+#define SCTPCTL_AUTOASCONF_DESC		"Enable SCTP Auto-ASCONF"
+#define SCTPCTL_AUTOASCONF_MIN		0
+#define SCTPCTL_AUTOASCONF_MAX		1
+#define SCTPCTL_AUTOASCONF_DEFAULT	1
+
+/* multiple_asconfs: Enable SCTP Multiple-ASCONFs */
+#define SCTPCTL_MULTIPLEASCONFS_DESC	"Enable SCTP Multiple-ASCONFs"
+#define SCTPCTL_MULTIPLEASCONFS_MIN	0
+#define SCTPCTL_MULTIPLEASCONFS_MAX	1
+#define SCTPCTL_MULTIPLEASCONFS_DEFAULT	SCTP_DEFAULT_MULTIPLE_ASCONFS
+
+/* ecn_enable: Enable SCTP ECN */
+#define SCTPCTL_ECN_ENABLE_DESC		"Enable SCTP ECN"
+#define SCTPCTL_ECN_ENABLE_MIN		0
+#define SCTPCTL_ECN_ENABLE_MAX		1
+#define SCTPCTL_ECN_ENABLE_DEFAULT	1
+
+/* pr_enable: Enable PR-SCTP */
+#define SCTPCTL_PR_ENABLE_DESC		"Enable PR-SCTP"
+#define SCTPCTL_PR_ENABLE_MIN		0
+#define SCTPCTL_PR_ENABLE_MAX		1
+#define SCTPCTL_PR_ENABLE_DEFAULT	1
+
+/* auth_enable: Enable SCTP AUTH function */
+#define SCTPCTL_AUTH_ENABLE_DESC	"Enable SCTP AUTH function"
+#define SCTPCTL_AUTH_ENABLE_MIN		0
+#define SCTPCTL_AUTH_ENABLE_MAX		1
+#define SCTPCTL_AUTH_ENABLE_DEFAULT	1
+
+/* asconf_enable: Enable SCTP ASCONF */
+#define SCTPCTL_ASCONF_ENABLE_DESC	"Enable SCTP ASCONF"
+#define SCTPCTL_ASCONF_ENABLE_MIN	0
+#define SCTPCTL_ASCONF_ENABLE_MAX	1
+#define SCTPCTL_ASCONF_ENABLE_DEFAULT	1
+
+/* reconfig_enable: Enable SCTP RE-CONFIG */
+#define SCTPCTL_RECONFIG_ENABLE_DESC	"Enable SCTP RE-CONFIG"
+#define SCTPCTL_RECONFIG_ENABLE_MIN	0
+#define SCTPCTL_RECONFIG_ENABLE_MAX	1
+#define SCTPCTL_RECONFIG_ENABLE_DEFAULT	1
+
+/* nrsack_enable: Enable NR_SACK */
+#define SCTPCTL_NRSACK_ENABLE_DESC	"Enable SCTP NR-SACK"
+#define SCTPCTL_NRSACK_ENABLE_MIN	0
+#define SCTPCTL_NRSACK_ENABLE_MAX	1
+#define SCTPCTL_NRSACK_ENABLE_DEFAULT	0
+
+/* pktdrop_enable: Enable SCTP Packet Drop Reports */
+#define SCTPCTL_PKTDROP_ENABLE_DESC	"Enable SCTP PKTDROP"
+#define SCTPCTL_PKTDROP_ENABLE_MIN	0
+#define SCTPCTL_PKTDROP_ENABLE_MAX	1
+#define SCTPCTL_PKTDROP_ENABLE_DEFAULT	0
+
+/* loopback_nocsum: Enable NO Csum on packets sent on loopback */
+#define SCTPCTL_LOOPBACK_NOCSUM_DESC	"Enable NO Csum on packets sent on loopback"
+#define SCTPCTL_LOOPBACK_NOCSUM_MIN	0
+#define SCTPCTL_LOOPBACK_NOCSUM_MAX	1
+#define SCTPCTL_LOOPBACK_NOCSUM_DEFAULT	1
+
+/* peer_chkoh: Amount to debit peers rwnd per chunk sent */
+#define SCTPCTL_PEER_CHKOH_DESC		"Amount to debit peers rwnd per chunk sent"
+#define SCTPCTL_PEER_CHKOH_MIN		0
+#define SCTPCTL_PEER_CHKOH_MAX		0xFFFFFFFF
+#define SCTPCTL_PEER_CHKOH_DEFAULT	256
+
+/* maxburst: Default max burst for sctp endpoints */
+#define SCTPCTL_MAXBURST_DESC		"Default max burst for sctp endpoints"
+#define SCTPCTL_MAXBURST_MIN		0
+#define SCTPCTL_MAXBURST_MAX		0xFFFFFFFF
+#define SCTPCTL_MAXBURST_DEFAULT	SCTP_DEF_MAX_BURST
+
+/* fr_maxburst: Default max burst for sctp endpoints when fast retransmitting */
+#define SCTPCTL_FRMAXBURST_DESC		"Default fr max burst for sctp endpoints"
+#define SCTPCTL_FRMAXBURST_MIN		0
+#define SCTPCTL_FRMAXBURST_MAX		0xFFFFFFFF
+#define SCTPCTL_FRMAXBURST_DEFAULT	SCTP_DEF_FRMAX_BURST
+
+
+/* maxchunks: Default max chunks on queue per asoc */
+#define SCTPCTL_MAXCHUNKS_DESC		"Default max chunks on queue per asoc"
+#define SCTPCTL_MAXCHUNKS_MIN		0
+#define SCTPCTL_MAXCHUNKS_MAX		0xFFFFFFFF
+#define SCTPCTL_MAXCHUNKS_DEFAULT	SCTP_ASOC_MAX_CHUNKS_ON_QUEUE
+
+/* tcbhashsize: Tunable for Hash table sizes */
+#define SCTPCTL_TCBHASHSIZE_DESC	"Tunable for TCB hash table sizes"
+#define SCTPCTL_TCBHASHSIZE_MIN		1
+#define SCTPCTL_TCBHASHSIZE_MAX		0xFFFFFFFF
+#define SCTPCTL_TCBHASHSIZE_DEFAULT	SCTP_TCBHASHSIZE
+
+/* pcbhashsize: Tunable for PCB Hash table sizes */
+#define SCTPCTL_PCBHASHSIZE_DESC	"Tunable for PCB hash table sizes"
+#define SCTPCTL_PCBHASHSIZE_MIN		1
+#define SCTPCTL_PCBHASHSIZE_MAX		0xFFFFFFFF
+#define SCTPCTL_PCBHASHSIZE_DEFAULT	SCTP_PCBHASHSIZE
+
+/* min_split_point: Minimum size when splitting a chunk */
+#define SCTPCTL_MIN_SPLIT_POINT_DESC	"Minimum size when splitting a chunk"
+#define SCTPCTL_MIN_SPLIT_POINT_MIN	0
+#define SCTPCTL_MIN_SPLIT_POINT_MAX	0xFFFFFFFF
+#define SCTPCTL_MIN_SPLIT_POINT_DEFAULT	SCTP_DEFAULT_SPLIT_POINT_MIN
+
+/* chunkscale: Tunable for Scaling of number of chunks and messages */
+#define SCTPCTL_CHUNKSCALE_DESC		"Tunable for Scaling of number of chunks and messages"
+#define SCTPCTL_CHUNKSCALE_MIN		1
+#define SCTPCTL_CHUNKSCALE_MAX		0xFFFFFFFF
+#define SCTPCTL_CHUNKSCALE_DEFAULT	SCTP_CHUNKQUEUE_SCALE
+
+/* delayed_sack_time: Default delayed SACK timer in ms */
+#define SCTPCTL_DELAYED_SACK_TIME_DESC	"Default delayed SACK timer in ms"
+#define SCTPCTL_DELAYED_SACK_TIME_MIN	0
+#define SCTPCTL_DELAYED_SACK_TIME_MAX	0xFFFFFFFF
+#define SCTPCTL_DELAYED_SACK_TIME_DEFAULT	SCTP_RECV_MSEC
+
+/* sack_freq: Default SACK frequency */
+#define SCTPCTL_SACK_FREQ_DESC		"Default SACK frequency"
+#define SCTPCTL_SACK_FREQ_MIN		0
+#define SCTPCTL_SACK_FREQ_MAX		0xFFFFFFFF
+#define SCTPCTL_SACK_FREQ_DEFAULT	SCTP_DEFAULT_SACK_FREQ
+
+/* sys_resource: Max number of cached resources in the system */
+#define SCTPCTL_SYS_RESOURCE_DESC	"Max number of cached resources in the system"
+#define SCTPCTL_SYS_RESOURCE_MIN	0
+#define SCTPCTL_SYS_RESOURCE_MAX	0xFFFFFFFF
+#define SCTPCTL_SYS_RESOURCE_DEFAULT	SCTP_DEF_SYSTEM_RESC_LIMIT
+
+/* asoc_resource: Max number of cached resources in an asoc */
+#define SCTPCTL_ASOC_RESOURCE_DESC	"Max number of cached resources in an asoc"
+#define SCTPCTL_ASOC_RESOURCE_MIN	0
+#define SCTPCTL_ASOC_RESOURCE_MAX	0xFFFFFFFF
+#define SCTPCTL_ASOC_RESOURCE_DEFAULT	SCTP_DEF_ASOC_RESC_LIMIT
+
+/* heartbeat_interval: Default heartbeat interval in ms */
+#define SCTPCTL_HEARTBEAT_INTERVAL_DESC	"Default heartbeat interval in ms"
+#define SCTPCTL_HEARTBEAT_INTERVAL_MIN	0
+#define SCTPCTL_HEARTBEAT_INTERVAL_MAX	0xFFFFFFFF
+#define SCTPCTL_HEARTBEAT_INTERVAL_DEFAULT	SCTP_HB_DEFAULT_MSEC
+
+/* pmtu_raise_time: Default PMTU raise timer in seconds */
+#define SCTPCTL_PMTU_RAISE_TIME_DESC	"Default PMTU raise timer in seconds"
+#define SCTPCTL_PMTU_RAISE_TIME_MIN	0
+#define SCTPCTL_PMTU_RAISE_TIME_MAX	0xFFFFFFFF
+#define SCTPCTL_PMTU_RAISE_TIME_DEFAULT	SCTP_DEF_PMTU_RAISE_SEC
+
+/* shutdown_guard_time: Default shutdown guard timer in seconds */
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_DESC	"Shutdown guard timer in seconds (0 means 5 times RTO.Max)"
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_MIN		0
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_MAX		0xFFFFFFFF
+#define SCTPCTL_SHUTDOWN_GUARD_TIME_DEFAULT	0
+
+/* secret_lifetime: Default secret lifetime in seconds */
+#define SCTPCTL_SECRET_LIFETIME_DESC	"Default secret lifetime in seconds"
+#define SCTPCTL_SECRET_LIFETIME_MIN	0
+#define SCTPCTL_SECRET_LIFETIME_MAX	0xFFFFFFFF
+#define SCTPCTL_SECRET_LIFETIME_DEFAULT	SCTP_DEFAULT_SECRET_LIFE_SEC
+
+/* rto_max: Default maximum retransmission timeout in ms */
+#define SCTPCTL_RTO_MAX_DESC		"Default maximum retransmission timeout in ms"
+#define SCTPCTL_RTO_MAX_MIN		0
+#define SCTPCTL_RTO_MAX_MAX		0xFFFFFFFF
+#define SCTPCTL_RTO_MAX_DEFAULT		SCTP_RTO_UPPER_BOUND
+
+/* rto_min: Default minimum retransmission timeout in ms */
+#define SCTPCTL_RTO_MIN_DESC		"Default minimum retransmission timeout in ms"
+#define SCTPCTL_RTO_MIN_MIN		0
+#define SCTPCTL_RTO_MIN_MAX		0xFFFFFFFF
+#define SCTPCTL_RTO_MIN_DEFAULT		SCTP_RTO_LOWER_BOUND
+
+/* rto_initial: Default initial retransmission timeout in ms */
+#define SCTPCTL_RTO_INITIAL_DESC	"Default initial retransmission timeout in ms"
+#define SCTPCTL_RTO_INITIAL_MIN		0
+#define SCTPCTL_RTO_INITIAL_MAX		0xFFFFFFFF
+#define SCTPCTL_RTO_INITIAL_DEFAULT	SCTP_RTO_INITIAL
+
+/* init_rto_max: Default maximum retransmission timeout during association setup in ms */
+#define SCTPCTL_INIT_RTO_MAX_DESC	"Default maximum retransmission timeout during association setup in ms"
+#define SCTPCTL_INIT_RTO_MAX_MIN	0
+#define SCTPCTL_INIT_RTO_MAX_MAX	0xFFFFFFFF
+#define SCTPCTL_INIT_RTO_MAX_DEFAULT	SCTP_RTO_UPPER_BOUND
+
+/* valid_cookie_life: Default cookie lifetime in sec */
+#define SCTPCTL_VALID_COOKIE_LIFE_DESC	"Default cookie lifetime in seconds"
+#define SCTPCTL_VALID_COOKIE_LIFE_MIN	0
+#define SCTPCTL_VALID_COOKIE_LIFE_MAX	0xFFFFFFFF
+#define SCTPCTL_VALID_COOKIE_LIFE_DEFAULT	SCTP_DEFAULT_COOKIE_LIFE
+
+/* init_rtx_max: Default maximum number of retransmission for INIT chunks */
+#define SCTPCTL_INIT_RTX_MAX_DESC	"Default maximum number of retransmission for INIT chunks"
+#define SCTPCTL_INIT_RTX_MAX_MIN	0
+#define SCTPCTL_INIT_RTX_MAX_MAX	0xFFFFFFFF
+#define SCTPCTL_INIT_RTX_MAX_DEFAULT	SCTP_DEF_MAX_INIT
+
+/* assoc_rtx_max: Default maximum number of retransmissions per association */
+#define SCTPCTL_ASSOC_RTX_MAX_DESC	"Default maximum number of retransmissions per association"
+#define SCTPCTL_ASSOC_RTX_MAX_MIN	0
+#define SCTPCTL_ASSOC_RTX_MAX_MAX	0xFFFFFFFF
+#define SCTPCTL_ASSOC_RTX_MAX_DEFAULT	SCTP_DEF_MAX_SEND
+
+/* path_rtx_max: Default maximum of retransmissions per path */
+#define SCTPCTL_PATH_RTX_MAX_DESC	"Default maximum of retransmissions per path"
+#define SCTPCTL_PATH_RTX_MAX_MIN	0
+#define SCTPCTL_PATH_RTX_MAX_MAX	0xFFFFFFFF
+#define SCTPCTL_PATH_RTX_MAX_DEFAULT	SCTP_DEF_MAX_PATH_RTX
+
+/* path_pf_threshold: threshold for considering the path potentially failed */
+#define SCTPCTL_PATH_PF_THRESHOLD_DESC		"Default potentially failed threshold"
+#define SCTPCTL_PATH_PF_THRESHOLD_MIN		0
+#define SCTPCTL_PATH_PF_THRESHOLD_MAX		0xFFFF
+#define SCTPCTL_PATH_PF_THRESHOLD_DEFAULT	SCTPCTL_PATH_PF_THRESHOLD_MAX
+
+/* add_more_on_output: When space-wise is it worthwhile to try to add more to a socket send buffer */
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_DESC	"When space-wise is it worthwhile to try to add more to a socket send buffer"
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_MIN	0
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_MAX	0xFFFFFFFF
+#define SCTPCTL_ADD_MORE_ON_OUTPUT_DEFAULT SCTP_DEFAULT_ADD_MORE
+
+/* incoming_streams: Default number of incoming streams */
+#define SCTPCTL_INCOMING_STREAMS_DESC	"Default number of incoming streams"
+#define SCTPCTL_INCOMING_STREAMS_MIN	1
+#define SCTPCTL_INCOMING_STREAMS_MAX	65535
+#define SCTPCTL_INCOMING_STREAMS_DEFAULT SCTP_ISTREAM_INITIAL
+
+/* outgoing_streams: Default number of outgoing streams */
+#define SCTPCTL_OUTGOING_STREAMS_DESC	"Default number of outgoing streams"
+#define SCTPCTL_OUTGOING_STREAMS_MIN	1
+#define SCTPCTL_OUTGOING_STREAMS_MAX	65535
+#define SCTPCTL_OUTGOING_STREAMS_DEFAULT SCTP_OSTREAM_INITIAL
+
+/* cmt_on_off: CMT on/off flag */
+#define SCTPCTL_CMT_ON_OFF_DESC		"CMT settings"
+#define SCTPCTL_CMT_ON_OFF_MIN		SCTP_CMT_OFF
+#define SCTPCTL_CMT_ON_OFF_MAX		SCTP_CMT_MAX
+#define SCTPCTL_CMT_ON_OFF_DEFAULT	SCTP_CMT_OFF
+
+/* cmt_use_dac: CMT DAC on/off flag */
+#define SCTPCTL_CMT_USE_DAC_DESC	"CMT DAC on/off flag"
+#define SCTPCTL_CMT_USE_DAC_MIN		0
+#define SCTPCTL_CMT_USE_DAC_MAX		1
+#define SCTPCTL_CMT_USE_DAC_DEFAULT    	0
+
+/* cwnd_maxburst: Use a CWND adjusting maxburst */
+#define SCTPCTL_CWND_MAXBURST_DESC	"Use a CWND adjusting maxburst"
+#define SCTPCTL_CWND_MAXBURST_MIN	0
+#define SCTPCTL_CWND_MAXBURST_MAX	1
+#define SCTPCTL_CWND_MAXBURST_DEFAULT	1
+
+/* nat_friendly: SCTP NAT friendly operation */
+#define SCTPCTL_NAT_FRIENDLY_DESC	"SCTP NAT friendly operation"
+#define SCTPCTL_NAT_FRIENDLY_MIN	0
+#define SCTPCTL_NAT_FRIENDLY_MAX	1
+#define SCTPCTL_NAT_FRIENDLY_DEFAULT	1
+
+/* abc_l_var: SCTP ABC max increase per SACK (L) */
+#define SCTPCTL_ABC_L_VAR_DESC		"SCTP ABC max increase per SACK (L)"
+#define SCTPCTL_ABC_L_VAR_MIN		0
+#define SCTPCTL_ABC_L_VAR_MAX		0xFFFFFFFF
+#define SCTPCTL_ABC_L_VAR_DEFAULT	2
+
+/* max_chained_mbufs: Default max number of small mbufs on a chain */
+#define SCTPCTL_MAX_CHAINED_MBUFS_DESC	"Default max number of small mbufs on a chain"
+#define SCTPCTL_MAX_CHAINED_MBUFS_MIN	0
+#define SCTPCTL_MAX_CHAINED_MBUFS_MAX	0xFFFFFFFF
+#define SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT	SCTP_DEFAULT_MBUFS_IN_CHAIN
+
+/* do_sctp_drain: Should SCTP respond to the drain calls */
+#define SCTPCTL_DO_SCTP_DRAIN_DESC	"Should SCTP respond to the drain calls"
+#define SCTPCTL_DO_SCTP_DRAIN_MIN	0
+#define SCTPCTL_DO_SCTP_DRAIN_MAX	1
+#define SCTPCTL_DO_SCTP_DRAIN_DEFAULT	1
+
+/* hb_max_burst: Confirmation Heartbeat max burst? */
+#define SCTPCTL_HB_MAX_BURST_DESC	"Confirmation Heartbeat max burst"
+#define SCTPCTL_HB_MAX_BURST_MIN	1
+#define SCTPCTL_HB_MAX_BURST_MAX	0xFFFFFFFF
+#define SCTPCTL_HB_MAX_BURST_DEFAULT	SCTP_DEF_HBMAX_BURST
+
+/* abort_at_limit: When one-2-one hits qlimit abort */
+#define SCTPCTL_ABORT_AT_LIMIT_DESC	"When one-2-one hits qlimit abort"
+#define SCTPCTL_ABORT_AT_LIMIT_MIN	0
+#define SCTPCTL_ABORT_AT_LIMIT_MAX	1
+#define SCTPCTL_ABORT_AT_LIMIT_DEFAULT	0
+
+/* min_residual: min residual in a data fragment leftover */
+#define SCTPCTL_MIN_RESIDUAL_DESC	"Minimum residual data chunk in second part of split"
+#define SCTPCTL_MIN_RESIDUAL_MIN	20
+#define SCTPCTL_MIN_RESIDUAL_MAX	65535
+#define SCTPCTL_MIN_RESIDUAL_DEFAULT	1452
+
+/* max_retran_chunk: max chunk retransmissions */
+#define SCTPCTL_MAX_RETRAN_CHUNK_DESC	"Maximum times an unlucky chunk can be retran'd before assoc abort"
+#define SCTPCTL_MAX_RETRAN_CHUNK_MIN	0
+#define SCTPCTL_MAX_RETRAN_CHUNK_MAX	65535
+#define SCTPCTL_MAX_RETRAN_CHUNK_DEFAULT	30
+
+/* sctp_logging: This gives us logging when the options are enabled */
+#define SCTPCTL_LOGGING_LEVEL_DESC	"Ltrace/KTR trace logging level"
+#define SCTPCTL_LOGGING_LEVEL_MIN	0
+#define SCTPCTL_LOGGING_LEVEL_MAX	0xffffffff
+#define SCTPCTL_LOGGING_LEVEL_DEFAULT	0
+
+/* JRS - default congestion control module sysctl */
+#define SCTPCTL_DEFAULT_CC_MODULE_DESC		"Default congestion control module"
+#define SCTPCTL_DEFAULT_CC_MODULE_MIN		0
+#define SCTPCTL_DEFAULT_CC_MODULE_MAX		2
+#define SCTPCTL_DEFAULT_CC_MODULE_DEFAULT	0
+
+/* RS - default stream scheduling module sysctl */
+#define SCTPCTL_DEFAULT_SS_MODULE_DESC		"Default stream scheduling module"
+#define SCTPCTL_DEFAULT_SS_MODULE_MIN		0
+#define SCTPCTL_DEFAULT_SS_MODULE_MAX		5
+#define SCTPCTL_DEFAULT_SS_MODULE_DEFAULT	0
+
+/* RRS - default fragment interleave */
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DESC	"Default fragment interleave level"
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MIN	0
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_MAX	2
+#define SCTPCTL_DEFAULT_FRAG_INTERLEAVE_DEFAULT	1
+
+/* mobility_base: Enable SCTP mobility support */
+#define SCTPCTL_MOBILITY_BASE_DESC	"Enable SCTP base mobility"
+#define SCTPCTL_MOBILITY_BASE_MIN	0
+#define SCTPCTL_MOBILITY_BASE_MAX	1
+#define SCTPCTL_MOBILITY_BASE_DEFAULT	0
+
+/* mobility_fasthandoff: Enable SCTP fast handoff support */
+#define SCTPCTL_MOBILITY_FASTHANDOFF_DESC	"Enable SCTP fast handoff"
+#define SCTPCTL_MOBILITY_FASTHANDOFF_MIN	0
+#define SCTPCTL_MOBILITY_FASTHANDOFF_MAX	1
+#define SCTPCTL_MOBILITY_FASTHANDOFF_DEFAULT	0
+
+/* Enable SCTP/UDP tunneling port */
+#define SCTPCTL_UDP_TUNNELING_PORT_DESC		"Set the SCTP/UDP tunneling port"
+#define SCTPCTL_UDP_TUNNELING_PORT_MIN		0
+#define SCTPCTL_UDP_TUNNELING_PORT_MAX		65535
+#if defined(__FreeBSD__)
+#define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT	0
+#else
+#define SCTPCTL_UDP_TUNNELING_PORT_DEFAULT	SCTP_OVER_UDP_TUNNELING_PORT
+#endif
+
+/* Enable sending of the SACK-IMMEDIATELY bit */
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DESC	"Enable sending of the SACK-IMMEDIATELY-bit."
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MIN	0
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX	1
+#define SCTPCTL_SACK_IMMEDIATELY_ENABLE_DEFAULT	SCTPCTL_SACK_IMMEDIATELY_ENABLE_MAX
+
+/* Enable sending of the NAT-FRIENDLY message */
+#define SCTPCTL_NAT_FRIENDLY_INITS_DESC	"Enable sending of the nat-friendly SCTP option on INITs."
+#define SCTPCTL_NAT_FRIENDLY_INITS_MIN	0
+#define SCTPCTL_NAT_FRIENDLY_INITS_MAX	1
+#define SCTPCTL_NAT_FRIENDLY_INITS_DEFAULT	SCTPCTL_NAT_FRIENDLY_INITS_MIN
+
+/* Vtag time wait in seconds */
+#define SCTPCTL_TIME_WAIT_DESC	"Vtag time wait time in seconds, 0 disables it."
+#define SCTPCTL_TIME_WAIT_MIN	0
+#define SCTPCTL_TIME_WAIT_MAX	0xffffffff
+#define SCTPCTL_TIME_WAIT_DEFAULT	SCTP_TIME_WAIT
+
+/* Enable Send/Receive buffer splitting */
+#define SCTPCTL_BUFFER_SPLITTING_DESC		"Enable send/receive buffer splitting."
+#define SCTPCTL_BUFFER_SPLITTING_MIN		0
+#define SCTPCTL_BUFFER_SPLITTING_MAX		0x3
+#define SCTPCTL_BUFFER_SPLITTING_DEFAULT	SCTPCTL_BUFFER_SPLITTING_MIN
+
+/* Initial congestion window in MTU */
+#define SCTPCTL_INITIAL_CWND_DESC	"Initial congestion window in MTUs"
+#define SCTPCTL_INITIAL_CWND_MIN	0
+#define SCTPCTL_INITIAL_CWND_MAX	0xffffffff
+#define SCTPCTL_INITIAL_CWND_DEFAULT	3
+
+/* rttvar smooth avg for bw calc  */
+#define SCTPCTL_RTTVAR_BW_DESC	"Shift amount for bw smoothing on rtt calc"
+#define SCTPCTL_RTTVAR_BW_MIN	0
+#define SCTPCTL_RTTVAR_BW_MAX	32
+#define SCTPCTL_RTTVAR_BW_DEFAULT	4
+
+/* rttvar smooth avg for bw calc  */
+#define SCTPCTL_RTTVAR_RTT_DESC	"Shift amount for rtt smoothing on rtt calc"
+#define SCTPCTL_RTTVAR_RTT_MIN	0
+#define SCTPCTL_RTTVAR_RTT_MAX	32
+#define SCTPCTL_RTTVAR_RTT_DEFAULT	5
+
+#define SCTPCTL_RTTVAR_EQRET_DESC	"What to return when rtt and bw are unchanged"
+#define SCTPCTL_RTTVAR_EQRET_MIN	0
+#define SCTPCTL_RTTVAR_EQRET_MAX	1
+#define SCTPCTL_RTTVAR_EQRET_DEFAULT	0
+
+#define SCTPCTL_RTTVAR_STEADYS_DESC	"How many identical measurements it takes before trying a step down of cwnd"
+#define SCTPCTL_RTTVAR_STEADYS_MIN	0
+#define SCTPCTL_RTTVAR_STEADYS_MAX	0xFFFF
+#define SCTPCTL_RTTVAR_STEADYS_DEFAULT	20 /* 0 means disable feature */
+
+#define SCTPCTL_RTTVAR_DCCCECN_DESC	"Enable for RTCC CC datacenter ECN"
+#define SCTPCTL_RTTVAR_DCCCECN_MIN	0
+#define SCTPCTL_RTTVAR_DCCCECN_MAX	1
+#define SCTPCTL_RTTVAR_DCCCECN_DEFAULT	1 /* 0 means disable feature */
+
+#define SCTPCTL_BLACKHOLE_DESC		"Enable SCTP blackholing. See blackhole(4) for more details."
+#define SCTPCTL_BLACKHOLE_MIN		0
+#define SCTPCTL_BLACKHOLE_MAX		2
+#define SCTPCTL_BLACKHOLE_DEFAULT	SCTPCTL_BLACKHOLE_MIN
+
+#define SCTPCTL_DIAG_INFO_CODE_DESC	"Diagnostic information error cause code"
+#define SCTPCTL_DIAG_INFO_CODE_MIN	0
+#define SCTPCTL_DIAG_INFO_CODE_MAX	65535
+#define SCTPCTL_DIAG_INFO_CODE_DEFAULT	0
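+
+/*
+ * Note on the layout above: each tunable is described by a quadruple of
+ * SCTPCTL_<NAME>_DESC/MIN/MAX/DEFAULT macros.  DESC is the human readable
+ * sysctl description, MIN/MAX bound the accepted value range, and DEFAULT
+ * is used to seed the corresponding sysctl variable.  For example, the
+ * SCTPCTL_INITIAL_CWND_* quadruple above accepts any 32-bit value and
+ * starts the initial congestion window at 3 MTUs.
+ */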
+
+#if defined(SCTP_DEBUG)
+/* debug: Configure debug output */
+#define SCTPCTL_DEBUG_DESC	"Configure debug output"
+#define SCTPCTL_DEBUG_MIN	0
+#define SCTPCTL_DEBUG_MAX	0xFFFFFFFF
+#define SCTPCTL_DEBUG_DEFAULT	0
+#endif
+
+#if defined(__APPLE__)
+#define SCTPCTL_MAIN_TIMER_DESC		"Main timer interval in ms"
+#define SCTPCTL_MAIN_TIMER_MIN		1
+#define SCTPCTL_MAIN_TIMER_MAX		0xFFFFFFFF
+#define SCTPCTL_MAIN_TIMER_DEFAULT	10
+
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_DESC		"Ignore VMware Interfaces"
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_MIN		0
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX		1
+#define SCTPCTL_IGNORE_VMWARE_INTERFACES_DEFAULT	SCTPCTL_IGNORE_VMWARE_INTERFACES_MAX
+#endif
+
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+#define SCTPCTL_OUTPUT_UNLOCKED_DESC	"Unlock socket when sending packets down to IP."
+#define SCTPCTL_OUTPUT_UNLOCKED_MIN	0
+#define SCTPCTL_OUTPUT_UNLOCKED_MAX	1
+#define SCTPCTL_OUTPUT_UNLOCKED_DEFAULT	SCTPCTL_OUTPUT_UNLOCKED_MIN
+#endif
+
+#if defined(__APPLE__)
+#define	SCTPCTL_ADDR_WATCHDOG_LIMIT_DESC	"Address watchdog limit"
+#define	SCTPCTL_ADDR_WATCHDOG_LIMIT_MIN		0
+#define	SCTPCTL_ADDR_WATCHDOG_LIMIT_MAX		0xFFFFFFFF
+#define	SCTPCTL_ADDR_WATCHDOG_LIMIT_DEFAULT	SCTPCTL_ADDR_WATCHDOG_LIMIT_MIN
+
+#define	SCTPCTL_VTAG_WATCHDOG_LIMIT_DESC	"VTag watchdog limit"
+#define	SCTPCTL_VTAG_WATCHDOG_LIMIT_MIN		0
+#define	SCTPCTL_VTAG_WATCHDOG_LIMIT_MAX		0xFFFFFFFF
+#define	SCTPCTL_VTAG_WATCHDOG_LIMIT_DEFAULT	SCTPCTL_VTAG_WATCHDOG_LIMIT_MIN
+#endif
+
+#if defined(_KERNEL) || defined(__Userspace__)
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Userspace__)
+#if defined(SYSCTL_DECL)
+SYSCTL_DECL(_net_inet_sctp);
+#endif
+#endif
+
+void sctp_init_sysctls(void);
+#if defined(__Windows__)
+void sctp_finish_sysctls(void);
+#endif
+
+#endif /* _KERNEL */
+#endif /* __sctp_sysctl_h__ */
diff --git a/usrsctplib/netinet/sctp_timer.c b/usrsctplib/netinet/sctp_timer.c
new file mode 100755
index 0000000..7cf62ca
--- /dev/null
+++ b/usrsctplib/netinet/sctp_timer.c
@@ -0,0 +1,1616 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#define _IP_VHL
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#ifdef INET6
+#if defined(__Userspace_os_FreeBSD)
+#include <netinet6/sctp6_var.h>
+#endif
+#endif
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_input.h>
+#include <netinet/sctp.h>
+#include <netinet/sctp_uio.h>
+#if defined(INET) || defined(INET6)
+#if !defined(__Userspace_os_Windows)
+#include <netinet/udp.h>
+#endif
+#endif
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 6
+#endif
+
+void
+sctp_audit_retranmission_queue(struct sctp_association *asoc)
+{
+	struct sctp_tmit_chunk *chk;
+
+	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
+			asoc->sent_queue_retran_cnt,
+			asoc->sent_queue_cnt);
+	asoc->sent_queue_retran_cnt = 0;
+	asoc->sent_queue_cnt = 0;
+	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+		if (chk->sent == SCTP_DATAGRAM_RESEND) {
+			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+		}
+		asoc->sent_queue_cnt++;
+	}
+	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
+		if (chk->sent == SCTP_DATAGRAM_RESEND) {
+			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+		}
+	}
+	TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
+		if (chk->sent == SCTP_DATAGRAM_RESEND) {
+			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
+		}
+	}
+	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
+		asoc->sent_queue_retran_cnt,
+		asoc->sent_queue_cnt);
+}
+
+static int
+sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net, uint16_t threshold)
+{
+	if (net) {
+		net->error_count++;
+		SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
+			(void *)net, net->error_count,
+			net->failure_threshold);
+		if (net->error_count > net->failure_threshold) {
+			/* We had a threshold failure */
+			if (net->dest_state & SCTP_ADDR_REACHABLE) {
+				net->dest_state &= ~SCTP_ADDR_REACHABLE;
+				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
+				net->dest_state &= ~SCTP_ADDR_PF;
+				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+				    stcb, 0,
+				    (void *)net, SCTP_SO_NOT_LOCKED);
+			}
+		} else if ((net->pf_threshold < net->failure_threshold) &&
+		           (net->error_count > net->pf_threshold)) {
+			if (!(net->dest_state & SCTP_ADDR_PF)) {
+				net->dest_state |= SCTP_ADDR_PF;
+				net->last_active = sctp_get_tick_count();
+				sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+				sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+				                inp, stcb, net,
+				                SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
+				sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+			}
+		}
+	}
+	if (stcb == NULL)
+		return (0);
+
+	if (net) {
+		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+				sctp_misc_ints(SCTP_THRESHOLD_INCR,
+					       stcb->asoc.overall_error_count,
+					       (stcb->asoc.overall_error_count+1),
+					       SCTP_FROM_SCTP_TIMER,
+					       __LINE__);
+			}
+			stcb->asoc.overall_error_count++;
+		}
+	} else {
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
+			sctp_misc_ints(SCTP_THRESHOLD_INCR,
+				       stcb->asoc.overall_error_count,
+				       (stcb->asoc.overall_error_count+1),
+				       SCTP_FROM_SCTP_TIMER,
+				       __LINE__);
+		}
+		stcb->asoc.overall_error_count++;
+	}
+	SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
+		(void *)&stcb->asoc, stcb->asoc.overall_error_count,
+		(uint32_t)threshold,
+		((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
+	/*
+	 * We specifically do not do >= to give the assoc one more chance
+	 * before we fail it.
+	 */
+	if (stcb->asoc.overall_error_count > threshold) {
+		/* Abort notification sends a ULP notify */
+		struct mbuf *op_err;
+
+		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+		                             "Association error counter exceeded");
+		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_2;
+		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		return (1);
+	}
+	return (0);
+}
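+
+/*
+ * Summary of sctp_threshold_management(): it bumps the per-net and
+ * per-association error counters and returns 1 only after it has aborted
+ * the association (error count strictly greater than the threshold), 0
+ * otherwise.  With a threshold of 5, for instance, the 6th consecutive
+ * error is the one that triggers the abort.  Callers below treat a return
+ * of 1 as "association was destroyed".
+ */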
+
+/*
+ * sctp_find_alternate_net() returns a non-NULL pointer as long as
+ * the argument net is non-NULL.
+ */
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *stcb,
+    struct sctp_nets *net,
+    int mode)
+{
+	/* Find and return an alternate network if possible */
+	struct sctp_nets *alt, *mnet, *min_errors_net = NULL , *max_cwnd_net = NULL;
+	int once;
+	/* JRS 5/14/07 - Initialize min_errors to an impossible value. */
+	int min_errors = -1;
+	uint32_t max_cwnd = 0;
+
+	if (stcb->asoc.numnets == 1) {
+		/* No others but net */
+		return (TAILQ_FIRST(&stcb->asoc.nets));
+	}
+	/*
+	 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate net algorithm.
+	 * This algorithm chooses the active destination (not in PF state) with the largest
+	 * cwnd value. If all destinations are in PF state, unreachable, or unconfirmed, choose
+	 * the destination that is in PF state with the lowest error count. In case of a tie,
+	 * choose the destination that was most recently active.
+	 */
+	if (mode == 2) {
+		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
+			/* JRS 5/14/07 - If the destination is unreachable or unconfirmed, skip it. */
+			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
+			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+				continue;
+			}
+			/*
+			 * JRS 5/14/07 -  If the destination is reachable but in PF state, compare
+			 *  the error count of the destination to the minimum error count seen thus far.
+			 *  Store the destination with the lower error count.  If the error counts are
+			 *  equal, store the destination that was most recently active.
+			 */
+			if (mnet->dest_state & SCTP_ADDR_PF) {
+				/*
+				 * JRS 5/14/07 - If the destination under consideration is the current
+				 *  destination, work as if the error count is one higher.  The
+				 *  actual error count will not be incremented until later in the
+				 *  t3 handler.
+				 */
+				if (mnet == net) {
+					if (min_errors == -1) {
+						min_errors = mnet->error_count + 1;
+						min_errors_net = mnet;
+					} else if (mnet->error_count + 1 < min_errors) {
+						min_errors = mnet->error_count + 1;
+						min_errors_net = mnet;
+					} else if (mnet->error_count + 1 == min_errors
+								&& mnet->last_active > min_errors_net->last_active) {
+						min_errors_net = mnet;
+						min_errors = mnet->error_count + 1;
+					}
+					continue;
+				} else {
+					if (min_errors == -1) {
+						min_errors = mnet->error_count;
+						min_errors_net = mnet;
+					} else if (mnet->error_count < min_errors) {
+						min_errors = mnet->error_count;
+						min_errors_net = mnet;
+					} else if (mnet->error_count == min_errors
+								&& mnet->last_active > min_errors_net->last_active) {
+						min_errors_net = mnet;
+						min_errors = mnet->error_count;
+					}
+					continue;
+				}
+			}
+			/*
+			 * JRS 5/14/07 - If the destination is reachable and not in PF state, compare the
+			 *  cwnd of the destination to the highest cwnd seen thus far.  Store the
+			 *  destination with the higher cwnd value.  If the cwnd values are equal,
+			 *  randomly choose one of the two destinations.
+			 */
+			if (max_cwnd < mnet->cwnd) {
+				max_cwnd_net = mnet;
+				max_cwnd = mnet->cwnd;
+			} else if (max_cwnd == mnet->cwnd) {
+				uint32_t rndval;
+				uint8_t this_random;
+
+				if (stcb->asoc.hb_random_idx > 3) {
+					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+					memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
+					this_random = stcb->asoc.hb_random_values[0];
+					stcb->asoc.hb_random_idx++;
+					stcb->asoc.hb_ect_randombit = 0;
+				} else {
+					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+					stcb->asoc.hb_random_idx++;
+					stcb->asoc.hb_ect_randombit = 0;
+				}
+				if (this_random % 2 == 1) {
+					max_cwnd_net = mnet;
+					max_cwnd = mnet->cwnd; /* Useless? */
+				}
+			}
+		}
+		if (max_cwnd_net == NULL) {
+			if (min_errors_net == NULL) {
+				return (net);
+			}
+			return (min_errors_net);
+		} else {
+			return (max_cwnd_net);
+		}
+	} /* JRS 5/14/07 - If mode is set to 1, use the CMT policy for choosing an alternate net. */
+	else if (mode == 1) {
+		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
+			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
+			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+				/*
+				 * will skip ones that are not-reachable or
+				 * unconfirmed
+				 */
+				continue;
+			}
+			if (max_cwnd < mnet->cwnd) {
+				max_cwnd_net = mnet;
+				max_cwnd = mnet->cwnd;
+			} else if (max_cwnd == mnet->cwnd) {
+				uint32_t rndval;
+				uint8_t this_random;
+
+				if (stcb->asoc.hb_random_idx > 3) {
+					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
+					memcpy(stcb->asoc.hb_random_values, &rndval,
+					    sizeof(stcb->asoc.hb_random_values));
+					this_random = stcb->asoc.hb_random_values[0];
+					stcb->asoc.hb_random_idx = 0;
+					stcb->asoc.hb_ect_randombit = 0;
+				} else {
+					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
+					stcb->asoc.hb_random_idx++;
+					stcb->asoc.hb_ect_randombit = 0;
+				}
+				if (this_random % 2) {
+					max_cwnd_net = mnet;
+					max_cwnd = mnet->cwnd;
+				}
+			}
+		}
+		if (max_cwnd_net) {
+			return (max_cwnd_net);
+		}
+	}
+	mnet = net;
+	once = 0;
+
+	if (mnet == NULL) {
+		mnet = TAILQ_FIRST(&stcb->asoc.nets);
+		if (mnet == NULL) {
+			return (NULL);
+		}
+	}
+	for (;;) {
+		alt = TAILQ_NEXT(mnet, sctp_next);
+		if (alt == NULL) {
+			once++;
+			if (once > 1) {
+				break;
+			}
+			alt = TAILQ_FIRST(&stcb->asoc.nets);
+			if (alt == NULL) {
+				return (NULL);
+			}
+		}
+		if (alt->ro.ro_rt == NULL) {
+			if (alt->ro._s_addr) {
+				sctp_free_ifa(alt->ro._s_addr);
+				alt->ro._s_addr = NULL;
+			}
+			alt->src_addr_selected = 0;
+		}
+		if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
+		    (alt->ro.ro_rt != NULL) &&
+		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
+			/* Found a reachable address */
+			break;
+		}
+		mnet = alt;
+	}
+
+	if (alt == NULL) {
+		/* Case where NO in-service network exists (dormant state) */
+		/* we rotate destinations */
+		once = 0;
+		mnet = net;
+		for (;;) {
+			if (mnet == NULL) {
+				return (TAILQ_FIRST(&stcb->asoc.nets));
+			}
+			alt = TAILQ_NEXT(mnet, sctp_next);
+			if (alt == NULL) {
+				once++;
+				if (once > 1) {
+					break;
+				}
+				alt = TAILQ_FIRST(&stcb->asoc.nets);
+				if (alt == NULL) {
+					break;
+				}
+			}
+			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
+			    (alt != net)) {
+				/* Found an alternate address */
+				break;
+			}
+			mnet = alt;
+		}
+	}
+	if (alt == NULL) {
+		return (net);
+	}
+	return (alt);
+}
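+
+/*
+ * The three modes accepted by sctp_find_alternate_net() above are:
+ *   0 - walk the net list and return the next reachable, confirmed
+ *       destination with a route (plain failover),
+ *   1 - CMT policy: prefer the reachable destination with the largest
+ *       cwnd, breaking ties randomly,
+ *   2 - CMT-PF policy: like mode 1, but if every destination is in PF
+ *       state fall back to the PF destination with the fewest errors.
+ */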
+
+static void
+sctp_backoff_on_timeout(struct sctp_tcb *stcb,
+    struct sctp_nets *net,
+    int win_probe,
+    int num_marked, int num_abandoned)
+{
+	if (net->RTO == 0) {
+		if (net->RTO_measured) {
+			net->RTO = stcb->asoc.minrto;
+		} else {
+			net->RTO = stcb->asoc.initial_rto;
+		}
+	}
+	net->RTO <<= 1;
+	if (net->RTO > stcb->asoc.maxrto) {
+		net->RTO = stcb->asoc.maxrto;
+	}
+	if ((win_probe == 0) && (num_marked || num_abandoned)) {
+		/* We don't apply penalty to window probe scenarios */
+		/* JRS - Use the congestion control given in the CC module */
+		stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
+	}
+}
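+
+/*
+ * The backoff above doubles the RTO on every timeout and clamps it to
+ * asoc.maxrto.  For example, starting from an RTO of 1000 ms, successive
+ * timeouts yield 2000, 4000, 8000, ... ms until the value is clamped at
+ * maxrto (RFC 4960 suggests an RTO.Max of 60 seconds).  The cwnd penalty
+ * is skipped for window probes.
+ */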
+
+#ifndef INVARIANTS
+static void
+sctp_recover_sent_list(struct sctp_tcb *stcb)
+{
+	struct sctp_tmit_chunk *chk, *nchk;
+	struct sctp_association *asoc;
+
+	asoc = &stcb->asoc;
+	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+		if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.tsn)) {
+			SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
+			            (void *)chk, chk->rec.data.tsn, asoc->last_acked_seq);
+			if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
+				if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+					asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+				}
+			}
+			if ((asoc->strmout[chk->rec.data.sid].chunks_on_queues == 0) &&
+			    (asoc->strmout[chk->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
+			    TAILQ_EMPTY(&asoc->strmout[chk->rec.data.sid].outqueue)) {
+				asoc->trigger_reset = 1;
+			}
+			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+			if (PR_SCTP_ENABLED(chk->flags)) {
+				if (asoc->pr_sctp_cnt != 0)
+					asoc->pr_sctp_cnt--;
+			}
+			if (chk->data) {
+				/*sa_ignore NO_NULL_CHK*/
+				sctp_free_bufspace(stcb, asoc, chk, 1);
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+				if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(chk->flags)) {
+					asoc->sent_queue_cnt_removeable--;
+				}
+			}
+			asoc->sent_queue_cnt--;
+			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
+		}
+	}
+	SCTP_PRINTF("after recover order is as follows\n");
+	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+		SCTP_PRINTF("chk:%p TSN:%x\n", (void *)chk, chk->rec.data.tsn);
+	}
+}
+#endif
+
+static int
+sctp_mark_all_for_resend(struct sctp_tcb *stcb,
+    struct sctp_nets *net,
+    struct sctp_nets *alt,
+    int window_probe,
+    int *num_marked,
+    int *num_abandoned)
+{
+
+	/*
+	 * Mark all chunks (well not all) that were sent to *net for
+	 * retransmission. Move them to alt for their destination as well...
+	 * We only mark chunks that have been outstanding long enough to
+	 * have received feed-back.
+	 */
+	struct sctp_tmit_chunk *chk, *nchk;
+	struct sctp_nets *lnets;
+	struct timeval now, min_wait, tv;
+	int cur_rto;
+	int cnt_abandoned;
+	int audit_tf, num_mk, fir;
+	unsigned int cnt_mk;
+	uint32_t orig_flight, orig_tf;
+	uint32_t tsnlast, tsnfirst;
+	int recovery_cnt = 0;
+
+
+	/* none in flight now */
+	audit_tf = 0;
+	fir = 0;
+	/*
+	 * figure out how long a data chunk must be pending before we can
+	 * mark it ..
+	 */
+	(void)SCTP_GETTIME_TIMEVAL(&now);
+	/* get cur rto in micro-seconds */
+	cur_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+	cur_rto *= 1000;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+		sctp_log_fr(cur_rto,
+			    stcb->asoc.peers_rwnd,
+			    window_probe,
+			    SCTP_FR_T3_MARK_TIME);
+		sctp_log_fr(net->flight_size, 0, 0, SCTP_FR_CWND_REPORT);
+		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
+	}
+	tv.tv_sec = cur_rto / 1000000;
+	tv.tv_usec = cur_rto % 1000000;
+#ifndef __FreeBSD__
+	timersub(&now, &tv, &min_wait);
+#else
+	min_wait = now;
+	timevalsub(&min_wait, &tv);
+#endif
+	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
+		/*
+		 * if we hit here, we don't have enough seconds on the clock
+		 * to account for the RTO. We just let the lower seconds be
+		 * the bounds and don't worry about it. This may mean we
+		 * will mark a lot more than we should.
+		 */
+		min_wait.tv_sec = min_wait.tv_usec = 0;
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+		sctp_log_fr(cur_rto, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
+		sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
+	}
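+	/*
+	 * At this point min_wait = now - RTO.  A chunk is only eligible for
+	 * marking if it was sent before min_wait, i.e. it has been
+	 * outstanding for at least one RTO.  With an RTO of 200 ms and
+	 * now = 100.000000 s, only chunks sent before 99.800000 s are
+	 * considered (window probes are exempt from this check below).
+	 */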
+	/*
+	 * Our rwnd will be incorrect here since we are not adding back the
+	 * cnt * mbuf but we will fix that down below.
+	 */
+	orig_flight = net->flight_size;
+	orig_tf = stcb->asoc.total_flight;
+
+	net->fast_retran_ip = 0;
+	/* Now on to each chunk */
+	cnt_abandoned = 0;
+	num_mk = cnt_mk = 0;
+	tsnfirst = tsnlast = 0;
+#ifndef INVARIANTS
+ start_again:
+#endif
+	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
+		if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.tsn)) {
+			/* Strange case our list got out of order? */
+			SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x\n",
+			            (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.tsn);
+			recovery_cnt++;
+#ifdef INVARIANTS
+			panic("last acked >= chk on sent-Q");
+#else
+			SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
+			sctp_recover_sent_list(stcb);
+			if (recovery_cnt < 10) {
+				goto start_again;
+			} else {
+				SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
+			}
+#endif
+		}
+		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
+			/*
+			 * found one to mark: If it is less than
+			 * DATAGRAM_ACKED it MUST not be a skipped or marked
+			 * TSN but instead one that is either already set
+			 * for retransmission OR one that needs
+			 * retransmission.
+			 */
+
+			/* validate it's been outstanding long enough */
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+				sctp_log_fr(chk->rec.data.tsn,
+					    chk->sent_rcv_time.tv_sec,
+					    chk->sent_rcv_time.tv_usec,
+					    SCTP_FR_T3_MARK_TIME);
+			}
+			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
+				/*
+				 * we have reached a chunk that was sent
+				 * some seconds past our min.. forget it we
+				 * will find no more to send.
+				 */
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+					sctp_log_fr(0,
+						    chk->sent_rcv_time.tv_sec,
+						    chk->sent_rcv_time.tv_usec,
+						    SCTP_FR_T3_STOPPED);
+				}
+				continue;
+			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
+				   (window_probe == 0)) {
+				/*
+				 * we must look at the micro seconds to
+				 * know.
+				 */
+				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
+					/*
+					 * ok it was sent after our boundary
+					 * time.
+					 */
+					continue;
+				}
+			}
+			if (stcb->asoc.prsctp_supported && PR_SCTP_TTL_ENABLED(chk->flags)) {
+				/* Is it expired? */
+#ifndef __FreeBSD__
+				if (timercmp(&now, &chk->rec.data.timetodrop, >)) {
+#else
+				if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
+#endif
+					/* Yes so drop it */
+					if (chk->data) {
+						(void)sctp_release_pr_sctp_chunk(stcb,
+										 chk,
+										 1,
+										 SCTP_SO_NOT_LOCKED);
+						cnt_abandoned++;
+					}
+					continue;
+				}
+			}
+			if (stcb->asoc.prsctp_supported && PR_SCTP_RTX_ENABLED(chk->flags)) {
+				/* Has it been retransmitted tv_sec times? */
+				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
+					if (chk->data) {
+						(void)sctp_release_pr_sctp_chunk(stcb,
+										 chk,
+										 1,
+										 SCTP_SO_NOT_LOCKED);
+						cnt_abandoned++;
+					}
+					continue;
+				}
+			}
+			if (chk->sent < SCTP_DATAGRAM_RESEND) {
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+				num_mk++;
+				if (fir == 0) {
+					fir = 1;
+					tsnfirst = chk->rec.data.tsn;
+				}
+				tsnlast = chk->rec.data.tsn;
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+					sctp_log_fr(chk->rec.data.tsn, chk->snd_count,
+						    0, SCTP_FR_T3_MARKED);
+				}
+
+				if (chk->rec.data.chunk_was_revoked) {
+					/* deflate the cwnd */
+					chk->whoTo->cwnd -= chk->book_size;
+					chk->rec.data.chunk_was_revoked = 0;
+				}
+				net->marked_retrans++;
+				stcb->asoc.marked_retrans++;
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
+						       chk->whoTo->flight_size,
+						       chk->book_size,
+						       (uint32_t)(uintptr_t)chk->whoTo,
+						       chk->rec.data.tsn);
+				}
+				sctp_flight_size_decrease(chk);
+				sctp_total_flight_decrease(stcb, chk);
+				stcb->asoc.peers_rwnd += chk->send_size;
+				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
+			}
+			chk->sent = SCTP_DATAGRAM_RESEND;
+			SCTP_STAT_INCR(sctps_markedretrans);
+
+			/* reset the TSN for striking and other FR stuff */
+			chk->rec.data.doing_fast_retransmit = 0;
+			/* Clear any time so NO RTT is being done */
+
+			if (chk->do_rtt) {
+				if (chk->whoTo->rto_needed == 0) {
+					chk->whoTo->rto_needed = 1;
+				}
+			}
+			chk->do_rtt = 0;
+			if (alt != net) {
+				sctp_free_remote_addr(chk->whoTo);
+				chk->no_fr_allowed = 1;
+				chk->whoTo = alt;
+				atomic_add_int(&alt->ref_count, 1);
+			} else {
+				chk->no_fr_allowed = 0;
+				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
+					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
+				} else {
+					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.tsn;
+				}
+			}
+			/* CMT: Do not allow FRs on retransmitted TSNs.
+			 */
+			if (stcb->asoc.sctp_cmt_on_off > 0) {
+				chk->no_fr_allowed = 1;
+			}
+#ifdef THIS_SHOULD_NOT_BE_DONE
+		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
+			/* remember highest acked one */
+			could_be_sent = chk;
+#endif
+		}
+		if (chk->sent == SCTP_DATAGRAM_RESEND) {
+			cnt_mk++;
+		}
+	}
+	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
+		/* we did not subtract the same things? */
+		audit_tf = 1;
+	}
+
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
+	}
+#ifdef SCTP_DEBUG
+	if (num_mk) {
+		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
+			tsnlast);
+		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
+			num_mk, (u_long)stcb->asoc.peers_rwnd);
+		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
+			tsnlast);
+		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n",
+			num_mk,
+			(int)stcb->asoc.peers_rwnd);
+	}
+#endif
+	*num_marked = num_mk;
+	*num_abandoned = cnt_abandoned;
+	/* Now check for an ECN Echo that may be stranded, and
+	 * include the cnt_mk'd to have all resends in the
+	 * control queue.
+	 */
+	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+		if (chk->sent == SCTP_DATAGRAM_RESEND) {
+			cnt_mk++;
+		}
+		if ((chk->whoTo == net) &&
+		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+			sctp_free_remote_addr(chk->whoTo);
+			chk->whoTo = alt;
+			if (chk->sent != SCTP_DATAGRAM_RESEND) {
+				chk->sent = SCTP_DATAGRAM_RESEND;
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+				cnt_mk++;
+			}
+			atomic_add_int(&alt->ref_count, 1);
+		}
+	}
+#ifdef THIS_SHOULD_NOT_BE_DONE
+	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
+		/* fix it so we retransmit the highest acked anyway */
+		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+		cnt_mk++;
+		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
+	}
+#endif
+	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
+#ifdef INVARIANTS
+		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
+			    cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
+#endif
+#ifndef SCTP_AUDITING_ENABLED
+		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
+#endif
+	}
+	if (audit_tf) {
+		SCTPDBG(SCTP_DEBUG_TIMER4,
+			"Audit total flight due to negative value net:%p\n",
+			(void *)net);
+		stcb->asoc.total_flight = 0;
+		stcb->asoc.total_flight_count = 0;
+		/* Clear all networks flight size */
+		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
+			lnets->flight_size = 0;
+			SCTPDBG(SCTP_DEBUG_TIMER4,
+				"Net:%p c-f cwnd:%d ssthresh:%d\n",
+				(void *)lnets, lnets->cwnd, lnets->ssthresh);
+		}
+		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+			if (chk->sent < SCTP_DATAGRAM_RESEND) {
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
+						       chk->whoTo->flight_size,
+						       chk->book_size,
+						       (uint32_t)(uintptr_t)chk->whoTo,
+						       chk->rec.data.tsn);
+				}
+
+				sctp_flight_size_increase(chk);
+				sctp_total_flight_increase(stcb, chk);
+			}
+		}
+	}
+	/* We return 1 if we only have a window probe outstanding */
+	return (0);
+}
+
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	struct sctp_nets *alt;
+	int win_probe, num_mk, num_abandoned;
+
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
+		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
+		struct sctp_nets *lnet;
+
+		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+			if (net == lnet) {
+				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
+			} else {
+				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
+			}
+		}
+	}
+	/* Find an alternate and mark those for retransmission */
+	if ((stcb->asoc.peers_rwnd == 0) &&
+	    (stcb->asoc.total_flight < net->mtu)) {
+		SCTP_STAT_INCR(sctps_timowindowprobe);
+		win_probe = 1;
+	} else {
+		win_probe = 0;
+	}
+
+	if (win_probe == 0) {
+		/* We don't do normal threshold management on window probes */
+		if (sctp_threshold_management(inp, stcb, net,
+		    stcb->asoc.max_send_times)) {
+			/* Association was destroyed */
+			return (1);
+		} else {
+			if (net != stcb->asoc.primary_destination) {
+				/* send an immediate HB if our RTO is stale */
+				struct timeval now;
+				unsigned int ms_goneby;
+
+				(void)SCTP_GETTIME_TIMEVAL(&now);
+				if (net->last_sent_time.tv_sec) {
+					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
+				} else {
+					ms_goneby = 0;
+				}
+				if ((net->dest_state & SCTP_ADDR_PF) == 0) {
+					if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
+						/*
+						 * no recent feed back in an RTO or
+						 * more, request a RTT update
+						 */
+						sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+					}
+				}
+			}
+		}
+	} else {
+		/*
+		 * For a window probe we don't penalize the net's but only
+		 * the association. This may fail it if SACKs are not coming
+		 * back. If sack's are coming with rwnd locked at 0, we will
+		 * continue to hold things waiting for rwnd to raise
+		 */
+		if (sctp_threshold_management(inp, stcb, NULL,
+		    stcb->asoc.max_send_times)) {
+			/* Association was destroyed */
+			return (1);
+		}
+	}
+	if (stcb->asoc.sctp_cmt_on_off > 0) {
+		if (net->pf_threshold < net->failure_threshold) {
+			alt = sctp_find_alternate_net(stcb, net, 2);
+		} else {
+		        /*
+			 * CMT: Using RTX_SSTHRESH policy for CMT.
+			 * If CMT is being used, then pick dest with
+			 * largest ssthresh for any retransmission.
+			 */
+			alt = sctp_find_alternate_net(stcb, net, 1);
+			/*
+			 * CUCv2: If a different dest is picked for
+			 * the retransmission, then new
+			 * (rtx-)pseudo_cumack needs to be tracked
+			 * for orig dest. Let CUCv2 track new (rtx-)
+			 * pseudo-cumack always.
+			 */
+			net->find_pseudo_cumack = 1;
+			net->find_rtx_pseudo_cumack = 1;
+		}
+	} else {
+		alt = sctp_find_alternate_net(stcb, net, 0);
+	}
+
+	num_mk = 0;
+	num_abandoned = 0;
+	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
+				      &num_mk, &num_abandoned);
+	/* FR Loss recovery just ended with the T3. */
+	stcb->asoc.fast_retran_loss_recovery = 0;
+
+	/* CMT FR loss recovery ended with the T3 */
+	net->fast_retran_loss_recovery = 0;
+	if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
+	    (net->flight_size == 0)) {
+		(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins)(stcb, net);
+	}
+
+	/*
+	 * setup the sat loss recovery that prevents satellite cwnd advance.
+	 */
+	stcb->asoc.sat_t3_loss_recovery = 1;
+	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
+
+	/* Backoff the timer and cwnd */
+	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
+	if ((!(net->dest_state & SCTP_ADDR_REACHABLE)) ||
+	    (net->dest_state & SCTP_ADDR_PF)) {
+		/* Move all pending over too */
+		sctp_move_chunks_from_net(stcb, net);
+
+		/* Get the address that failed, to
+		 * force a new src address selecton and
+		 * a route allocation.
+		 */
+		if (net->ro._s_addr) {
+			sctp_free_ifa(net->ro._s_addr);
+			net->ro._s_addr = NULL;
+		}
+		net->src_addr_selected = 0;
+
+		/* Force a route allocation too */
+		if (net->ro.ro_rt) {
+			RTFREE(net->ro.ro_rt);
+			net->ro.ro_rt = NULL;
+		}
+
+		/* Was it our primary? */
+		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
+			/*
+			 * Yes, note it as such and find an alternate note:
+			 * this means HB code must use this to resend the
+			 * primary if it goes active AND if someone does a
+			 * change-primary then this flag must be cleared
+			 * from any net structures.
+			 */
+			if (stcb->asoc.alternate) {
+				sctp_free_remote_addr(stcb->asoc.alternate);
+			}
+			stcb->asoc.alternate = alt;
+			atomic_add_int(&stcb->asoc.alternate->ref_count, 1);
+		}
+	}
+	/*
+	 * Special case for cookie-echo'ed case, we don't do output but must
+	 * await the COOKIE-ACK before retransmission
+	 */
+	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+		/*
+		 * Here we just reset the timer and start again since we
+		 * have not established the asoc
+		 */
+		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
+		return (0);
+	}
+	if (stcb->asoc.prsctp_supported) {
+		struct sctp_tmit_chunk *lchk;
+
+		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
+		/* C3. See if we need to send a Fwd-TSN */
+		if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) {
+			send_forward_tsn(stcb, &stcb->asoc);
+			if (lchk) {
+				/* Assure a timer is up */
+				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
+			}
+		}
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
+		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
+	}
+	return (0);
+}
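+
+/*
+ * Overall T3-rtx flow: decide whether this is a window probe, run
+ * threshold management (possibly aborting the association), pick an
+ * alternate destination, mark eligible chunks for retransmission via
+ * sctp_mark_all_for_resend(), back off the RTO and cwnd, and, when the
+ * failed address was the primary, remember the alternate in
+ * asoc.alternate until the primary becomes usable again.
+ */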
+
+int
+sctp_t1init_timer(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	/* bump the thresholds */
+	if (stcb->asoc.delayed_connection) {
+		/*
+		 * special hook for delayed connection. The library did NOT
+		 * complete the rest of its sends.
+		 */
+		stcb->asoc.delayed_connection = 0;
+		sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
+		return (0);
+	}
+	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
+		return (0);
+	}
+	if (sctp_threshold_management(inp, stcb, net,
+	    stcb->asoc.max_init_times)) {
+		/* Association was destroyed */
+		return (1);
+	}
+	stcb->asoc.dropped_special_cnt = 0;
+	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
+	if (stcb->asoc.initial_init_rto_max < net->RTO) {
+		net->RTO = stcb->asoc.initial_init_rto_max;
+	}
+	if (stcb->asoc.numnets > 1) {
+		/* If we have more than one addr use it */
+		struct sctp_nets *alt;
+
+		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
+		if (alt != stcb->asoc.primary_destination) {
+			sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination);
+			stcb->asoc.primary_destination = alt;
+		}
+	}
+	/* Send out a new init */
+	sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
+	return (0);
+}
+
+/*
+ * For cookie and asconf we actually need to find and mark for resend, then
+ * increment the resend counter (after all the threshold management stuff of
+ * course).
+ */
+int
+sctp_cookie_timer(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_nets *net SCTP_UNUSED)
+{
+	struct sctp_nets *alt;
+	struct sctp_tmit_chunk *cookie;
+
+	/* first before all else we must find the cookie */
+	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
+		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
+			break;
+		}
+	}
+	if (cookie == NULL) {
+		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
+			/* FOOBAR! */
+			struct mbuf *op_err;
+
+			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+			                             "Cookie timer expired, but no cookie");
+			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
+			sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		} else {
+#ifdef INVARIANTS
+			panic("Cookie timer expires in wrong state?");
+#else
+			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
+			return (0);
+#endif
+		}
+		return (0);
+	}
+	/* Ok we found the cookie, threshold management next */
+	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
+	    stcb->asoc.max_init_times)) {
+		/* Assoc is over */
+		return (1);
+	}
+	/*
+	 * Cleared threshold management, now lets backoff the address
+	 * and select an alternate
+	 */
+	stcb->asoc.dropped_special_cnt = 0;
+	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
+	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
+	if (alt != cookie->whoTo) {
+		sctp_free_remote_addr(cookie->whoTo);
+		cookie->whoTo = alt;
+		atomic_add_int(&alt->ref_count, 1);
+	}
+	/* Now mark the retran info */
+	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
+		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+	}
+	cookie->sent = SCTP_DATAGRAM_RESEND;
+	/*
+	 * Now call the output routine to kick out the cookie again, Note we
+	 * don't mark any chunks for retran so that FR will need to kick in
+	 * to move these (or a send timer).
+	 */
+	return (0);
+}
+
+int
+sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	struct sctp_nets *alt;
+	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
+
+	if (stcb->asoc.stream_reset_outstanding == 0) {
+		return (0);
+	}
+	/* find the existing STRRESET, we use the seq number we sent out on */
+	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
+	if (strrst == NULL) {
+		return (0);
+	}
+	/* do threshold management */
+	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
+	    stcb->asoc.max_send_times)) {
+		/* Assoc is over */
+		return (1);
+	}
+	/*
+	 * Cleared threshold management, now lets backoff the address
+	 * and select an alternate
+	 */
+	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0);
+	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
+	sctp_free_remote_addr(strrst->whoTo);
+	strrst->whoTo = alt;
+	atomic_add_int(&alt->ref_count, 1);
+
+	/* See if an ECN Echo is also stranded */
+	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+		if ((chk->whoTo == net) &&
+		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+			sctp_free_remote_addr(chk->whoTo);
+			if (chk->sent != SCTP_DATAGRAM_RESEND) {
+				chk->sent = SCTP_DATAGRAM_RESEND;
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+			}
+			chk->whoTo = alt;
+			atomic_add_int(&alt->ref_count, 1);
+		}
+	}
+	if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+		/*
+		 * If the address went un-reachable, we need to move to
+		 * alternates for ALL chk's in queue
+		 */
+		sctp_move_chunks_from_net(stcb, net);
+	}
+	/* mark the retran info */
+	if (strrst->sent != SCTP_DATAGRAM_RESEND)
+		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+	strrst->sent = SCTP_DATAGRAM_RESEND;
+
+	/* restart the timer */
+	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
+	return (0);
+}
+
+int
+sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+		  struct sctp_nets *net)
+{
+	struct sctp_nets *alt;
+	struct sctp_tmit_chunk *asconf, *chk;
+
+	/* is this a first send, or a retransmission? */
+	if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
+		/* compose a new ASCONF chunk and send it */
+		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
+	} else {
+		/*
+		 * Retransmission of the existing ASCONF is needed
+		 */
+
+		/* find the existing ASCONF */
+		asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
+		if (asconf == NULL) {
+			return (0);
+		}
+		/* do threshold management */
+		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
+		    stcb->asoc.max_send_times)) {
+			/* Assoc is over */
+			return (1);
+		}
+		if (asconf->snd_count > stcb->asoc.max_send_times) {
+			/*
+			 * Something is rotten: our peer is not responding to
+			 * ASCONFs but apparently is to other chunks.  i.e. it
+			 * is not properly handling the chunk type upper bits.
+			 * Mark this peer as ASCONF incapable and cleanup.
+			 */
+			SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
+			sctp_asconf_cleanup(stcb, net);
+			return (0);
+		}
+		/*
+		 * cleared threshold management, so now backoff the net and
+		 * select an alternate
+		 */
+		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0);
+		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
+		if (asconf->whoTo != alt) {
+			sctp_free_remote_addr(asconf->whoTo);
+			asconf->whoTo = alt;
+			atomic_add_int(&alt->ref_count, 1);
+		}
+
+		/* See if an ECN Echo is also stranded */
+		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
+			if ((chk->whoTo == net) &&
+			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
+				sctp_free_remote_addr(chk->whoTo);
+				chk->whoTo = alt;
+				if (chk->sent != SCTP_DATAGRAM_RESEND) {
+					chk->sent = SCTP_DATAGRAM_RESEND;
+					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+				}
+				atomic_add_int(&alt->ref_count, 1);
+			}
+		}
+		TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
+			if (chk->whoTo != alt) {
+				sctp_free_remote_addr(chk->whoTo);
+				chk->whoTo = alt;
+				atomic_add_int(&alt->ref_count, 1);
+			}
+			if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+			chk->sent = SCTP_DATAGRAM_RESEND;
+		}
+		if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
+			/*
+			 * If the address went un-reachable, we need to move
+			 * to the alternate for ALL chunks in queue
+			 */
+			sctp_move_chunks_from_net(stcb, net);
+		}
+		/* mark the retran info */
+		if (asconf->sent != SCTP_DATAGRAM_RESEND)
+			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+		asconf->sent = SCTP_DATAGRAM_RESEND;
+
+		/* send another ASCONF if any and we can do */
+		sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
+	}
+	return (0);
+}
+
+/* Mobility adaptation */
+void
+sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                       struct sctp_nets *net SCTP_UNUSED)
+{
+	if (stcb->asoc.deleted_primary == NULL) {
+		SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
+		sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+		return;
+	}
+	SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
+	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
+	sctp_free_remote_addr(stcb->asoc.deleted_primary);
+	stcb->asoc.deleted_primary = NULL;
+	sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
+	return;
+}
+
+/*
+ * For the shutdown and shutdown-ack, we do not keep one around on the
+ * control queue. This means we must generate a new one and call the general
+ * chunk output routine, AFTER having done threshold management.
+ * It is assumed that net is non-NULL.
+ */
+int
+sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	struct sctp_nets *alt;
+
+	/* first threshold management */
+	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+		/* Assoc is over */
+		return (1);
+	}
+	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+	/* second select an alternative */
+	alt = sctp_find_alternate_net(stcb, net, 0);
+
+	/* third generate a shutdown into the queue for out net */
+	sctp_send_shutdown(stcb, alt);
+
+	/* fourth restart timer */
+	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
+	return (0);
+}
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	struct sctp_nets *alt;
+
+	/* first threshold management */
+	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+		/* Assoc is over */
+		return (1);
+	}
+	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+	/* second select an alternative */
+	alt = sctp_find_alternate_net(stcb, net, 0);
+
+	/* third generate a shutdown into the queue for out net */
+	sctp_send_shutdown_ack(stcb, alt);
+
+	/* fourth restart timer */
+	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
+	return (0);
+}
+
+static void
+sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb)
+{
+	struct sctp_stream_queue_pending *sp;
+	unsigned int i, chks_in_queue = 0;
+	int being_filled = 0;
+	/*
+	 * This function is ONLY called when the send/sent queues are empty.
+	 */
+	if ((stcb == NULL) || (inp == NULL))
+		return;
+
+	if (stcb->asoc.sent_queue_retran_cnt) {
+		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
+			    stcb->asoc.sent_queue_retran_cnt);
+		stcb->asoc.sent_queue_retran_cnt = 0;
+	}
+	if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
+		/* No stream scheduler information, initialize scheduler */
+		stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 0);
+		if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) {
+			/* yep, we lost a stream or two */
+			SCTP_PRINTF("Found additional streams NOT managed by scheduler, corrected\n");
+		} else {
+			/* no streams lost */
+			stcb->asoc.total_output_queue_size = 0;
+		}
+	}
+	/* Check to see if some data queued, if so report it */
+	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+		if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+			TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) {
+				if (sp->msg_is_complete)
+					being_filled++;
+				chks_in_queue++;
+			}
+		}
+	}
+	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
+		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
+			    stcb->asoc.stream_queue_cnt, chks_in_queue);
+	}
+	if (chks_in_queue) {
+		/* call the output queue function */
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+			/*
+			 * Probably should go in and make it go back through
+			 * and add fragments allowed
+			 */
+			if (being_filled == 0) {
+				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
+					    chks_in_queue);
+			}
+		}
+	} else {
+		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
+			    (u_long)stcb->asoc.total_output_queue_size);
+		stcb->asoc.total_output_queue_size = 0;
+	}
+}
+
+int
+sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	uint8_t net_was_pf;
+
+	if (net->dest_state & SCTP_ADDR_PF) {
+		net_was_pf = 1;
+	} else {
+		net_was_pf = 0;
+	}
+	if (net->hb_responded == 0) {
+		if (net->ro._s_addr) {
+			/* Invalidate the src address if we did not get
+			 * a response last time.
+			 */
+			sctp_free_ifa(net->ro._s_addr);
+			net->ro._s_addr = NULL;
+			net->src_addr_selected = 0;
+		}
+		sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
+		if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
+			/* Assoc is over */
+			return (1);
+		}
+	}
+	/* Zero PBA, if it needs it */
+	if (net->partial_bytes_acked) {
+		net->partial_bytes_acked = 0;
+	}
+	if ((stcb->asoc.total_output_queue_size > 0) &&
+	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
+	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
+		sctp_audit_stream_queues_for_size(inp, stcb);
+	}
+	if (!(net->dest_state & SCTP_ADDR_NOHB) &&
+	    !((net_was_pf == 0) && (net->dest_state & SCTP_ADDR_PF))) {
+		/* when moving to PF during threshold management, a HB has been
+		   queued in that routine */
+		uint32_t ms_gone_by;
+
+		if ((net->last_sent_time.tv_sec > 0) ||
+		    (net->last_sent_time.tv_usec > 0)) {
+#ifdef __FreeBSD__
+			struct timeval diff;
+
+			SCTP_GETTIME_TIMEVAL(&diff);
+			timevalsub(&diff, &net->last_sent_time);
+#else
+			struct timeval diff, now;
+
+			SCTP_GETTIME_TIMEVAL(&now);
+			timersub(&now, &net->last_sent_time, &diff);
+#endif
+			ms_gone_by = (uint32_t)(diff.tv_sec * 1000) +
+			             (uint32_t)(diff.tv_usec / 1000);
+		} else {
+			ms_gone_by = 0xffffffff;
+		}
+		if ((ms_gone_by >= net->heart_beat_delay) ||
+		    (net->dest_state & SCTP_ADDR_PF)) {
+			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
+		}
+	}
+	return (0);
+}
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	uint32_t next_mtu, mtu;
+
+	next_mtu = sctp_get_next_mtu(net->mtu);
+
+	if ((next_mtu > net->mtu) && (net->port == 0)) {
+		if ((net->src_addr_selected == 0) ||
+		    (net->ro._s_addr == NULL) ||
+		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
+			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
+				sctp_free_ifa(net->ro._s_addr);
+				net->ro._s_addr = NULL;
+				net->src_addr_selected = 0;
+			} else  if (net->ro._s_addr == NULL) {
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+					/* KAME hack: embed scopeid */
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD)
+					(void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL);
+#else
+					(void)in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL, NULL);
+#endif
+#elif defined(SCTP_KAME)
+					(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
+#else
+					(void)in6_embedscope(&sin6->sin6_addr, sin6);
+#endif
+				}
+#endif
+
+				net->ro._s_addr = sctp_source_address_selection(inp,
+										stcb,
+										(sctp_route_t *)&net->ro,
+										net, 0, stcb->asoc.vrf_id);
+#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
+				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
+#ifdef SCTP_KAME
+					(void)sa6_recoverscope(sin6);
+#else
+					(void)in6_recoverscope(sin6, &sin6->sin6_addr, NULL);
+#endif	/* SCTP_KAME */
+				}
+#endif	/* INET6 */
+			}
+			if (net->ro._s_addr)
+				net->src_addr_selected = 1;
+		}
+		if (net->ro._s_addr) {
+			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
+#if defined(INET) || defined(INET6)
+			if (net->port) {
+				mtu -= sizeof(struct udphdr);
+			}
+#endif
+			if (mtu > next_mtu) {
+				net->mtu = next_mtu;
+			} else {
+				net->mtu = mtu;
+			}
+		}
+	}
+	/* restart the timer */
+	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+}
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	struct timeval tn, *tim_touse;
+	struct sctp_association *asoc;
+	int ticks_gone_by;
+
+	(void)SCTP_GETTIME_TIMEVAL(&tn);
+	if (stcb->asoc.sctp_autoclose_ticks &&
+	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
+		/* Auto close is on */
+		asoc = &stcb->asoc;
+		/* pick the time to use */
+		if (asoc->time_last_rcvd.tv_sec >
+		    asoc->time_last_sent.tv_sec) {
+			tim_touse = &asoc->time_last_rcvd;
+		} else {
+			tim_touse = &asoc->time_last_sent;
+		}
+		/* Now, has enough time transpired to autoclose? */
+		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
+		if ((ticks_gone_by > 0) &&
+		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
+			/*
+			 * autoclose time has hit, call the output routine,
+			 * which should do nothing just to be SURE we don't
+			 * have hanging data. We can then safely check the
+			 * queues and know that we are clear to send
+			 * shutdown
+			 */
+			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
+			/* Are we clean? */
+			if (TAILQ_EMPTY(&asoc->send_queue) &&
+			    TAILQ_EMPTY(&asoc->sent_queue)) {
+				/*
+				 * there is nothing queued to send, so I'm
+				 * done...
+				 */
+				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
+					/* only send SHUTDOWN 1st time thru */
+					struct sctp_nets *netp;
+
+					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+					}
+					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+					sctp_stop_timers_for_shutdown(stcb);
+					if (stcb->asoc.alternate) {
+						netp = stcb->asoc.alternate;
+					} else {
+						netp = stcb->asoc.primary_destination;
+					}
+					sctp_send_shutdown(stcb, netp);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+							 stcb->sctp_ep, stcb,
+							 netp);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+							 stcb->sctp_ep, stcb,
+							 netp);
+				}
+			}
+		} else {
+			/*
+			 * No auto close at this time, reset t-o to check
+			 * later
+			 */
+			int tmp;
+
+			/* fool the timer startup to use the time left */
+			tmp = asoc->sctp_autoclose_ticks;
+			asoc->sctp_autoclose_ticks -= ticks_gone_by;
+			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
+			    net);
+			/* restore the real tick value */
+			asoc->sctp_autoclose_ticks = tmp;
+		}
+	}
+}
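+
+/*
+ * The "fool the timer startup" trick above restarts the autoclose timer
+ * with only the remaining idle time: if sctp_autoclose_ticks is 30 and 12
+ * ticks have already gone by, the timer is re-armed for 18 ticks and the
+ * configured value is restored immediately afterwards.
+ */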
+
diff --git a/usrsctplib/netinet/sctp_timer.h b/usrsctplib/netinet/sctp_timer.h
new file mode 100755
index 0000000..a519bb6
--- /dev/null
+++ b/usrsctplib/netinet/sctp_timer.h
@@ -0,0 +1,103 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.h 295709 2016-02-17 18:04:22Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_TIMER_H_
+#define _NETINET_SCTP_TIMER_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#define SCTP_RTT_SHIFT 3
+#define SCTP_RTT_VAR_SHIFT 2
+
+struct sctp_nets *
+sctp_find_alternate_net(struct sctp_tcb *,
+    struct sctp_nets *, int mode);
+
+int
+sctp_t3rxt_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+int
+sctp_t1init_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+int
+sctp_shutdown_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+int
+sctp_heartbeat_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+
+int
+sctp_cookie_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+
+void
+sctp_pathmtu_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+
+int
+sctp_shutdownack_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+int
+sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net);
+
+int
+sctp_asconf_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+
+void
+sctp_delete_prim_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+
+void
+sctp_autoclose_timer(struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *net);
+
+void sctp_audit_retranmission_queue(struct sctp_association *);
+
+void sctp_iterator_timer(struct sctp_iterator *it);
+
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD) || defined(APPLE_SNOWLEOPARD) || defined(APPLE_LION) || defined(APPLE_MOUNTAINLION)
+void sctp_slowtimo(void);
+#else
+void sctp_gc(struct inpcbinfo *);
+#endif
+#endif
+
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_uio.h b/usrsctplib/netinet/sctp_uio.h
new file mode 100755
index 0000000..c46989b
--- /dev/null
+++ b/usrsctplib/netinet/sctp_uio.h
@@ -0,0 +1,1464 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_uio.h 309607 2016-12-06 10:21:25Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_UIO_H_
+#define _NETINET_SCTP_UIO_H_
+
+#if (defined(__APPLE__) && defined(KERNEL))
+#ifndef _KERNEL
+#define _KERNEL
+#endif
+#endif
+
+#if !(defined(__Windows__)) && !defined(__Userspace_os_Windows)
+#if ! defined(_KERNEL)
+#include <stdint.h>
+#endif
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+#if defined(__Windows__)
+#pragma warning(push)
+#pragma warning(disable: 4200)
+#if defined(_KERNEL)
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+#endif
+
+typedef uint32_t sctp_assoc_t;
+
+#define SCTP_FUTURE_ASSOC  0
+#define SCTP_CURRENT_ASSOC 1
+#define SCTP_ALL_ASSOC     2
+
+struct sctp_event {
+	sctp_assoc_t se_assoc_id;
+	uint16_t     se_type;
+	uint8_t      se_on;
+};
+
+/* Compatibility to previous define's */
+#define sctp_stream_reset_events sctp_stream_reset_event
+
+/* On/Off setup for subscription to events */
+struct sctp_event_subscribe {
+	uint8_t sctp_data_io_event;
+	uint8_t sctp_association_event;
+	uint8_t sctp_address_event;
+	uint8_t sctp_send_failure_event;
+	uint8_t sctp_peer_error_event;
+	uint8_t sctp_shutdown_event;
+	uint8_t sctp_partial_delivery_event;
+	uint8_t sctp_adaptation_layer_event;
+	uint8_t sctp_authentication_event;
+	uint8_t sctp_sender_dry_event;
+	uint8_t sctp_stream_reset_event;
+};
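+/*
+ * Illustrative sketch (not part of the original header, assumes an already
+ * opened SCTP socket "sd"): newer code enables individual notifications with
+ * the per-event SCTP_EVENT socket option from sctp.h instead of filling in
+ * sctp_event_subscribe:
+ *
+ *	struct sctp_event ev;
+ *
+ *	memset(&ev, 0, sizeof(ev));
+ *	ev.se_assoc_id = SCTP_FUTURE_ASSOC;
+ *	ev.se_type = SCTP_ASSOC_CHANGE;
+ *	ev.se_on = 1;
+ *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev)) < 0)
+ *		perror("setsockopt(SCTP_EVENT)");
+ */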
+
+/* ancillary data types */
+#define SCTP_INIT	0x0001
+#define SCTP_SNDRCV	0x0002
+#define SCTP_EXTRCV	0x0003
+#define SCTP_SNDINFO    0x0004
+#define SCTP_RCVINFO    0x0005
+#define SCTP_NXTINFO    0x0006
+#define SCTP_PRINFO     0x0007
+#define SCTP_AUTHINFO   0x0008
+#define SCTP_DSTADDRV4  0x0009
+#define SCTP_DSTADDRV6  0x000a
+
+/*
+ * ancillary data structures
+ */
+struct sctp_initmsg {
+#if defined(__FreeBSD__) && __FreeBSD_version < 800000
+	/* This is a bug. Not fixed for ABI compatibility */
+	uint32_t sinit_num_ostreams;
+	uint32_t sinit_max_instreams;
+#else
+	uint16_t sinit_num_ostreams;
+	uint16_t sinit_max_instreams;
+#endif
+	uint16_t sinit_max_attempts;
+	uint16_t sinit_max_init_timeo;
+};
+
+/* We add 96 bytes to the size of sctp_sndrcvinfo.
+ * This makes the current structure 128 bytes long,
+ * which is nicely 64 bit aligned but also leaves room
+ * for us to add more fields while keeping ABI compatibility.
+ * For example, we already have the sctp_extrcvinfo
+ * which, when enabled, is 48 bytes.
+ */
+
+/*
+ * The assoc up needs a verfid;
+ * all sndrcvinfo's need a verfid for SENDING only.
+ */
+
+
+#define SCTP_ALIGN_RESV_PAD 92
+#define SCTP_ALIGN_RESV_PAD_SHORT 76
+
+struct sctp_sndrcvinfo {
+	uint16_t sinfo_stream;
+	uint16_t sinfo_ssn;
+	uint16_t sinfo_flags;
+#if defined(__FreeBSD__) && __FreeBSD_version < 800000
+	uint16_t sinfo_pr_policy;
+#endif
+	uint32_t sinfo_ppid;
+	uint32_t sinfo_context;
+	uint32_t sinfo_timetolive;
+	uint32_t sinfo_tsn;
+	uint32_t sinfo_cumtsn;
+	sctp_assoc_t sinfo_assoc_id;
+	uint16_t sinfo_keynumber;
+	uint16_t sinfo_keynumber_valid;
+	uint8_t  __reserve_pad[SCTP_ALIGN_RESV_PAD];
+};
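+/*
+ * Illustrative sketch (not part of the original header, assumes "sd", "buf",
+ * "len" and a filled-in "peer" sockaddr exist in the caller): passing a
+ * struct sctp_sndrcvinfo as SCTP_SNDRCV ancillary data with sendmsg():
+ *
+ *	struct msghdr msg;
+ *	struct iovec iov = { buf, len };
+ *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
+ *	struct cmsghdr *cmsg;
+ *	struct sctp_sndrcvinfo *sinfo;
+ *
+ *	memset(&msg, 0, sizeof(msg));
+ *	memset(cbuf, 0, sizeof(cbuf));
+ *	msg.msg_name = &peer;
+ *	msg.msg_namelen = sizeof(peer);
+ *	msg.msg_iov = &iov;
+ *	msg.msg_iovlen = 1;
+ *	msg.msg_control = cbuf;
+ *	msg.msg_controllen = sizeof(cbuf);
+ *	cmsg = CMSG_FIRSTHDR(&msg);
+ *	cmsg->cmsg_level = IPPROTO_SCTP;
+ *	cmsg->cmsg_type = SCTP_SNDRCV;
+ *	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+ *	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+ *	sinfo->sinfo_stream = 1;
+ *	sinfo->sinfo_ppid = htonl(42);
+ *	(void)sendmsg(sd, &msg, 0);
+ */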
+
+struct sctp_extrcvinfo {
+	uint16_t sinfo_stream;
+	uint16_t sinfo_ssn;
+	uint16_t sinfo_flags;
+#if defined(__FreeBSD__) && __FreeBSD_version < 800000
+	uint16_t sinfo_pr_policy;
+#endif
+	uint32_t sinfo_ppid;
+	uint32_t sinfo_context;
+	uint32_t sinfo_timetolive; /* should have been sinfo_pr_value */
+	uint32_t sinfo_tsn;
+	uint32_t sinfo_cumtsn;
+	sctp_assoc_t sinfo_assoc_id;
+	uint16_t serinfo_next_flags;
+	uint16_t serinfo_next_stream;
+	uint32_t serinfo_next_aid;
+	uint32_t serinfo_next_length;
+	uint32_t serinfo_next_ppid;
+	uint16_t sinfo_keynumber;
+	uint16_t sinfo_keynumber_valid;
+	uint8_t  __reserve_pad[SCTP_ALIGN_RESV_PAD_SHORT];
+};
+#define sinfo_pr_value sinfo_timetolive
+#define sreinfo_next_flags serinfo_next_flags
+#define sreinfo_next_stream serinfo_next_stream
+#define sreinfo_next_aid serinfo_next_aid
+#define sreinfo_next_length serinfo_next_length
+#define sreinfo_next_ppid serinfo_next_ppid
+
+struct sctp_sndinfo {
+	uint16_t snd_sid;
+	uint16_t snd_flags;
+	uint32_t snd_ppid;
+	uint32_t snd_context;
+	sctp_assoc_t snd_assoc_id;
+};
+
+struct sctp_prinfo {
+	uint16_t pr_policy;
+	uint32_t pr_value;
+};
+
+struct sctp_default_prinfo {
+	uint16_t pr_policy;
+	uint32_t pr_value;
+	sctp_assoc_t pr_assoc_id;
+};
+
+struct sctp_authinfo {
+	uint16_t auth_keynumber;
+};
+
+struct sctp_rcvinfo {
+	uint16_t rcv_sid;
+	uint16_t rcv_ssn;
+	uint16_t rcv_flags;
+	uint32_t rcv_ppid;
+	uint32_t rcv_tsn;
+	uint32_t rcv_cumtsn;
+	uint32_t rcv_context;
+	sctp_assoc_t rcv_assoc_id;
+};
+
+struct sctp_nxtinfo {
+	uint16_t nxt_sid;
+	uint16_t nxt_flags;
+	uint32_t nxt_ppid;
+	uint32_t nxt_length;
+	sctp_assoc_t nxt_assoc_id;
+};
+
+#define SCTP_NO_NEXT_MSG           0x0000
+#define SCTP_NEXT_MSG_AVAIL        0x0001
+#define SCTP_NEXT_MSG_ISCOMPLETE   0x0002
+#define SCTP_NEXT_MSG_IS_UNORDERED 0x0004
+#define SCTP_NEXT_MSG_IS_NOTIFICATION 0x0008
+
+struct sctp_recvv_rn {
+	struct sctp_rcvinfo recvv_rcvinfo;
+	struct sctp_nxtinfo recvv_nxtinfo;
+};
+
+#define SCTP_RECVV_NOINFO  0
+#define SCTP_RECVV_RCVINFO 1
+#define SCTP_RECVV_NXTINFO 2
+#define SCTP_RECVV_RN      3
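+/*
+ * Illustrative sketch (not part of the original header, assumes "sd" and a
+ * char array "buf" exist and that SCTP_RECVRCVINFO and SCTP_RECVNXTINFO have
+ * been enabled): receiving rcvinfo and nxtinfo together via sctp_recvv():
+ *
+ *	struct sctp_recvv_rn rn;
+ *	struct sockaddr_storage from;
+ *	socklen_t fromlen = sizeof(from), infolen = sizeof(rn);
+ *	unsigned int infotype = 0;
+ *	int flags = 0;
+ *	struct iovec iov = { buf, sizeof(buf) };
+ *	ssize_t n;
+ *
+ *	n = sctp_recvv(sd, &iov, 1, (struct sockaddr *)&from, &fromlen,
+ *	    &rn, &infolen, &infotype, &flags);
+ *	if ((n > 0) && (infotype == SCTP_RECVV_RN))
+ *		printf("sid %u, %u bytes of the next message queued\n",
+ *		    rn.recvv_rcvinfo.rcv_sid, rn.recvv_nxtinfo.nxt_length);
+ */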
+
+#define SCTP_SENDV_NOINFO   0
+#define SCTP_SENDV_SNDINFO  1
+#define SCTP_SENDV_PRINFO   2
+#define SCTP_SENDV_AUTHINFO 3
+#define SCTP_SENDV_SPA      4
+
+struct sctp_sendv_spa {
+	uint32_t sendv_flags;
+	struct sctp_sndinfo sendv_sndinfo;
+	struct sctp_prinfo sendv_prinfo;
+	struct sctp_authinfo sendv_authinfo;
+};
+
+#define SCTP_SEND_SNDINFO_VALID  0x00000001
+#define SCTP_SEND_PRINFO_VALID   0x00000002
+#define SCTP_SEND_AUTHINFO_VALID 0x00000004
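+/*
+ * Illustrative sketch (not part of the original header, assumes "sd", "buf",
+ * "len" and a filled-in "peer" sockaddr exist): combining send info and
+ * PR-SCTP info (pr_value is a lifetime in milliseconds for SCTP_PR_SCTP_TTL)
+ * in a single sctp_sendv() call via SCTP_SENDV_SPA:
+ *
+ *	struct sctp_sendv_spa spa;
+ *	struct iovec iov = { buf, len };
+ *
+ *	memset(&spa, 0, sizeof(spa));
+ *	spa.sendv_flags = SCTP_SEND_SNDINFO_VALID | SCTP_SEND_PRINFO_VALID;
+ *	spa.sendv_sndinfo.snd_sid = 1;
+ *	spa.sendv_prinfo.pr_policy = SCTP_PR_SCTP_TTL;
+ *	spa.sendv_prinfo.pr_value = 3000;
+ *	(void)sctp_sendv(sd, &iov, 1, (struct sockaddr *)&peer, 1,
+ *	    &spa, sizeof(spa), SCTP_SENDV_SPA, 0);
+ */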
+
+struct sctp_snd_all_completes {
+	uint16_t sall_stream;
+	uint16_t sall_flags;
+	uint32_t sall_ppid;
+	uint32_t sall_context;
+	uint32_t sall_num_sent;
+	uint32_t sall_num_failed;
+};
+
+/* Flags that go into the sinfo->sinfo_flags field */
+#define SCTP_NOTIFICATION     0x0010 /* next message is a notification */
+#define SCTP_COMPLETE         0x0020 /* next message is complete */
+#define SCTP_EOF              0x0100 /* Start shutdown procedures */
+#define SCTP_ABORT            0x0200 /* Send an ABORT to peer */
+#define SCTP_UNORDERED        0x0400 /* Message is un-ordered */
+#define SCTP_ADDR_OVER        0x0800 /* Override the primary-address */
+#define SCTP_SENDALL          0x1000 /* Send this on all associations */
+#define SCTP_EOR              0x2000 /* end of message signal */
+#define SCTP_SACK_IMMEDIATELY 0x4000 /* Set I-Bit */
+
+#define INVALID_SINFO_FLAG(x) (((x) & 0xfffffff0 \
+                                    & ~(SCTP_EOF | SCTP_ABORT | SCTP_UNORDERED |\
+				        SCTP_ADDR_OVER | SCTP_SENDALL | SCTP_EOR |\
+					SCTP_SACK_IMMEDIATELY)) != 0)
+/* for the endpoint */
+
+/* The lower four bits is an enumeration of PR-SCTP policies */
+#define SCTP_PR_SCTP_NONE 0x0000 /* Reliable transfer */
+#define SCTP_PR_SCTP_TTL  0x0001 /* Time based PR-SCTP */
+#define SCTP_PR_SCTP_PRIO 0x0002 /* Buffer based PR-SCTP */
+#define SCTP_PR_SCTP_BUF  SCTP_PR_SCTP_PRIO /* For backwards compatibility */
+#define SCTP_PR_SCTP_RTX  0x0003 /* Number of retransmissions based PR-SCTP */
+#define SCTP_PR_SCTP_MAX  SCTP_PR_SCTP_RTX
+#define SCTP_PR_SCTP_ALL  0x000f /* Used for aggregated stats */
+
+#define PR_SCTP_POLICY(x)         ((x) & 0x0f)
+#define PR_SCTP_ENABLED(x)        ((PR_SCTP_POLICY(x) != SCTP_PR_SCTP_NONE) && \
+                                   (PR_SCTP_POLICY(x) != SCTP_PR_SCTP_ALL))
+#define PR_SCTP_TTL_ENABLED(x)    (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_TTL)
+#define PR_SCTP_BUF_ENABLED(x)    (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_BUF)
+#define PR_SCTP_RTX_ENABLED(x)    (PR_SCTP_POLICY(x) == SCTP_PR_SCTP_RTX)
+#define PR_SCTP_INVALID_POLICY(x) (PR_SCTP_POLICY(x) > SCTP_PR_SCTP_MAX)
+#define PR_SCTP_VALID_POLICY(x)   (PR_SCTP_POLICY(x) <= SCTP_PR_SCTP_MAX)
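+/*
+ * Illustrative sketch (not part of the original header): with the classic
+ * sndrcvinfo API the PR-SCTP policy travels in the lower four bits of
+ * sinfo_flags (which is why INVALID_SINFO_FLAG() above masks them out) and
+ * sinfo_timetolive carries the policy value:
+ *
+ *	struct sctp_sndrcvinfo sinfo;
+ *	int ttl_based;
+ *
+ *	memset(&sinfo, 0, sizeof(sinfo));
+ *	sinfo.sinfo_flags = SCTP_UNORDERED | SCTP_PR_SCTP_TTL;
+ *	sinfo.sinfo_timetolive = 3000;
+ *	ttl_based = PR_SCTP_TTL_ENABLED(sinfo.sinfo_flags);
+ */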
+
+/* Stat's */
+struct sctp_pcbinfo {
+	uint32_t ep_count;
+	uint32_t asoc_count;
+	uint32_t laddr_count;
+	uint32_t raddr_count;
+	uint32_t chk_count;
+	uint32_t readq_count;
+	uint32_t free_chunks;
+	uint32_t stream_oque;
+};
+
+struct sctp_sockstat {
+	sctp_assoc_t ss_assoc_id;
+	uint32_t ss_total_sndbuf;
+	uint32_t ss_total_recv_buf;
+};
+
+/*
+ * notification event structures
+ */
+
+/*
+ * association change event
+ */
+struct sctp_assoc_change {
+	uint16_t sac_type;
+	uint16_t sac_flags;
+	uint32_t sac_length;
+	uint16_t sac_state;
+	uint16_t sac_error;
+	uint16_t sac_outbound_streams;
+	uint16_t sac_inbound_streams;
+	sctp_assoc_t sac_assoc_id;
+	uint8_t sac_info[];
+};
+
+/* sac_state values */
+#define SCTP_COMM_UP            0x0001
+#define SCTP_COMM_LOST          0x0002
+#define SCTP_RESTART            0x0003
+#define SCTP_SHUTDOWN_COMP      0x0004
+#define SCTP_CANT_STR_ASSOC     0x0005
+
+/* sac_info values */
+#define SCTP_ASSOC_SUPPORTS_PR			0x01
+#define SCTP_ASSOC_SUPPORTS_AUTH		0x02
+#define SCTP_ASSOC_SUPPORTS_ASCONF		0x03
+#define SCTP_ASSOC_SUPPORTS_MULTIBUF		0x04
+#define SCTP_ASSOC_SUPPORTS_RE_CONFIG		0x05
+#define SCTP_ASSOC_SUPPORTS_INTERLEAVING	0x06
+#define SCTP_ASSOC_SUPPORTS_MAX			0x06
+/*
+ * Address event
+ */
+struct sctp_paddr_change {
+	uint16_t spc_type;
+	uint16_t spc_flags;
+	uint32_t spc_length;
+	struct sockaddr_storage spc_aaddr;
+	uint32_t spc_state;
+	uint32_t spc_error;
+	sctp_assoc_t spc_assoc_id;
+};
+
+/* paddr state values */
+#define SCTP_ADDR_AVAILABLE	0x0001
+#define SCTP_ADDR_UNREACHABLE	0x0002
+#define SCTP_ADDR_REMOVED	0x0003
+#define SCTP_ADDR_ADDED		0x0004
+#define SCTP_ADDR_MADE_PRIM	0x0005
+#define SCTP_ADDR_CONFIRMED	0x0006
+
+#define SCTP_ACTIVE		0x0001	/* SCTP_ADDR_REACHABLE */
+#define SCTP_INACTIVE		0x0002	/* neither SCTP_ADDR_REACHABLE
+					   nor SCTP_ADDR_UNCONFIRMED */
+#define SCTP_UNCONFIRMED	0x0200	/* SCTP_ADDR_UNCONFIRMED */
+
+/* remote error events */
+struct sctp_remote_error {
+	uint16_t sre_type;
+	uint16_t sre_flags;
+	uint32_t sre_length;
+	uint16_t sre_error;
+	sctp_assoc_t sre_assoc_id;
+	uint8_t sre_data[];
+};
+
+/* data send failure event (deprecated) */
+struct sctp_send_failed {
+	uint16_t ssf_type;
+	uint16_t ssf_flags;
+	uint32_t ssf_length;
+	uint32_t ssf_error;
+	struct sctp_sndrcvinfo ssf_info;
+	sctp_assoc_t ssf_assoc_id;
+	uint8_t ssf_data[];
+};
+
+/* data send failure event (not deprecated) */
+struct sctp_send_failed_event {
+	uint16_t ssfe_type;
+	uint16_t ssfe_flags;
+	uint32_t ssfe_length;
+	uint32_t ssfe_error;
+	struct sctp_sndinfo ssfe_info;
+	sctp_assoc_t ssfe_assoc_id;
+	uint8_t  ssfe_data[];
+};
+
+/* flag that indicates state of data */
+#define SCTP_DATA_UNSENT	0x0001	/* inqueue never on wire */
+#define SCTP_DATA_SENT		0x0002	/* on wire at failure */
+
+/* shutdown event */
+struct sctp_shutdown_event {
+	uint16_t sse_type;
+	uint16_t sse_flags;
+	uint32_t sse_length;
+	sctp_assoc_t sse_assoc_id;
+};
+
+/* Adaptation layer indication stuff */
+struct sctp_adaptation_event {
+	uint16_t sai_type;
+	uint16_t sai_flags;
+	uint32_t sai_length;
+	uint32_t sai_adaptation_ind;
+	sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaptation {
+	uint32_t ssb_adaptation_ind;
+};
+
+/* compatible old spelling */
+struct sctp_adaption_event {
+	uint16_t sai_type;
+	uint16_t sai_flags;
+	uint32_t sai_length;
+	uint32_t sai_adaption_ind;
+	sctp_assoc_t sai_assoc_id;
+};
+
+struct sctp_setadaption {
+	uint32_t ssb_adaption_ind;
+};
+
+
+/*
+ * Partial Delivery API event
+ */
+struct sctp_pdapi_event {
+	uint16_t pdapi_type;
+	uint16_t pdapi_flags;
+	uint32_t pdapi_length;
+	uint32_t pdapi_indication;
+	uint16_t pdapi_stream;
+	uint16_t pdapi_seq;
+	sctp_assoc_t pdapi_assoc_id;
+};
+
+/* indication values */
+#define SCTP_PARTIAL_DELIVERY_ABORTED	0x0001
+
+
+/*
+ * authentication key event
+ */
+struct sctp_authkey_event {
+	uint16_t auth_type;
+	uint16_t auth_flags;
+	uint32_t auth_length;
+	uint16_t auth_keynumber;
+	uint16_t auth_altkeynumber;
+	uint32_t auth_indication;
+	sctp_assoc_t auth_assoc_id;
+};
+
+/* indication values */
+#define SCTP_AUTH_NEW_KEY	0x0001
+#define SCTP_AUTH_NEWKEY	SCTP_AUTH_NEW_KEY
+#define SCTP_AUTH_NO_AUTH	0x0002
+#define SCTP_AUTH_FREE_KEY	0x0003
+
+
+struct sctp_sender_dry_event {
+	uint16_t sender_dry_type;
+	uint16_t sender_dry_flags;
+	uint32_t sender_dry_length;
+	sctp_assoc_t sender_dry_assoc_id;
+};
+
+
+/*
+ * Stream reset event - subscribe to SCTP_STREAM_RESET_EVENT
+ */
+struct sctp_stream_reset_event {
+	uint16_t strreset_type;
+	uint16_t strreset_flags;
+	uint32_t strreset_length;
+	sctp_assoc_t strreset_assoc_id;
+	uint16_t strreset_stream_list[];
+};
+
+/* flags in stream_reset_event (strreset_flags) */
+#define SCTP_STREAM_RESET_INCOMING_SSN  0x0001
+#define SCTP_STREAM_RESET_OUTGOING_SSN  0x0002
+#define SCTP_STREAM_RESET_DENIED        0x0004
+#define SCTP_STREAM_RESET_FAILED        0x0008
+
+/*
+ * Assoc reset event - subscribe to SCTP_ASSOC_RESET_EVENT
+ */
+struct sctp_assoc_reset_event {
+	uint16_t 	assocreset_type;
+	uint16_t	assocreset_flags;
+	uint32_t	assocreset_length;
+	sctp_assoc_t	assocreset_assoc_id;
+	uint32_t	assocreset_local_tsn;
+	uint32_t	assocreset_remote_tsn;
+};
+
+#define SCTP_ASSOC_RESET_DENIED		0x0004
+#define SCTP_ASSOC_RESET_FAILED		0x0008
+
+/*
+ * Stream change event - subscribe to SCTP_STREAM_CHANGE_EVENT
+ */
+struct sctp_stream_change_event {
+	uint16_t	strchange_type;
+	uint16_t	strchange_flags;
+	uint32_t	strchange_length;
+	sctp_assoc_t	strchange_assoc_id;
+	uint16_t	strchange_instrms;
+	uint16_t	strchange_outstrms;
+};
+
+#define SCTP_STREAM_CHANGE_DENIED	0x0004
+#define SCTP_STREAM_CHANGE_FAILED	0x0008
+
+
+/* SCTP notification event */
+struct sctp_tlv {
+	uint16_t sn_type;
+	uint16_t sn_flags;
+	uint32_t sn_length;
+};
+
+union sctp_notification {
+	struct sctp_tlv sn_header;
+	struct sctp_assoc_change sn_assoc_change;
+	struct sctp_paddr_change sn_paddr_change;
+	struct sctp_remote_error sn_remote_error;
+	struct sctp_send_failed sn_send_failed;
+	struct sctp_shutdown_event sn_shutdown_event;
+	struct sctp_adaptation_event sn_adaptation_event;
+	/* compatibility same as above */
+	struct sctp_adaption_event sn_adaption_event;
+	struct sctp_pdapi_event sn_pdapi_event;
+	struct sctp_authkey_event sn_auth_event;
+	struct sctp_sender_dry_event sn_sender_dry_event;
+	struct sctp_send_failed_event sn_send_failed_event;
+	struct sctp_stream_reset_event sn_strreset_event;
+	struct sctp_assoc_reset_event  sn_assocreset_event;
+	struct sctp_stream_change_event sn_strchange_event;
+};
+
+/* notification types */
+#define SCTP_ASSOC_CHANGE                       0x0001
+#define SCTP_PEER_ADDR_CHANGE                   0x0002
+#define SCTP_REMOTE_ERROR                       0x0003
+#define SCTP_SEND_FAILED                        0x0004
+#define SCTP_SHUTDOWN_EVENT                     0x0005
+#define SCTP_ADAPTATION_INDICATION              0x0006
+/* same as above */
+#define SCTP_ADAPTION_INDICATION                0x0006
+#define SCTP_PARTIAL_DELIVERY_EVENT             0x0007
+#define SCTP_AUTHENTICATION_EVENT               0x0008
+#define SCTP_STREAM_RESET_EVENT                 0x0009
+#define SCTP_SENDER_DRY_EVENT                   0x000a
+#define SCTP_NOTIFICATIONS_STOPPED_EVENT        0x000b /* we don't send this */
+#define SCTP_ASSOC_RESET_EVENT                  0x000c
+#define SCTP_STREAM_CHANGE_EVENT                0x000d
+#define SCTP_SEND_FAILED_EVENT                  0x000e
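+/*
+ * Illustrative sketch (not part of the original header): a message flagged
+ * with MSG_NOTIFICATION in the msg_flags returned by recvmsg()/sctp_recvmsg()
+ * carries one of the structures above.  Here "buf" is assumed to hold a
+ * complete notification and handle_comm_up() is a placeholder for
+ * application code:
+ *
+ *	union sctp_notification *snp = (union sctp_notification *)buf;
+ *
+ *	switch (snp->sn_header.sn_type) {
+ *	case SCTP_ASSOC_CHANGE:
+ *		if (snp->sn_assoc_change.sac_state == SCTP_COMM_UP)
+ *			handle_comm_up();
+ *		break;
+ *	case SCTP_SHUTDOWN_EVENT:
+ *	default:
+ *		break;
+ *	}
+ */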
+/*
+ * socket option structs
+ */
+
+struct sctp_paddrparams {
+	struct sockaddr_storage spp_address;
+	sctp_assoc_t spp_assoc_id;
+	uint32_t spp_hbinterval;
+	uint32_t spp_pathmtu;
+	uint32_t spp_flags;
+	uint32_t spp_ipv6_flowlabel;
+	uint16_t spp_pathmaxrxt;
+	uint8_t spp_dscp;
+};
+#define spp_ipv4_tos spp_dscp
+
+#define SPP_HB_ENABLE		0x00000001
+#define SPP_HB_DISABLE		0x00000002
+#define SPP_HB_DEMAND		0x00000004
+#define SPP_PMTUD_ENABLE	0x00000008
+#define SPP_PMTUD_DISABLE	0x00000010
+#define SPP_HB_TIME_IS_ZERO     0x00000080
+#define SPP_IPV6_FLOWLABEL      0x00000100
+#define SPP_DSCP                0x00000200
+#define SPP_IPV4_TOS            SPP_DSCP
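+/*
+ * Illustrative sketch (not part of the original header, assumes "sd" and a
+ * filled-in "peer" sockaddr exist): enabling a 5 second (5000 ms) heartbeat
+ * on one path with the SCTP_PEER_ADDR_PARAMS socket option from sctp.h:
+ *
+ *	struct sctp_paddrparams spp;
+ *
+ *	memset(&spp, 0, sizeof(spp));
+ *	memcpy(&spp.spp_address, &peer, sizeof(peer));
+ *	spp.spp_flags = SPP_HB_ENABLE;
+ *	spp.spp_hbinterval = 5000;
+ *	(void)setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
+ *	    &spp, sizeof(spp));
+ */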
+
+struct sctp_paddrthlds {
+	struct sockaddr_storage spt_address;
+	sctp_assoc_t spt_assoc_id;
+	uint16_t spt_pathmaxrxt;
+	uint16_t spt_pathpfthld;
+	uint16_t spt_pathcpthld;
+};
+
+struct sctp_paddrinfo {
+	struct sockaddr_storage spinfo_address;
+	sctp_assoc_t spinfo_assoc_id;
+	int32_t spinfo_state;
+	uint32_t spinfo_cwnd;
+	uint32_t spinfo_srtt;
+	uint32_t spinfo_rto;
+	uint32_t spinfo_mtu;
+};
+
+struct sctp_rtoinfo {
+	sctp_assoc_t srto_assoc_id;
+	uint32_t srto_initial;
+	uint32_t srto_max;
+	uint32_t srto_min;
+};
+
+struct sctp_assocparams {
+	sctp_assoc_t sasoc_assoc_id;
+	uint32_t sasoc_peer_rwnd;
+	uint32_t sasoc_local_rwnd;
+	uint32_t sasoc_cookie_life;
+	uint16_t sasoc_asocmaxrxt;
+	uint16_t sasoc_number_peer_destinations;
+};
+
+struct sctp_setprim {
+	struct sockaddr_storage ssp_addr;
+	sctp_assoc_t ssp_assoc_id;
+	uint8_t ssp_padding[4];
+};
+
+struct sctp_setpeerprim {
+	struct sockaddr_storage sspp_addr;
+	sctp_assoc_t sspp_assoc_id;
+	uint8_t sspp_padding[4];
+};
+
+struct sctp_getaddresses {
+	sctp_assoc_t sget_assoc_id;
+	/* addr is filled in for N * sockaddr_storage */
+	struct sockaddr addr[1];
+};
+
+struct sctp_status {
+	sctp_assoc_t sstat_assoc_id;
+	int32_t sstat_state;
+	uint32_t sstat_rwnd;
+	uint16_t sstat_unackdata;
+	uint16_t sstat_penddata;
+	uint16_t sstat_instrms;
+	uint16_t sstat_outstrms;
+	uint32_t sstat_fragmentation_point;
+	struct sctp_paddrinfo sstat_primary;
+};
+
+/*
+ * AUTHENTICATION support
+ */
+/* SCTP_AUTH_CHUNK */
+struct sctp_authchunk {
+	uint8_t sauth_chunk;
+};
+
+/* SCTP_AUTH_KEY */
+struct sctp_authkey {
+	sctp_assoc_t sca_assoc_id;
+	uint16_t sca_keynumber;
+	uint16_t sca_keylength;
+	uint8_t sca_key[];
+};
+
+/* SCTP_HMAC_IDENT */
+struct sctp_hmacalgo {
+	uint32_t shmac_number_of_idents;
+	uint16_t shmac_idents[];
+};
+
+/* AUTH hmac_id */
+#define SCTP_AUTH_HMAC_ID_RSVD		0x0000
+#define SCTP_AUTH_HMAC_ID_SHA1		0x0001	/* default, mandatory */
+#define SCTP_AUTH_HMAC_ID_SHA256	0x0003
+
+/* SCTP_AUTH_ACTIVE_KEY / SCTP_AUTH_DELETE_KEY */
+struct sctp_authkeyid {
+	sctp_assoc_t scact_assoc_id;
+	uint16_t scact_keynumber;
+};
+
+/* SCTP_PEER_AUTH_CHUNKS / SCTP_LOCAL_AUTH_CHUNKS */
+struct sctp_authchunks {
+	sctp_assoc_t gauth_assoc_id;
+	uint32_t gauth_number_of_chunks;
+	uint8_t gauth_chunks[];
+};
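+/*
+ * Illustrative sketch (not part of the original header, assumes "sd", "key"
+ * and "keylen" exist): because sca_key is a flexible array member, the
+ * SCTP_AUTH_KEY option (from sctp.h) is set with a heap-allocated structure:
+ *
+ *	struct sctp_authkey *ak;
+ *	socklen_t optlen = (socklen_t)(sizeof(*ak) + keylen);
+ *
+ *	if ((ak = calloc(1, optlen)) != NULL) {
+ *		ak->sca_assoc_id = SCTP_FUTURE_ASSOC;
+ *		ak->sca_keynumber = 1;
+ *		ak->sca_keylength = keylen;
+ *		memcpy(ak->sca_key, key, keylen);
+ *		(void)setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak, optlen);
+ *		free(ak);
+ *	}
+ */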
+
+struct sctp_assoc_value {
+	sctp_assoc_t assoc_id;
+	uint32_t assoc_value;
+};
+
+struct sctp_cc_option {
+	int option;
+	struct sctp_assoc_value aid_value;
+};
+
+struct sctp_stream_value {
+	sctp_assoc_t assoc_id;
+	uint16_t stream_id;
+	uint16_t stream_value;
+};
+
+struct sctp_assoc_ids {
+	uint32_t gaids_number_of_ids;
+	sctp_assoc_t gaids_assoc_id[];
+};
+
+struct sctp_sack_info {
+	sctp_assoc_t sack_assoc_id;
+	uint32_t sack_delay;
+	uint32_t sack_freq;
+};
+
+struct sctp_timeouts {
+	sctp_assoc_t stimo_assoc_id;
+	uint32_t stimo_init;
+	uint32_t stimo_data;
+	uint32_t stimo_sack;
+	uint32_t stimo_shutdown;
+	uint32_t stimo_heartbeat;
+	uint32_t stimo_cookie;
+	uint32_t stimo_shutdownack;
+};
+
+struct sctp_udpencaps {
+	struct sockaddr_storage sue_address;
+	sctp_assoc_t sue_assoc_id;
+	uint16_t sue_port;
+};
+
+struct sctp_prstatus {
+	sctp_assoc_t sprstat_assoc_id;
+	uint16_t sprstat_sid;
+	uint16_t sprstat_policy;
+	uint64_t sprstat_abandoned_unsent;
+	uint64_t sprstat_abandoned_sent;
+};
+
+struct sctp_cwnd_args {
+	struct sctp_nets *net;	/* network to */ /* FIXME: LP64 issue */
+	uint32_t cwnd_new_value;/* cwnd in k */
+	uint32_t pseudo_cumack;
+	uint16_t inflight;	/* flightsize in k */
+	uint16_t cwnd_augment;	/* increment to it */
+	uint8_t meets_pseudo_cumack;
+	uint8_t need_new_pseudo_cumack;
+	uint8_t cnt_in_send;
+	uint8_t cnt_in_str;
+};
+
+struct sctp_blk_args {
+	uint32_t onsb;		/* in 1k bytes */
+	uint32_t sndlen;	/* len of send being attempted */
+	uint32_t peer_rwnd;	/* rwnd of peer */
+	uint16_t send_sent_qcnt;/* chnk cnt */
+	uint16_t stream_qcnt;	/* chnk cnt */
+	uint16_t chunks_on_oque;/* chunks out */
+	uint16_t flight_size;   /* flight size in k */
+};
+
+/*
+ * Max number of streams we can reset in one request. Note that this is
+ * dictated not by the define but by the size of an mbuf cluster, so don't
+ * change this define and expect to be able to specify more. You must issue
+ * multiple resets if you want to reset more than SCTP_MAX_EXPLICT_STR_RESET
+ * streams.
+ */
+#define SCTP_MAX_EXPLICT_STR_RESET   1000
+
+struct sctp_reset_streams {
+	sctp_assoc_t srs_assoc_id;
+	uint16_t srs_flags;
+	uint16_t srs_number_streams;	/* 0 == ALL */
+	uint16_t srs_stream_list[];/* list if strrst_num_streams is not 0 */
+};
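+/*
+ * Illustrative sketch (not part of the original header, assumes "sd" refers
+ * to an established association with RE-CONFIG support negotiated): asking
+ * for a reset of two outgoing streams with the SCTP_RESET_STREAMS option and
+ * the SCTP_STREAM_RESET_OUTGOING flag, both from sctp.h:
+ *
+ *	struct sctp_reset_streams *srs;
+ *	socklen_t optlen = (socklen_t)(sizeof(*srs) + 2 * sizeof(uint16_t));
+ *
+ *	if ((srs = calloc(1, optlen)) != NULL) {
+ *		srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
+ *		srs->srs_number_streams = 2;
+ *		srs->srs_stream_list[0] = 1;
+ *		srs->srs_stream_list[1] = 2;
+ *		(void)setsockopt(sd, IPPROTO_SCTP, SCTP_RESET_STREAMS,
+ *		    srs, optlen);
+ *		free(srs);
+ *	}
+ */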
+
+struct sctp_add_streams {
+	sctp_assoc_t	sas_assoc_id;
+	uint16_t	sas_instrms;
+	uint16_t	sas_outstrms;
+};
+
+struct sctp_get_nonce_values {
+	sctp_assoc_t gn_assoc_id;
+	uint32_t gn_peers_tag;
+	uint32_t gn_local_tag;
+};
+
+/* Debugging logs */
+struct sctp_str_log {
+	void *stcb; /* FIXME: LP64 issue */
+	uint32_t n_tsn;
+	uint32_t e_tsn;
+	uint16_t n_sseq;
+	uint16_t e_sseq;
+	uint16_t strm;
+};
+
+struct sctp_sb_log {
+	void  *stcb; /* FIXME: LP64 issue */
+	uint32_t so_sbcc;
+	uint32_t stcb_sbcc;
+	uint32_t incr;
+};
+
+struct sctp_fr_log {
+	uint32_t largest_tsn;
+	uint32_t largest_new_tsn;
+	uint32_t tsn;
+};
+
+struct sctp_fr_map {
+	uint32_t base;
+	uint32_t cum;
+	uint32_t high;
+};
+
+struct sctp_rwnd_log {
+	uint32_t rwnd;
+	uint32_t send_size;
+	uint32_t overhead;
+	uint32_t new_rwnd;
+};
+
+struct sctp_mbcnt_log {
+	uint32_t total_queue_size;
+	uint32_t size_change;
+	uint32_t total_queue_mb_size;
+	uint32_t mbcnt_change;
+};
+
+struct sctp_sack_log {
+	uint32_t cumack;
+	uint32_t oldcumack;
+	uint32_t tsn;
+	uint16_t numGaps;
+	uint16_t numDups;
+};
+
+struct sctp_lock_log {
+	void *sock;  /* FIXME: LP64 issue */
+	void *inp; /* FIXME: LP64 issue */
+	uint8_t tcb_lock;
+	uint8_t inp_lock;
+	uint8_t info_lock;
+	uint8_t sock_lock;
+	uint8_t sockrcvbuf_lock;
+	uint8_t socksndbuf_lock;
+	uint8_t create_lock;
+	uint8_t resv;
+};
+
+struct sctp_rto_log {
+	void * net; /* FIXME: LP64 issue */
+	uint32_t rtt;
+};
+
+struct sctp_nagle_log {
+	void  *stcb; /* FIXME: LP64 issue */
+	uint32_t total_flight;
+	uint32_t total_in_queue;
+	uint16_t count_in_queue;
+	uint16_t count_in_flight;
+};
+
+struct sctp_sbwake_log {
+	void *stcb; /* FIXME: LP64 issue */
+	uint16_t send_q;
+	uint16_t sent_q;
+	uint16_t flight;
+	uint16_t wake_cnt;
+	uint8_t stream_qcnt;	/* chnk cnt */
+	uint8_t chunks_on_oque;/* chunks out */
+	uint8_t sbflags;
+	uint8_t sctpflags;
+};
+
+struct sctp_misc_info {
+	uint32_t log1;
+	uint32_t log2;
+	uint32_t log3;
+	uint32_t log4;
+};
+
+struct sctp_log_closing {
+	void *inp; /* FIXME: LP64 issue */
+	void *stcb;  /* FIXME: LP64 issue */
+	uint32_t sctp_flags;
+	uint16_t  state;
+	int16_t  loc;
+};
+
+struct sctp_mbuf_log {
+	struct mbuf *mp; /* FIXME: LP64 issue */
+	caddr_t  ext;
+	caddr_t  data;
+	uint16_t size;
+	uint8_t  refcnt;
+	uint8_t  mbuf_flags;
+};
+
+struct sctp_cwnd_log {
+	uint64_t time_event;
+	uint8_t  from;
+	uint8_t  event_type;
+	uint8_t  resv[2];
+	union {
+		struct sctp_log_closing close;
+		struct sctp_blk_args blk;
+		struct sctp_cwnd_args cwnd;
+		struct sctp_str_log strlog;
+		struct sctp_fr_log fr;
+		struct sctp_fr_map map;
+		struct sctp_rwnd_log rwnd;
+		struct sctp_mbcnt_log mbcnt;
+		struct sctp_sack_log sack;
+		struct sctp_lock_log lock;
+		struct sctp_rto_log rto;
+		struct sctp_sb_log sb;
+		struct sctp_nagle_log nagle;
+		struct sctp_sbwake_log wake;
+		struct sctp_mbuf_log mb;
+		struct sctp_misc_info misc;
+	}     x;
+};
+
+struct sctp_cwnd_log_req {
+	int32_t num_in_log;		/* Number in log */
+	int32_t num_ret;		/* Number returned */
+	int32_t start_at;		/* start at this one */
+	int32_t end_at;		        /* end at this one */
+	struct sctp_cwnd_log log[];
+};
+
+struct sctp_timeval {
+	uint32_t tv_sec;
+	uint32_t tv_usec;
+};
+
+struct sctpstat {
+	struct sctp_timeval sctps_discontinuitytime; /* sctpStats 18 (TimeStamp) */
+	/* MIB according to RFC 3873 */
+	uint32_t  sctps_currestab;           /* sctpStats  1   (Gauge32) */
+	uint32_t  sctps_activeestab;         /* sctpStats  2 (Counter32) */
+	uint32_t  sctps_restartestab;
+	uint32_t  sctps_collisionestab;
+	uint32_t  sctps_passiveestab;        /* sctpStats  3 (Counter32) */
+	uint32_t  sctps_aborted;             /* sctpStats  4 (Counter32) */
+	uint32_t  sctps_shutdown;            /* sctpStats  5 (Counter32) */
+	uint32_t  sctps_outoftheblue;        /* sctpStats  6 (Counter32) */
+	uint32_t  sctps_checksumerrors;      /* sctpStats  7 (Counter32) */
+	uint32_t  sctps_outcontrolchunks;    /* sctpStats  8 (Counter64) */
+	uint32_t  sctps_outorderchunks;      /* sctpStats  9 (Counter64) */
+	uint32_t  sctps_outunorderchunks;    /* sctpStats 10 (Counter64) */
+	uint32_t  sctps_incontrolchunks;     /* sctpStats 11 (Counter64) */
+	uint32_t  sctps_inorderchunks;       /* sctpStats 12 (Counter64) */
+	uint32_t  sctps_inunorderchunks;     /* sctpStats 13 (Counter64) */
+	uint32_t  sctps_fragusrmsgs;         /* sctpStats 14 (Counter64) */
+	uint32_t  sctps_reasmusrmsgs;        /* sctpStats 15 (Counter64) */
+	uint32_t  sctps_outpackets;          /* sctpStats 16 (Counter64) */
+	uint32_t  sctps_inpackets;           /* sctpStats 17 (Counter64) */
+
+	/* input statistics: */
+	uint32_t  sctps_recvpackets;         /* total input packets        */
+	uint32_t  sctps_recvdatagrams;       /* total input datagrams      */
+	uint32_t  sctps_recvpktwithdata;     /* total packets that had data */
+	uint32_t  sctps_recvsacks;           /* total input SACK chunks    */
+	uint32_t  sctps_recvdata;            /* total input DATA chunks    */
+	uint32_t  sctps_recvdupdata;         /* total input duplicate DATA chunks */
+	uint32_t  sctps_recvheartbeat;       /* total input HB chunks      */
+	uint32_t  sctps_recvheartbeatack;    /* total input HB-ACK chunks  */
+	uint32_t  sctps_recvecne;            /* total input ECNE chunks    */
+	uint32_t  sctps_recvauth;            /* total input AUTH chunks    */
+	uint32_t  sctps_recvauthmissing;     /* total input chunks missing AUTH */
+	uint32_t  sctps_recvivalhmacid;      /* total number of invalid HMAC ids received */
+	uint32_t  sctps_recvivalkeyid;       /* total number of invalid secret ids received */
+	uint32_t  sctps_recvauthfailed;      /* total number of auth failed */
+	uint32_t  sctps_recvexpress;         /* total fast path receives all one chunk */
+	uint32_t  sctps_recvexpressm;        /* total fast path multi-part data */
+	uint32_t  sctps_recvnocrc;
+	uint32_t  sctps_recvswcrc;
+	uint32_t  sctps_recvhwcrc;
+
+	/* output statistics: */
+	uint32_t  sctps_sendpackets;         /* total output packets       */
+	uint32_t  sctps_sendsacks;           /* total output SACKs         */
+	uint32_t  sctps_senddata;            /* total output DATA chunks   */
+	uint32_t  sctps_sendretransdata;     /* total output retransmitted DATA chunks */
+	uint32_t  sctps_sendfastretrans;     /* total output fast retransmitted DATA chunks */
+	uint32_t  sctps_sendmultfastretrans; /* total FR's that happened more than once
+                                              * to same chunk (u-del multi-fr algo).
+					      */
+	uint32_t  sctps_sendheartbeat;       /* total output HB chunks     */
+	uint32_t  sctps_sendecne;            /* total output ECNE chunks    */
+	uint32_t  sctps_sendauth;            /* total output AUTH chunks FIXME   */
+	uint32_t  sctps_senderrors;	     /* ip_output error counter */
+	uint32_t  sctps_sendnocrc;
+	uint32_t  sctps_sendswcrc;
+	uint32_t  sctps_sendhwcrc;
+	/* PCKDROPREP statistics: */
+	uint32_t  sctps_pdrpfmbox;           /* Packet drop from middle box */
+	uint32_t  sctps_pdrpfehos;           /* P-drop from end host */
+	uint32_t  sctps_pdrpmbda;            /* P-drops with data */
+	uint32_t  sctps_pdrpmbct;            /* P-drops, non-data, non-endhost */
+	uint32_t  sctps_pdrpbwrpt;           /* P-drop, non-endhost, bandwidth rep only */
+	uint32_t  sctps_pdrpcrupt;           /* P-drop, not enough for chunk header */
+	uint32_t  sctps_pdrpnedat;           /* P-drop, not enough data to confirm */
+	uint32_t  sctps_pdrppdbrk;           /* P-drop, where process_chunk_drop said break */
+	uint32_t  sctps_pdrptsnnf;           /* P-drop, could not find TSN */
+	uint32_t  sctps_pdrpdnfnd;           /* P-drop, attempt reverse TSN lookup */
+	uint32_t  sctps_pdrpdiwnp;           /* P-drop, e-host confirms zero-rwnd */
+	uint32_t  sctps_pdrpdizrw;           /* P-drop, midbox confirms no space */
+	uint32_t  sctps_pdrpbadd;            /* P-drop, data did not match TSN */
+	uint32_t  sctps_pdrpmark;            /* P-drop, TSN's marked for Fast Retran */
+	/* timeouts */
+	uint32_t  sctps_timoiterator;        /* Number of iterator timers that fired */
+	uint32_t  sctps_timodata;            /* Number of T3 data time outs */
+	uint32_t  sctps_timowindowprobe;     /* Number of window probe (T3) timers that fired */
+	uint32_t  sctps_timoinit;            /* Number of INIT timers that fired */
+	uint32_t  sctps_timosack;            /* Number of sack timers that fired */
+	uint32_t  sctps_timoshutdown;        /* Number of shutdown timers that fired */
+	uint32_t  sctps_timoheartbeat;       /* Number of heartbeat timers that fired */
+	uint32_t  sctps_timocookie;          /* Number of times a cookie timeout fired */
+	uint32_t  sctps_timosecret;          /* Number of times an endpoint changed its cookie secret*/
+	uint32_t  sctps_timopathmtu;         /* Number of PMTU timers that fired */
+	uint32_t  sctps_timoshutdownack;     /* Number of shutdown ack timers that fired */
+	uint32_t  sctps_timoshutdownguard;   /* Number of shutdown guard timers that fired */
+	uint32_t  sctps_timostrmrst;         /* Number of stream reset timers that fired */
+	uint32_t  sctps_timoearlyfr;         /* Number of early FR timers that fired */
+	uint32_t  sctps_timoasconf;          /* Number of times an asconf timer fired */
+	uint32_t  sctps_timodelprim;	     /* Number of times a prim_deleted timer fired */
+	uint32_t  sctps_timoautoclose;       /* Number of times auto close timer fired */
+	uint32_t  sctps_timoassockill;       /* Number of asoc free timers expired */
+	uint32_t  sctps_timoinpkill;         /* Number of inp free timers expired */
+	/* former early FR counters */
+	uint32_t  sctps_spare[11];
+	/* others */
+	uint32_t  sctps_hdrops;	          /* packet shorter than header */
+	uint32_t  sctps_badsum;	          /* checksum error             */
+	uint32_t  sctps_noport;           /* no endpoint for port       */
+	uint32_t  sctps_badvtag;          /* bad v-tag                  */
+	uint32_t  sctps_badsid;           /* bad SID                    */
+	uint32_t  sctps_nomem;            /* no memory                  */
+	uint32_t  sctps_fastretransinrtt; /* number of multiple FR in a RTT window */
+	uint32_t  sctps_markedretrans;
+	uint32_t  sctps_naglesent;        /* nagle allowed sending      */
+	uint32_t  sctps_naglequeued;      /* nagle doesn't allow sending */
+	uint32_t  sctps_maxburstqueued;   /* max burst doesn't allow sending */
+	uint32_t  sctps_ifnomemqueued;    /* look ahead tells us no memory in
+                                         * interface ring buffer OR we had a
+					 * send error and are queuing one send.
+                                         */
+	uint32_t  sctps_windowprobed;     /* total number of window probes sent */
+	uint32_t  sctps_lowlevelerr;	/* total times an output error causes us
+					 * to clamp down on next user send.
+					 */
+	uint32_t  sctps_lowlevelerrusr;	/* total times sctp_senderrors were caused from
+					 * a user send from a user invoked send not
+					 * a sack response
+					 */
+	uint32_t  sctps_datadropchklmt;	/* Number of in data drops due to chunk limit reached */
+	uint32_t  sctps_datadroprwnd;	/* Number of in data drops due to rwnd limit reached */
+	uint32_t  sctps_ecnereducedcwnd;  /* Number of times a ECN reduced the cwnd */
+	uint32_t  sctps_vtagexpress;	/* Used express lookup via vtag */
+	uint32_t  sctps_vtagbogus;	/* Collision in express lookup. */
+	uint32_t  sctps_primary_randry;	/* Number of times the sender ran dry of user data on primary */
+	uint32_t  sctps_cmt_randry;       /* Same as above, but for CMT */
+	uint32_t  sctps_slowpath_sack;    /* Sacks the slow way */
+	uint32_t  sctps_wu_sacks_sent;	/* Window Update only sacks sent */
+	uint32_t  sctps_sends_with_flags; /* number of sends with sinfo_flags !=0 */
+	uint32_t  sctps_sends_with_unord; /* number of unordered sends */
+	uint32_t  sctps_sends_with_eof; 	/* number of sends with EOF flag set */
+	uint32_t  sctps_sends_with_abort; /* number of sends with ABORT flag set */
+	uint32_t  sctps_protocol_drain_calls;	/* number of times protocol drain called */
+	uint32_t  sctps_protocol_drains_done; 	/* number of times we did a protocol drain */
+	uint32_t  sctps_read_peeks;	/* Number of times recv was called with peek */
+	uint32_t  sctps_cached_chk;       /* Number of cached chunks used */
+	uint32_t  sctps_cached_strmoq;    /* Number of cached stream oq's used */
+	uint32_t  sctps_left_abandon;     /* Number of unread messages abandoned by close */
+	uint32_t  sctps_send_burst_avoid; /* Unused */
+	uint32_t  sctps_send_cwnd_avoid;  /* Send cwnd full  avoidance, already max burst inflight to net */
+	uint32_t  sctps_fwdtsn_map_over;  /* number of map array over-runs via fwd-tsn's */
+	uint32_t  sctps_queue_upd_ecne;  /* Number of times we queued or updated an ECN chunk on send queue */
+	uint32_t  sctps_reserved[31];     /* Future ABI compat - remove int's from here when adding new */
+};
+
+#define SCTP_STAT_INCR(_x) SCTP_STAT_INCR_BY(_x,1)
+#define SCTP_STAT_DECR(_x) SCTP_STAT_DECR_BY(_x,1)
+#if defined(__FreeBSD__) && defined(SMP) && defined(SCTP_USE_PERCPU_STAT)
+#define SCTP_STAT_INCR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x += _d)
+#define SCTP_STAT_DECR_BY(_x,_d) (SCTP_BASE_STATS[PCPU_GET(cpuid)]._x -= _d)
+#else
+#define SCTP_STAT_INCR_BY(_x,_d) atomic_add_int(&SCTP_BASE_STAT(_x), _d)
+#define SCTP_STAT_DECR_BY(_x,_d) atomic_subtract_int(&SCTP_BASE_STAT(_x), _d)
+#endif
+/* The following macros are for handling MIB values, */
+#define SCTP_STAT_INCR_COUNTER32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_COUNTER64(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_INCR_GAUGE32(_x) SCTP_STAT_INCR(_x)
+#define SCTP_STAT_DECR_COUNTER32(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_COUNTER64(_x) SCTP_STAT_DECR(_x)
+#define SCTP_STAT_DECR_GAUGE32(_x) SCTP_STAT_DECR(_x)
+
+#if defined(__Userspace__)
+union sctp_sockstore {
+#if defined(INET)
+	struct sockaddr_in sin;
+#endif
+#if defined(INET6)
+	struct sockaddr_in6 sin6;
+#endif
+	struct sockaddr_conn sconn;
+	struct sockaddr sa;
+};
+#else
+union sctp_sockstore {
+	struct sockaddr_in sin;
+	struct sockaddr_in6 sin6;
+	struct sockaddr sa;
+};
+#endif
+
+
+/***********************************/
+/* And something for us old timers */
+/***********************************/
+
+#ifndef __APPLE__
+#ifndef __Userspace__
+#ifndef ntohll
+#if defined(__Userspace_os_Linux)
+#ifndef _BSD_SOURCE
+#define _BSD_SOURCE
+#endif
+#include <endian.h>
+#else
+#include <sys/endian.h>
+#endif
+#define ntohll(x) be64toh(x)
+#endif
+
+#ifndef htonll
+#if defined(__Userspace_os_Linux)
+#ifndef _BSD_SOURCE
+#define _BSD_SOURCE
+#endif
+#include <endian.h>
+#else
+#include <sys/endian.h>
+#endif
+#define htonll(x) htobe64(x)
+#endif
+#endif
+#endif
+/***********************************/
+
+
+struct xsctp_inpcb {
+	uint32_t last;
+	uint32_t flags;
+#if defined(__FreeBSD__) && __FreeBSD_version < 1000048
+	uint32_t features;
+#else
+	uint64_t features;
+#endif
+	uint32_t total_sends;
+	uint32_t total_recvs;
+	uint32_t total_nospaces;
+	uint32_t fragmentation_point;
+	uint16_t local_port;
+#if defined(__FreeBSD__) && __FreeBSD_version > 1100096
+	uint16_t qlen_old;
+	uint16_t maxqlen_old;
+#else
+	uint16_t qlen;
+	uint16_t maxqlen;
+#endif
+#if defined(__Windows__)
+	uint16_t padding;
+#endif
+#if !(defined(__FreeBSD__) && (__FreeBSD_version < 1001517))
+	void *socket;
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version > 1100096
+	uint32_t qlen;
+	uint32_t maxqlen;
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version < 1000048
+	uint32_t extra_padding[32]; /* future */
+#elif defined(__FreeBSD__) && (__FreeBSD_version < 1001517)
+	uint32_t extra_padding[31]; /* future */
+#else
+#if defined(__LP64__)
+#if defined(__FreeBSD__) && __FreeBSD_version > 1100096
+	uint32_t extra_padding[27]; /* future */
+#else
+	uint32_t extra_padding[29]; /* future */
+#endif
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version > 1100096
+	uint32_t extra_padding[28]; /* future */
+#else
+	uint32_t extra_padding[30]; /* future */
+#endif
+#endif
+#endif
+};
+
+struct xsctp_tcb {
+	union sctp_sockstore primary_addr;      /* sctpAssocEntry 5/6 */
+	uint32_t last;
+	uint32_t heartbeat_interval;            /* sctpAssocEntry 7   */
+	uint32_t state;                         /* sctpAssocEntry 8   */
+	uint32_t in_streams;                    /* sctpAssocEntry 9   */
+	uint32_t out_streams;                   /* sctpAssocEntry 10  */
+	uint32_t max_nr_retrans;                /* sctpAssocEntry 11  */
+	uint32_t primary_process;               /* sctpAssocEntry 12  */
+	uint32_t T1_expireries;                 /* sctpAssocEntry 13  */
+	uint32_t T2_expireries;                 /* sctpAssocEntry 14  */
+	uint32_t retransmitted_tsns;            /* sctpAssocEntry 15  */
+	uint32_t total_sends;
+	uint32_t total_recvs;
+	uint32_t local_tag;
+	uint32_t remote_tag;
+	uint32_t initial_tsn;
+	uint32_t highest_tsn;
+	uint32_t cumulative_tsn;
+	uint32_t cumulative_tsn_ack;
+	uint32_t mtu;
+	uint32_t refcnt;
+	uint16_t local_port;                    /* sctpAssocEntry 3   */
+	uint16_t remote_port;                   /* sctpAssocEntry 4   */
+	struct sctp_timeval start_time;         /* sctpAssocEntry 16  */
+	struct sctp_timeval discontinuity_time; /* sctpAssocEntry 17  */
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 800000
+	uint32_t peers_rwnd;
+	sctp_assoc_t assoc_id;                  /* sctpAssocEntry 1   */
+	uint32_t extra_padding[32];              /* future */
+#else
+#endif
+#else
+	uint32_t peers_rwnd;
+	sctp_assoc_t assoc_id;                  /* sctpAssocEntry 1   */
+	uint32_t extra_padding[32];              /* future */
+#endif
+};
+
+struct xsctp_laddr {
+	union sctp_sockstore address;    /* sctpAssocLocalAddrEntry 1/2 */
+	uint32_t last;
+	struct sctp_timeval start_time;  /* sctpAssocLocalAddrEntry 3   */
+	uint32_t extra_padding[32];       /* future */
+};
+
+struct xsctp_raddr {
+	union sctp_sockstore address;      /* sctpAssocLocalRemEntry 1/2 */
+	uint32_t last;
+	uint32_t rto;                      /* sctpAssocLocalRemEntry 5   */
+	uint32_t max_path_rtx;             /* sctpAssocLocalRemEntry 6   */
+	uint32_t rtx;                      /* sctpAssocLocalRemEntry 7   */
+	uint32_t error_counter;            /*                            */
+	uint32_t cwnd;                     /*                            */
+	uint32_t flight_size;              /*                            */
+	uint32_t mtu;                      /*                            */
+	uint8_t active;                    /* sctpAssocLocalRemEntry 3   */
+	uint8_t confirmed;                 /*                            */
+	uint8_t heartbeat_enabled;         /* sctpAssocLocalRemEntry 4   */
+	uint8_t potentially_failed;
+	struct sctp_timeval start_time;    /* sctpAssocLocalRemEntry 8   */
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 800000
+	uint32_t rtt;
+	uint32_t heartbeat_interval;
+	uint32_t ssthresh;
+	uint32_t extra_padding[30];              /* future */
+#endif
+#else
+	uint32_t rtt;
+	uint32_t heartbeat_interval;
+	uint32_t ssthresh;
+	uint32_t extra_padding[30];              /* future */
+#endif
+};
+
+#define SCTP_MAX_LOGGING_SIZE 30000
+#define SCTP_TRACE_PARAMS 6                /* This number MUST be even   */
+
+struct sctp_log_entry {
+	uint64_t timestamp;
+	uint32_t subsys;
+	uint32_t padding;
+	uint32_t params[SCTP_TRACE_PARAMS];
+};
+
+struct sctp_log {
+	struct sctp_log_entry entry[SCTP_MAX_LOGGING_SIZE];
+	uint32_t index;
+	uint32_t padding;
+};
+
+/*
+ * Kernel defined for sctp_send
+ */
+#if defined(_KERNEL) || defined(__Userspace__)
+int
+sctp_lower_sosend(struct socket *so,
+    struct sockaddr *addr,
+    struct uio *uio,
+#if defined(__Panda__)
+    pakhandle_type i_pak,
+    pakhandle_type i_control,
+#else
+    struct mbuf *i_pak,
+    struct mbuf *control,
+#endif
+    int flags,
+    struct sctp_sndrcvinfo *srcv
+#if !(defined(__Panda__) || defined(__Userspace__))
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+    ,struct thread *p
+#elif defined(__Windows__)
+    , PKTHREAD p
+#else
+    ,struct proc *p
+#endif
+#endif
+);
+
+int
+sctp_sorecvmsg(struct socket *so,
+    struct uio *uio,
+#if defined(__Panda__)
+    particletype **mp,
+#else
+    struct mbuf **mp,
+#endif
+    struct sockaddr *from,
+    int fromlen,
+    int *msg_flags,
+    struct sctp_sndrcvinfo *sinfo,
+    int filling_sinfo);
+#endif
+
+/*
+ * API system calls
+ */
+#if !(defined(_KERNEL)) && !(defined(__Userspace__))
+
+__BEGIN_DECLS
+#if defined(__FreeBSD__) && __FreeBSD_version < 902000
+int	sctp_peeloff __P((int, sctp_assoc_t));
+int	sctp_bindx __P((int, struct sockaddr *, int, int));
+int	sctp_connectx __P((int, const struct sockaddr *, int, sctp_assoc_t *));
+int	sctp_getaddrlen __P((sa_family_t));
+int	sctp_getpaddrs __P((int, sctp_assoc_t, struct sockaddr **));
+void	sctp_freepaddrs __P((struct sockaddr *));
+int	sctp_getladdrs __P((int, sctp_assoc_t, struct sockaddr **));
+void	sctp_freeladdrs __P((struct sockaddr *));
+int	sctp_opt_info __P((int, sctp_assoc_t, int, void *, socklen_t *));
+
+/* deprecated */
+ssize_t	sctp_sendmsg __P((int, const void *, size_t, const struct sockaddr *,
+	    socklen_t, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t));
+
+/* deprecated */
+ssize_t	sctp_send __P((int, const void *, size_t,
+	    const struct sctp_sndrcvinfo *, int));
+
+/* deprecated */
+ssize_t	sctp_sendx __P((int, const void *, size_t, struct sockaddr *,
+	    int, struct sctp_sndrcvinfo *, int));
+
+/* deprecated */
+ssize_t	sctp_sendmsgx __P((int sd, const void *, size_t, struct sockaddr *,
+	    int, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t));
+
+sctp_assoc_t	sctp_getassocid __P((int, struct sockaddr *));
+
+/* deprecated */
+ssize_t	sctp_recvmsg __P((int, void *, size_t, struct sockaddr *, socklen_t *,
+	    struct sctp_sndrcvinfo *, int *));
+
+ssize_t	sctp_sendv __P((int, const struct iovec *, int, struct sockaddr *,
+	    int, void *, socklen_t, unsigned int, int));
+
+ssize_t	sctp_recvv __P((int, const struct iovec *, int, struct sockaddr *,
+	    socklen_t *, void *, socklen_t *, unsigned int *, int *));
+#else
+int	sctp_peeloff(int, sctp_assoc_t);
+int	sctp_bindx(int, struct sockaddr *, int, int);
+int	sctp_connectx(int, const struct sockaddr *, int, sctp_assoc_t *);
+int	sctp_getaddrlen(sa_family_t);
+int	sctp_getpaddrs(int, sctp_assoc_t, struct sockaddr **);
+void	sctp_freepaddrs(struct sockaddr *);
+int	sctp_getladdrs(int, sctp_assoc_t, struct sockaddr **);
+void	sctp_freeladdrs(struct sockaddr *);
+int	sctp_opt_info(int, sctp_assoc_t, int, void *, socklen_t *);
+
+/* deprecated */
+ssize_t	sctp_sendmsg(int, const void *, size_t, const struct sockaddr *,
+	    socklen_t, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t);
+
+/* deprecated */
+ssize_t	sctp_send(int, const void *, size_t,
+	    const struct sctp_sndrcvinfo *, int);
+
+/* deprecated */
+ssize_t	sctp_sendx(int, const void *, size_t, struct sockaddr *,
+	    int, struct sctp_sndrcvinfo *, int);
+
+/* deprecated */
+ssize_t	sctp_sendmsgx(int sd, const void *, size_t, struct sockaddr *,
+	    int, uint32_t, uint32_t, uint16_t, uint32_t, uint32_t);
+
+sctp_assoc_t	sctp_getassocid(int, struct sockaddr *);
+
+/* deprecated */
+ssize_t	sctp_recvmsg(int, void *, size_t, struct sockaddr *, socklen_t *,
+	    struct sctp_sndrcvinfo *, int *);
+
+ssize_t	sctp_sendv(int, const struct iovec *, int, struct sockaddr *,
+	    int, void *, socklen_t, unsigned int, int);
+
+ssize_t	sctp_recvv(int, const struct iovec *, int, struct sockaddr *,
+	    socklen_t *, void *, socklen_t *, unsigned int *, int *);
+#endif
+__END_DECLS
+
+#endif				/* !_KERNEL */
+#endif				/* !_NETINET_SCTP_UIO_H_ */
diff --git a/usrsctplib/netinet/sctp_userspace.c b/usrsctplib/netinet/sctp_userspace.c
new file mode 100755
index 0000000..1c21160
--- /dev/null
+++ b/usrsctplib/netinet/sctp_userspace.c
@@ -0,0 +1,466 @@
+/*-
+ * Copyright (c) 2011-2012 Irene Ruengeler
+ * Copyright (c) 2011-2012 Michael Tuexen
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+
+#ifdef _WIN32
+#include <netinet/sctp_pcb.h>
+#include <sys/timeb.h>
+#include <iphlpapi.h>
+#pragma comment(lib, "IPHLPAPI.lib")
+#endif
+#include <netinet/sctp_os_userspace.h>
+#if defined(__Userspace_os_FreeBSD)
+#include <pthread_np.h>
+#endif
+
+#if defined(__Userspace_os_Linux)
+#include <sys/prctl.h>
+#endif
+
+#if defined(__Userspace_os_Windows)
+/* Adapter to translate Unix thread start routines to Windows thread start
+ * routines.
+ */
+static DWORD WINAPI
+sctp_create_thread_adapter(void *arg) {
+	start_routine_t start_routine = (start_routine_t)arg;
+	return start_routine(NULL) == NULL;
+}
+
+int
+sctp_userspace_thread_create(userland_thread_t *thread, start_routine_t start_routine)
+{
+	*thread = CreateThread(NULL, 0, sctp_create_thread_adapter,
+			       (void *)start_routine, 0, NULL);
+	if (*thread == NULL)
+		return GetLastError();
+	return 0;
+}
+#else
+int
+sctp_userspace_thread_create(userland_thread_t *thread, start_routine_t start_routine)
+{
+	return pthread_create(thread, NULL, start_routine, NULL);
+}
+#endif
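+/*
+ * Illustrative sketch (not part of the original sources): both variants
+ * above are used the same way; "example_thread" is a placeholder for the
+ * actual start routine (e.g. the timer or iterator loop):
+ *
+ *	static void *
+ *	example_thread(void *arg)
+ *	{
+ *		sctp_userspace_set_threadname("sctp_example");
+ *		return (NULL);
+ *	}
+ *
+ *	userland_thread_t tid;
+ *
+ *	if (sctp_userspace_thread_create(&tid, &example_thread) != 0)
+ *		SCTP_PRINTF("Unable to start example thread.\n");
+ */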
+
+void
+sctp_userspace_set_threadname(const char *name)
+{
+#if defined(__Userspace_os_Darwin)
+	pthread_setname_np(name);
+#endif
+#if defined(__Userspace_os_Linux)
+	prctl(PR_SET_NAME, name);
+#endif
+#if defined(__Userspace_os_FreeBSD)
+	pthread_set_name_np(pthread_self(), name);
+#endif
+}
+
+#if !defined(_WIN32) && !defined(__Userspace_os_NaCl)
+int
+sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af)
+{
+	struct ifreq ifr;
+	int fd;
+
+	memset(&ifr, 0, sizeof(struct ifreq));
+	if_indextoname(if_index, ifr.ifr_name);
+	/* TODO can I use the raw socket here and not have to open a new one with each query? */
+	if ((fd = socket(af, SOCK_DGRAM, 0)) < 0)
+		return (0);
+	if (ioctl(fd, SIOCGIFMTU, &ifr) < 0) {
+		close(fd);
+		return (0);
+	}
+	close(fd);
+	return ifr.ifr_mtu;
+}
+#endif
+
+#if defined(__Userspace_os_NaCl)
+int
+sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af)
+{
+	return 1280;
+}
+#endif
+
+#ifdef _WIN32
+int
+sctp_userspace_get_mtu_from_ifn(uint32_t if_index, int af)
+{
+	PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapt;
+	DWORD AdapterAddrsSize, Err;
+	int ret;
+
+	ret = 0;
+	AdapterAddrsSize = 0;
+	pAdapterAddrs = NULL;
+	if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+		if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() sizing failed with error code %d, AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+			ret = -1;
+			goto cleanup;
+		}
+	}
+	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+		SCTPDBG(SCTP_DEBUG_USR, "Memory allocation error!\n");
+		ret = -1;
+		goto cleanup;
+	}
+	if ((Err = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersAddresses() failed with error code %d\n", Err);
+		ret = -1;
+		goto cleanup;
+	}
+	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next) {
+		if (pAdapt->IfIndex == if_index) {
+			ret = pAdapt->Mtu;
+			break;
+		}
+	}
+cleanup:
+	if (pAdapterAddrs != NULL) {
+		GlobalFree(pAdapterAddrs);
+	}
+	return (ret);
+}
+
+void
+getwintimeofday(struct timeval *tv)
+{
+	struct timeb tb;
+
+	ftime(&tb);
+	tv->tv_sec = (long)tb.time;
+	tv->tv_usec = (long)(tb.millitm) * 1000L;
+}
+
+int
+Win_getifaddrs(struct ifaddrs** interfaces)
+{
+	int ret;
+#if defined(INET) || defined(INET6)
+	DWORD Err, AdapterAddrsSize;
+	int count;
+	PIP_ADAPTER_ADDRESSES pAdapterAddrs, pAdapt;
+	struct ifaddrs *ifa;
+#endif
+#if defined(INET)
+	struct sockaddr_in *addr;
+#endif
+#if defined(INET6)
+	struct sockaddr_in6 *addr6;
+#endif
+#if defined(INET) || defined(INET6)
+	count = 0;
+#endif
+	ret = 0;
+#if defined(INET)
+	AdapterAddrsSize = 0;
+	pAdapterAddrs = NULL;
+	if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+		if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersV4Addresses() sizing failed with error code %d and AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+			ret = -1;
+			goto cleanup;
+		}
+	}
+	/* Allocate memory from sizing information */
+	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+		SCTPDBG(SCTP_DEBUG_USR, "Memory allocation error!\n");
+		ret = -1;
+		goto cleanup;
+	}
+	/* Get actual adapter information */
+	if ((Err = GetAdaptersAddresses(AF_INET, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersV4Addresses() failed with error code %d\n", Err);
+		ret = -1;
+		goto cleanup;
+	}
+	/* Enumerate through each returned adapter and save its information */
+	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next, count++) {
+		addr = (struct sockaddr_in *)malloc(sizeof(struct sockaddr_in));
+		ifa = (struct ifaddrs *)malloc(sizeof(struct ifaddrs));
+		if ((addr == NULL) || (ifa == NULL)) {
+			SCTPDBG(SCTP_DEBUG_USR, "Can't allocate memory\n");
+			ret = -1;
+			goto cleanup;
+		}
+		ifa->ifa_name = _strdup(pAdapt->AdapterName);
+		ifa->ifa_flags = pAdapt->Flags;
+		ifa->ifa_addr = (struct sockaddr *)addr;
+		memcpy(addr, pAdapt->FirstUnicastAddress->Address.lpSockaddr, sizeof(struct sockaddr_in));
+		interfaces[count] = ifa;
+	}
+	GlobalFree(pAdapterAddrs);
+#endif
+#if defined(INET6)
+	AdapterAddrsSize = 0;
+	pAdapterAddrs = NULL;
+	if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, NULL, &AdapterAddrsSize)) != 0) {
+		if ((Err != ERROR_BUFFER_OVERFLOW) && (Err != ERROR_INSUFFICIENT_BUFFER)) {
+			SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersV6Addresses() sizing failed with error code %d AdapterAddrsSize = %d\n", Err, AdapterAddrsSize);
+			ret = -1;
+			goto cleanup;
+		}
+	}
+	/* Allocate memory from sizing information */
+	if ((pAdapterAddrs = (PIP_ADAPTER_ADDRESSES) GlobalAlloc(GPTR, AdapterAddrsSize)) == NULL) {
+		SCTPDBG(SCTP_DEBUG_USR, "Memory allocation error!\n");
+		ret = -1;
+		goto cleanup;
+	}
+	/* Get actual adapter information */
+	if ((Err = GetAdaptersAddresses(AF_INET6, 0, NULL, pAdapterAddrs, &AdapterAddrsSize)) != ERROR_SUCCESS) {
+		SCTPDBG(SCTP_DEBUG_USR, "GetAdaptersV6Addresses() failed with error code %d\n", Err);
+		ret = -1;
+		goto cleanup;
+	}
+	/* Enumerate through each returned adapter and save its information */
+	for (pAdapt = pAdapterAddrs; pAdapt; pAdapt = pAdapt->Next, count++) {
+		addr6 = (struct sockaddr_in6 *)malloc(sizeof(struct sockaddr_in6));
+		ifa = (struct ifaddrs *)malloc(sizeof(struct ifaddrs));
+		if ((addr6 == NULL) || (ifa == NULL)) {
+			SCTPDBG(SCTP_DEBUG_USR, "Can't allocate memory\n");
+			ret = -1;
+			goto cleanup;
+		}
+		ifa->ifa_name = _strdup(pAdapt->AdapterName);
+		ifa->ifa_flags = pAdapt->Flags;
+		ifa->ifa_addr = (struct sockaddr *)addr6;
+		memcpy(addr6, pAdapt->FirstUnicastAddress->Address.lpSockaddr, sizeof(struct sockaddr_in6));
+		interfaces[count] = ifa;
+	}
+#endif
+#if defined(INET) || defined(INET6)
+cleanup:
+	if (pAdapterAddrs != NULL) {
+		GlobalFree(pAdapterAddrs);
+	}
+#endif
+	return (ret);
+}
+
+int
+win_if_nametoindex(const char *ifname)
+{
+	IP_ADAPTER_ADDRESSES *addresses, *addr;
+	ULONG status, size;
+	int index = 0;
+
+	if (!ifname) {
+		return 0;
+	}
+
+	size = 0;
+	status = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, NULL, &size);
+	if (status != ERROR_BUFFER_OVERFLOW) {
+		return 0;
+	}
+	addresses = malloc(size);
+	status = GetAdaptersAddresses(AF_UNSPEC, 0, NULL, addresses, &size);
+	if (status == ERROR_SUCCESS) {
+		for (addr = addresses; addr; addr = addr->Next) {
+			if (addr->AdapterName && !strcmp(ifname, addr->AdapterName)) {
+				index = addr->IfIndex;
+				break;
+			}
+		}
+	}
+
+	free(addresses);
+	return index;
+}
+
+#if WINVER < 0x0600
+/* These functions are written based on the code at
+ * http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
+ * Therefore, for the rest of the file the following applies:
+ *
+ *
+ * Copyright and Licensing Information for ACE(TM), TAO(TM), CIAO(TM),
+ * DAnCE(TM), and CoSMIC(TM)
+ *
+ * [1]ACE(TM), [2]TAO(TM), [3]CIAO(TM), DAnCE(TM), and [4]CoSMIC(TM)
+ * (henceforth referred to as "DOC software") are copyrighted by
+ * [5]Douglas C. Schmidt and his [6]research group at [7]Washington
+ * University, [8]University of California, Irvine, and [9]Vanderbilt
+ * University, Copyright (c) 1993-2012, all rights reserved. Since DOC
+ * software is open-source, freely available software, you are free to
+ * use, modify, copy, and distribute--perpetually and irrevocably--the
+ * DOC software source code and object code produced from the source, as
+ * well as copy and distribute modified versions of this software. You
+ * must, however, include this copyright statement along with any code
+ * built using DOC software that you release. No copyright statement
+ * needs to be provided if you just ship binary executables of your
+ * software products.
+ *
+ * You can use DOC software in commercial and/or binary software releases
+ * and are under no obligation to redistribute any of your source code
+ * that is built using DOC software. Note, however, that you may not
+ * misappropriate the DOC software code, such as copyrighting it yourself
+ * or claiming authorship of the DOC software code, in a way that will
+ * prevent DOC software from being distributed freely using an
+ * open-source development model. You needn't inform anyone that you're
+ * using DOC software in your software, though we encourage you to let
+ * [10]us know so we can promote your project in the [11]DOC software
+ * success stories.
+ *
+ * The [12]ACE, [13]TAO, [14]CIAO, [15]DAnCE, and [16]CoSMIC web sites
+ * are maintained by the [17]DOC Group at the [18]Institute for Software
+ * Integrated Systems (ISIS) and the [19]Center for Distributed Object
+ * Computing of Washington University, St. Louis for the development of
+ * open-source software as part of the open-source software community.
+ * Submissions are provided by the submitter ``as is'' with no warranties
+ * whatsoever, including any warranty of merchantability, noninfringement
+ * of third party intellectual property, or fitness for any particular
+ * purpose. In no event shall the submitter be liable for any direct,
+ * indirect, special, exemplary, punitive, or consequential damages,
+ * including without limitation, lost profits, even if advised of the
+ * possibility of such damages. Likewise, DOC software is provided as is
+ * with no warranties of any kind, including the warranties of design,
+ * merchantability, and fitness for a particular purpose,
+ * noninfringement, or arising from a course of dealing, usage or trade
+ * practice. Washington University, UC Irvine, Vanderbilt University,
+ * their employees, and students shall have no liability with respect to
+ * the infringement of copyrights, trade secrets or any patents by DOC
+ * software or any part thereof. Moreover, in no event will Washington
+ * University, UC Irvine, or Vanderbilt University, their employees, or
+ * students be liable for any lost revenue or profits or other special,
+ * indirect and consequential damages.
+ *
+ * DOC software is provided with no support and without any obligation on
+ * the part of Washington University, UC Irvine, Vanderbilt University,
+ * their employees, or students to assist in its use, correction,
+ * modification, or enhancement. A [20]number of companies around the
+ * world provide commercial support for DOC software, however. DOC
+ * software is Y2K-compliant, as long as the underlying OS platform is
+ * Y2K-compliant. Likewise, DOC software is compliant with the new US
+ * daylight savings rule passed by Congress as "The Energy Policy Act of
+ * 2005," which established new daylight savings times (DST) rules for
+ * the United States that expand DST as of March 2007. Since DOC software
+ * obtains time/date and calendaring information from operating systems
+ * users will not be affected by the new DST rules as long as they
+ * upgrade their operating systems accordingly.
+ *
+ * The names ACE(TM), TAO(TM), CIAO(TM), DAnCE(TM), CoSMIC(TM),
+ * Washington University, UC Irvine, and Vanderbilt University, may not
+ * be used to endorse or promote products or services derived from this
+ * source without express written permission from Washington University,
+ * UC Irvine, or Vanderbilt University. This license grants no permission
+ * to call products or services derived from this source ACE(TM),
+ * TAO(TM), CIAO(TM), DAnCE(TM), or CoSMIC(TM), nor does it grant
+ * permission for the name Washington University, UC Irvine, or
+ * Vanderbilt University to appear in their names.
+ *
+ * If you have any suggestions, additions, comments, or questions, please
+ * let [21]me know.
+ *
+ * [22]Douglas C. Schmidt
+ *
+ * References
+ *
+ *  1. http://www.cs.wustl.edu/~schmidt/ACE.html
+ *  2. http://www.cs.wustl.edu/~schmidt/TAO.html
+ *  3. http://www.dre.vanderbilt.edu/CIAO/
+ *  4. http://www.dre.vanderbilt.edu/cosmic/
+ *  5. http://www.dre.vanderbilt.edu/~schmidt/
+ *  6. http://www.cs.wustl.edu/~schmidt/ACE-members.html
+ *  7. http://www.wustl.edu/
+ *  8. http://www.uci.edu/
+ *  9. http://www.vanderbilt.edu/
+ * 10. mailto:doc_group@cs.wustl.edu
+ * 11. http://www.cs.wustl.edu/~schmidt/ACE-users.html
+ * 12. http://www.cs.wustl.edu/~schmidt/ACE.html
+ * 13. http://www.cs.wustl.edu/~schmidt/TAO.html
+ * 14. http://www.dre.vanderbilt.edu/CIAO/
+ * 15. http://www.dre.vanderbilt.edu/~schmidt/DOC_ROOT/DAnCE/
+ * 16. http://www.dre.vanderbilt.edu/cosmic/
+ * 17. http://www.dre.vanderbilt.edu/
+ * 18. http://www.isis.vanderbilt.edu/
+ * 19. http://www.cs.wustl.edu/~schmidt/doc-center.html
+ * 20. http://www.cs.wustl.edu/~schmidt/commercial-support.html
+ * 21. mailto:d.schmidt@vanderbilt.edu
+ * 22. http://www.dre.vanderbilt.edu/~schmidt/
+ * 23. http://www.cs.wustl.edu/ACE.html
+ */
+
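+/*
+ * Condition variable emulation for pre-Vista Windows (WINVER < 0x0600):
+ * a signal is an auto-reset event that wakes one waiter, a broadcast is a
+ * manual-reset event that wakes all waiters, and waiters_count, guarded
+ * by a critical section, lets the last waiter woken by a broadcast reset
+ * the manual-reset event.
+ */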
+void
+InitializeXPConditionVariable(userland_cond_t *cv)
+{
+	cv->waiters_count = 0;
+	InitializeCriticalSection(&(cv->waiters_count_lock));
+	cv->events_[C_SIGNAL] = CreateEvent (NULL, FALSE, FALSE, NULL);
+	cv->events_[C_BROADCAST] = CreateEvent (NULL, TRUE, FALSE, NULL);
+}
+
+void
+DeleteXPConditionVariable(userland_cond_t *cv)
+{
+	CloseHandle(cv->events_[C_BROADCAST]);
+	CloseHandle(cv->events_[C_SIGNAL]);
+	DeleteCriticalSection(&(cv->waiters_count_lock));
+}
+
+int
+SleepXPConditionVariable(userland_cond_t *cv, userland_mutex_t *mtx)
+{
+	int result, last_waiter;
+
+	EnterCriticalSection(&cv->waiters_count_lock);
+	cv->waiters_count++;
+	LeaveCriticalSection(&cv->waiters_count_lock);
+	LeaveCriticalSection (mtx);
+	result = WaitForMultipleObjects(2, cv->events_, FALSE, INFINITE);
+	if (result==-1) {
+		result = GetLastError();
+	}
+	EnterCriticalSection(&cv->waiters_count_lock);
+	cv->waiters_count--;
+	last_waiter = (result == (WAIT_OBJECT_0 + C_BROADCAST)) && (cv->waiters_count == 0);
+	LeaveCriticalSection(&cv->waiters_count_lock);
+	if (last_waiter)
+		ResetEvent(cv->events_[C_BROADCAST]);
+	EnterCriticalSection (mtx);
+	return result;
+}
+
+void
+WakeAllXPConditionVariable(userland_cond_t *cv)
+{
+	int have_waiters;
+	EnterCriticalSection(&cv->waiters_count_lock);
+	have_waiters = cv->waiters_count > 0;
+	LeaveCriticalSection(&cv->waiters_count_lock);
+	if (have_waiters)
+		SetEvent (cv->events_[C_BROADCAST]);
+}
+#endif
+#endif
diff --git a/usrsctplib/netinet/sctp_usrreq.c b/usrsctplib/netinet/sctp_usrreq.c
new file mode 100755
index 0000000..7d14a92
--- /dev/null
+++ b/usrsctplib/netinet/sctp_usrreq.c
@@ -0,0 +1,9000 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#ifdef __FreeBSD__
+#include <sys/proc.h>
+#endif
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_var.h>
+#ifdef INET6
+#include <netinet6/sctp6_var.h>
+#endif
+#include <netinet/sctp_sysctl.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_indata.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_bsd_addr.h>
+#if defined(__Userspace__)
+#include <netinet/sctp_callout.h>
+#else
+#include <netinet/udp.h>
+#endif
+
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+#include <netinet/sctp_peeloff.h>
+#endif				/* HAVE_SCTP_PEELOFF_SOCKOPT */
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 7
+#endif
+
+extern const struct sctp_cc_functions sctp_cc_functions[];
+extern const struct sctp_ss_functions sctp_ss_functions[];
+
+void
+#if defined(__Userspace__)
+sctp_init(uint16_t port,
+          int (*conn_output)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+          void (*debug_printf)(const char *format, ...))
+#elif defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) &&!defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+sctp_init(struct protosw *pp SCTP_UNUSED, struct domain *dp SCTP_UNUSED)
+#else
+sctp_init(void)
+#endif
+{
+#if !defined(__Panda__) && !defined(__Userspace__)
+	u_long sb_max_adj;
+
+#endif
+	/* Initialize and modify the sysctled variables */
+	sctp_init_sysctls();
+#if defined(__Userspace__)
+#if defined(__Userspace_os_Windows) || defined(__Userspace_os_NaCl)
+	srand((unsigned int)time(NULL));
+#else
+	srandom(getpid()); /* so inp->sctp_ep.random_numbers are truly random... */
+#endif
+#endif
+#if defined(__Panda__)
+	sctp_sendspace = SB_MAX;
+	sctp_recvspace = SB_MAX;
+
+#elif defined(__Userspace__)
+	SCTP_BASE_SYSCTL(sctp_udp_tunneling_port) = port;
+#else
+#if !defined(__APPLE__)
+	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
+		SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
+#endif
+	/*
+	 * Allow a user to take no more than 1/2 the number of clusters or
+	 * the SB_MAX whichever is smaller for the send window.
+	 */
+#if defined(__APPLE__)
+	sb_max_adj = (u_long)((u_quad_t) (sb_max) * MCLBYTES / (MSIZE + MCLBYTES));
+#else
+	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
+#endif
+#if defined(__APPLE__)
+	SCTP_BASE_SYSCTL(sctp_sendspace) = sb_max_adj;
+#else
+	SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
+	    (((uint32_t)nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
+#endif
+	/*
+	 * Now for the recv window, should we take the same amount? or
+	 * should I do 1/2 the SB_MAX instead in the SB_MAX min above. For
+	 * now I will just copy.
+	 */
+	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
+#endif
+	SCTP_BASE_VAR(first_time) = 0;
+	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
+#if defined(__Userspace__)
+#if !defined(THREAD_SUPPORT)
+	SCTP_BASE_VAR(recvmbuf4) = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+	SCTP_BASE_VAR(to_fill4) = MAXLEN_MBUF_CHAIN;
+	SCTP_BASE_VAR(recvmbuf6) = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+	SCTP_BASE_VAR(to_fill6) = MAXLEN_MBUF_CHAIN;
+	SCTP_BASE_VAR(udp_recvmbuf4) = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+	SCTP_BASE_VAR(udp_to_fill4) = MAXLEN_MBUF_CHAIN;
+	SCTP_BASE_VAR(udp_recvmbuf6) = malloc(sizeof(struct mbuf *) * MAXLEN_MBUF_CHAIN);
+	SCTP_BASE_VAR(udp_to_fill6) = MAXLEN_MBUF_CHAIN;
+#else
+#if !defined(__Userspace_os_Windows)
+#if defined(INET) || defined(INET6)
+	SCTP_BASE_VAR(userspace_route) = -1;
+#endif
+#endif
+#ifdef INET
+	SCTP_BASE_VAR(userspace_rawsctp) = -1;
+	SCTP_BASE_VAR(userspace_udpsctp) = -1;
+#endif
+#ifdef INET6
+	SCTP_BASE_VAR(userspace_rawsctp6) = -1;
+	SCTP_BASE_VAR(userspace_udpsctp6) = -1;
+#endif
+#endif
+	SCTP_BASE_VAR(timer_thread_should_exit) = 0;
+	SCTP_BASE_VAR(conn_output) = conn_output;
+	SCTP_BASE_VAR(debug_printf) = debug_printf;
+	SCTP_BASE_VAR(crc32c_offloaded) = 0;
+#endif
+	sctp_pcb_init();
+#if defined(__Userspace__) && defined(THREAD_SUPPORT)
+	sctp_start_timer();
+#endif
+#if defined(SCTP_PACKET_LOGGING)
+	SCTP_BASE_VAR(packet_log_writers) = 0;
+	SCTP_BASE_VAR(packet_log_end) = 0;
+	bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
+#endif
+#if defined(__APPLE__)
+	SCTP_BASE_VAR(sctp_main_timer_ticks) = 0;
+	sctp_start_main_timer();
+	timeout(sctp_delayed_startup, NULL, 1);
+#endif
+}
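+/*
+ * For the userspace build, sctp_init() is normally entered through the
+ * public usrsctp_init() wrapper declared in usrsctp.h, which supplies the
+ * UDP encapsulation port and the two callbacks.  A minimal usage sketch
+ * (the port number and callback names are placeholders, not values the
+ * library requires):
+ *
+ *	usrsctp_init(9899, conn_output_cb, debug_printf_cb);
+ *	... create sockets, transfer data ...
+ *	while (usrsctp_finish() != 0)
+ *		;
+ *
+ * usrsctp_finish() returns non-zero while endpoints are still active, so
+ * callers typically loop (or sleep and retry) until it succeeds.
+ */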
+
+#if defined(__FreeBSD__)
+#ifdef VIMAGE
+static void
+sctp_finish(void *unused __unused)
+{
+         sctp_pcb_finish();
+}
+VNET_SYSUNINIT(sctp, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, sctp_finish, NULL);
+#endif
+#else
+void
+sctp_finish(void)
+{
+#if defined(__APPLE__)
+	untimeout(sctp_delayed_startup, NULL);
+	sctp_over_udp_stop();
+	sctp_address_monitor_stop();
+	sctp_stop_main_timer();
+#endif
+#if defined(__Userspace__)
+#if defined(INET) || defined(INET6)
+	recv_thread_destroy();
+#endif
+#if !defined(__Userspace_os_Windows)
+#if defined(INET) || defined(INET6)
+	if (SCTP_BASE_VAR(userspace_route) != -1) {
+		pthread_join(SCTP_BASE_VAR(recvthreadroute), NULL);
+	}
+#endif
+#endif
+#ifdef INET
+	if (SCTP_BASE_VAR(userspace_rawsctp) != -1) {
+#if defined(__Userspace_os_Windows)
+		WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw), INFINITE);
+		CloseHandle(SCTP_BASE_VAR(recvthreadraw));
+#else
+		pthread_join(SCTP_BASE_VAR(recvthreadraw), NULL);
+#endif
+	}
+	if (SCTP_BASE_VAR(userspace_udpsctp) != -1) {
+#if defined(__Userspace_os_Windows)
+		WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp), INFINITE);
+		CloseHandle(SCTP_BASE_VAR(recvthreadudp));
+#else
+		pthread_join(SCTP_BASE_VAR(recvthreadudp), NULL);
+#endif
+	}
+#endif
+#ifdef INET6
+	if (SCTP_BASE_VAR(userspace_rawsctp6) != -1) {
+#if defined(__Userspace_os_Windows)
+		WaitForSingleObject(SCTP_BASE_VAR(recvthreadraw6), INFINITE);
+		CloseHandle(SCTP_BASE_VAR(recvthreadraw6));
+#else
+		pthread_join(SCTP_BASE_VAR(recvthreadraw6), NULL);
+#endif
+	}
+	if (SCTP_BASE_VAR(userspace_udpsctp6) != -1) {
+#if defined(__Userspace_os_Windows)
+		WaitForSingleObject(SCTP_BASE_VAR(recvthreadudp6), INFINITE);
+		CloseHandle(SCTP_BASE_VAR(recvthreadudp6));
+#else
+		pthread_join(SCTP_BASE_VAR(recvthreadudp6), NULL);
+#endif
+	}
+#endif
+	SCTP_BASE_VAR(timer_thread_should_exit) = 1;
+#if defined(__Userspace_os_Windows)
+	WaitForSingleObject(SCTP_BASE_VAR(timer_thread), INFINITE);
+	CloseHandle(SCTP_BASE_VAR(timer_thread));
+#else
+	pthread_join(SCTP_BASE_VAR(timer_thread), NULL);
+#endif
+#endif
+	sctp_pcb_finish();
+#if defined(__Windows__)
+	sctp_finish_sysctls();
+#endif
+}
+#endif
+
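+/*
+ * Apply a new (smaller) path MTU to an association: record it as the
+ * smallest MTU seen and walk the send and sent queues, marking any chunk
+ * that no longer fits as eligible for IP fragmentation and, for chunks
+ * already sent, scheduling an immediate retransmission.
+ */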
+void
+sctp_pathmtu_adjustment(struct sctp_tcb *stcb, uint16_t nxtsz)
+{
+	struct sctp_tmit_chunk *chk;
+	uint16_t overhead;
+
+	/* Record the new smallest path MTU for the association. */
+	stcb->asoc.smallest_mtu = nxtsz;
+	/*
+	 * Compute the per-chunk transmission overhead (IP header allowance
+	 * plus an AUTH chunk, if one is required).
+	 */
+	overhead = IP_HDR_SIZE;
+	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
+		overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
+	}
+	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
+		if ((chk->send_size + overhead) > nxtsz) {
+			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+		}
+	}
+	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+		if ((chk->send_size + overhead) > nxtsz) {
+			/*
+			 * For this chunk we also mark it for immediate
+			 * resend, since we sent too big a chunk for the
+			 * current path MTU.
+			 */
+			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+			if (chk->sent < SCTP_DATAGRAM_RESEND) {
+				sctp_flight_size_decrease(chk);
+				sctp_total_flight_decrease(stcb, chk);
+				chk->sent = SCTP_DATAGRAM_RESEND;
+				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
+				chk->rec.data.doing_fast_retransmit = 0;
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
+					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
+						       chk->whoTo->flight_size,
+						       chk->book_size,
+						       (uint32_t)(uintptr_t)chk->whoTo,
+						       chk->rec.data.tsn);
+				}
+				/* Clear any time so NO RTT is being done */
+				chk->do_rtt = 0;
+			}
+		}
+	}
+}
+
+#ifdef INET
+#if !defined(__Userspace__)
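+/*
+ * Process ICMP unreachable feedback for an association: net/host
+ * unreachable marks the destination address unreachable, protocol/port
+ * unreachable aborts the association, and "fragmentation needed" lowers
+ * the path MTU (restarting the PMTU-raise timer if it was running).
+ */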
+void
+sctp_notify(struct sctp_inpcb *inp,
+            struct sctp_tcb *stcb,
+            struct sctp_nets *net,
+            uint8_t icmp_type,
+            uint8_t icmp_code,
+            uint16_t ip_len,
+            uint16_t next_mtu)
+{
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+	int timer_stopped;
+
+	if (icmp_type != ICMP_UNREACH) {
+		/* We only care about unreachable */
+		SCTP_TCB_UNLOCK(stcb);
+		return;
+	}
+	if ((icmp_code == ICMP_UNREACH_NET) ||
+	    (icmp_code == ICMP_UNREACH_HOST) ||
+	    (icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
+	    (icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
+	    (icmp_code == ICMP_UNREACH_ISOLATED) ||
+	    (icmp_code == ICMP_UNREACH_NET_PROHIB) ||
+	    (icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
+#if defined(__Panda__)
+	    (icmp_code == ICMP_UNREACH_ADMIN)) {
+#elif defined(__Userspace_os_NetBSD)
+	    (icmp_code == ICMP_UNREACH_ADMIN_PROHIBIT)) {
+#else
+	    (icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
+#endif
+		/* Mark the net unreachable. */
+		if (net->dest_state & SCTP_ADDR_REACHABLE) {
+			/* OK, that destination is NOT reachable. */
+			net->dest_state &= ~SCTP_ADDR_REACHABLE;
+			net->dest_state &= ~SCTP_ADDR_PF;
+			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
+			                stcb, 0,
+			                (void *)net, SCTP_SO_NOT_LOCKED);
+		}
+		SCTP_TCB_UNLOCK(stcb);
+	} else  if ((icmp_code == ICMP_UNREACH_PROTOCOL) ||
+		    (icmp_code == ICMP_UNREACH_PORT)) {
+		/* Treat it like an ABORT. */
+		sctp_abort_notification(stcb, 1, 0, NULL, SCTP_SO_NOT_LOCKED);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		so = SCTP_INP_SO(inp);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+		                      SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed.*/
+#endif
+		/* no need to unlock here, since the TCB is gone */
+	} else if (icmp_code == ICMP_UNREACH_NEEDFRAG) {
+		/* Find the next (smaller) MTU */
+		if (next_mtu == 0) {
+			/*
+			 * Old-style router that does not tell us what the
+			 * next MTU is.  We will have to guess (in an
+			 * educated fashion, of course).
+			 */
+			next_mtu = sctp_get_prev_mtu(ip_len);
+		}
+		/* Stop the PMTU timer. */
+		if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+			timer_stopped = 1;
+			sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+			                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
+		} else {
+			timer_stopped = 0;
+		}
+		/* Update the path MTU. */
+		if (net->mtu > next_mtu) {
+			net->mtu = next_mtu;
+			if (net->port) {
+				net->mtu -= sizeof(struct udphdr);
+			}
+		}
+		/* Update the association MTU */
+		if (stcb->asoc.smallest_mtu > next_mtu) {
+			sctp_pathmtu_adjustment(stcb, next_mtu);
+		}
+		/* Finally, start the PMTU timer if it was running before. */
+		if (timer_stopped) {
+			sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+		}
+		SCTP_TCB_UNLOCK(stcb);
+	} else {
+		SCTP_TCB_UNLOCK(stcb);
+	}
+}
+#endif
+
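+/*
+ * Protocol control-input handler, invoked for ICMP errors on IPv4: the
+ * embedded SCTP common header is used to look up the association (source
+ * and destination are swapped, since the ICMP payload is a packet we
+ * sent), the verification tag is checked, and sctp_notify() is called.
+ */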
+#if !defined(__Panda__) && !defined(__Userspace__)
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+void
+#else
+void *
+#endif
+sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+{
+#if defined(__FreeBSD__)
+	struct ip *outer_ip;
+#endif
+	struct ip *inner_ip;
+	struct sctphdr *sh;
+	struct icmp *icmp;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+#if defined(__FreeBSD__)
+	struct sctp_init_chunk *ch;
+#endif
+	struct sockaddr_in src, dst;
+
+	if (sa->sa_family != AF_INET ||
+	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+		return;
+#else
+		return (NULL);
+#endif
+	}
+	if (PRC_IS_REDIRECT(cmd)) {
+		vip = NULL;
+	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+		return;
+#else
+		return (NULL);
+#endif
+	}
+	if (vip != NULL) {
+		inner_ip = (struct ip *)vip;
+		icmp = (struct icmp *)((caddr_t)inner_ip -
+		    (sizeof(struct icmp) - sizeof(struct ip)));
+#if defined(__FreeBSD__)
+		outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
+#endif
+		sh = (struct sctphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
+		memset(&src, 0, sizeof(struct sockaddr_in));
+		src.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		src.sin_len = sizeof(struct sockaddr_in);
+#endif
+		src.sin_port = sh->src_port;
+		src.sin_addr = inner_ip->ip_src;
+		memset(&dst, 0, sizeof(struct sockaddr_in));
+		dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+		dst.sin_port = sh->dest_port;
+		dst.sin_addr = inner_ip->ip_dst;
+		/*
+		 * 'dst' holds the dest of the packet that failed to be sent.
+		 * 'src' holds our local endpoint address. Thus we reverse
+		 * the dst and the src in the lookup.
+		 */
+		inp = NULL;
+		net = NULL;
+		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
+		                                    (struct sockaddr *)&src,
+		                                    &inp, &net, 1,
+		                                    SCTP_DEFAULT_VRFID);
+		if ((stcb != NULL) &&
+		    (net != NULL) &&
+		    (inp != NULL)) {
+			/* Check the verification tag */
+			if (ntohl(sh->v_tag) != 0) {
+				/*
+				 * This must be the verification tag used for
+				 * sending out packets. We don't consider
+				 * packets reflecting the verification tag.
+				 */
+				if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
+					SCTP_TCB_UNLOCK(stcb);
+					return;
+				}
+			} else {
+#if defined(__FreeBSD__)
+				if (ntohs(outer_ip->ip_len) >=
+				    sizeof(struct ip) +
+				    8 + (inner_ip->ip_hl << 2) + 20) {
+					/*
+					 * In this case we can check if we
+					 * got an INIT chunk and if the
+					 * initiate tag matches.
+					 */
+					ch = (struct sctp_init_chunk *)(sh + 1);
+					if ((ch->ch.chunk_type != SCTP_INITIATION) ||
+					    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
+						SCTP_TCB_UNLOCK(stcb);
+						return;
+					}
+				} else {
+					SCTP_TCB_UNLOCK(stcb);
+					return;
+				}
+#else
+				SCTP_TCB_UNLOCK(stcb);
+				return;
+#endif
+			}
+			sctp_notify(inp, stcb, net,
+			            icmp->icmp_type,
+			            icmp->icmp_code,
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1000000
+			            ntohs(inner_ip->ip_len),
+#else
+			            inner_ip->ip_len,
+#endif
+			            ntohs(icmp->icmp_nextmtu));
+		} else {
+#if defined(__FreeBSD__) && __FreeBSD_version < 500000
+			/*
+			 * XXX must be fixed for 5.x and higher, leave for
+			 * 4.x
+			 */
+			if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) {
+				in_rtchange((struct inpcb *)inp,
+				    inetctlerrmap[cmd]);
+			}
+#endif
+			if ((stcb == NULL) && (inp != NULL)) {
+				/* reduce ref-count */
+				SCTP_INP_WLOCK(inp);
+				SCTP_INP_DECR_REF(inp);
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if (stcb) {
+				SCTP_TCB_UNLOCK(stcb);
+			}
+		}
+	}
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+	return;
+#else
+	return (NULL);
+#endif
+}
+#endif
+#endif
+
+#if defined(__FreeBSD__)
+static int
+sctp_getcred(SYSCTL_HANDLER_ARGS)
+{
+	struct xucred xuc;
+	struct sockaddr_in addrs[2];
+	struct sctp_inpcb *inp;
+	struct sctp_nets *net;
+	struct sctp_tcb *stcb;
+	int error;
+	uint32_t vrf_id;
+
+	/* FIX, for non-bsd is this right? */
+	vrf_id = SCTP_DEFAULT_VRFID;
+
+#if __FreeBSD_version > 602000
+	error = priv_check(req->td, PRIV_NETINET_GETCRED);
+
+#elif __FreeBSD_version >= 500000
+	error = suser(req->td);
+#else
+	error = suser(req->p);
+#endif
+	if (error)
+		return (error);
+
+	error = SYSCTL_IN(req, addrs, sizeof(addrs));
+	if (error)
+		return (error);
+
+	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[1]),
+	    sintosa(&addrs[0]),
+	    &inp, &net, 1, vrf_id);
+	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
+		if ((inp != NULL) && (stcb == NULL)) {
+			/* reduce ref-count */
+			SCTP_INP_WLOCK(inp);
+			SCTP_INP_DECR_REF(inp);
+			goto cred_can_cont;
+		}
+
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+		error = ENOENT;
+		goto out;
+	}
+	SCTP_TCB_UNLOCK(stcb);
+	/* We use the write lock here only because the
+	 * error leg needs it.  If we used RLOCK, we would
+	 * have to wlock/decr/unlock/rlock, which in theory
+	 * could create a hole.  Better to take the stronger
+	 * wlock from the start.
+	 */
+	SCTP_INP_WLOCK(inp);
+ cred_can_cont:
+	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
+	if (error) {
+		SCTP_INP_WUNLOCK(inp);
+		goto out;
+	}
+	cru2x(inp->sctp_socket->so_cred, &xuc);
+	SCTP_INP_WUNLOCK(inp);
+	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
+out:
+	return (error);
+}
+
+SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
+    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
+#endif				/* #if defined(__FreeBSD__) */
+
+
+#ifdef INET
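+/*
+ * Abort a socket: atomically mark the PCB SOCKET_GONE/CLOSE_IP and free
+ * it with an ABORT, retrying the compare-and-set if the flags changed
+ * underneath us.  The socket buffers are cleared, since nothing can be
+ * delivered once the PCB is gone.
+ */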
+#if defined(__Panda__) || defined(__Windows__) || defined(__Userspace__)
+int
+#elif defined(__FreeBSD__) && __FreeBSD_version > 690000
+static void
+#else
+static int
+#endif
+sctp_abort(struct socket *so)
+{
+	struct sctp_inpcb *inp;
+	uint32_t flags;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+#if defined(__FreeBSD__) && __FreeBSD_version > 690000
+		return;
+#else
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+#endif
+	}
+
+ sctp_must_try_again:
+	flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, NULL, 17);
+#endif
+	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#ifdef SCTP_LOG_CLOSING
+		sctp_log_closing(inp, NULL, 16);
+#endif
+		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+				SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+		SOCK_LOCK(so);
+		SCTP_SB_CLEAR(so->so_snd);
+		/* same for the rcv ones, they are only
+		 * here for the accounting/select.
+		 */
+		SCTP_SB_CLEAR(so->so_rcv);
+
+#if defined(__APPLE__)
+		so->so_usecount--;
+#else
+		/* Now null out the reference, we are completely detached. */
+		so->so_pcb = NULL;
+#endif
+		SOCK_UNLOCK(so);
+	} else {
+		flags = inp->sctp_flags;
+		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+			goto sctp_must_try_again;
+		}
+	}
+#if defined(__FreeBSD__) && __FreeBSD_version > 690000
+	return;
+#else
+	return (0);
+#endif
+}
+
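+/*
+ * Attach a new SCTP socket: reserve the send and receive socket buffers
+ * if the caller has not already sized them, allocate the SCTP inpcb, and
+ * mark it as IPv4-only.
+ */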
+#if defined(__Panda__) || defined(__Userspace__)
+int
+#else
+static int
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct thread *p SCTP_UNUSED)
+#elif defined(__Panda__) || defined(__Userspace__)
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, uint32_t vrf_id)
+#elif defined(__Windows__)
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, PKTHREAD p SCTP_UNUSED)
+#else
+sctp_attach(struct socket *so, int proto SCTP_UNUSED, struct proc *p SCTP_UNUSED)
+#endif
+{
+	struct sctp_inpcb *inp;
+	struct inpcb *ip_inp;
+	int error;
+#if !defined(__Panda__) && !defined(__Userspace__)
+	uint32_t vrf_id = SCTP_DEFAULT_VRFID;
+#endif
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp != NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+		error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
+		if (error) {
+			return (error);
+		}
+	}
+	error = sctp_inpcb_alloc(so, vrf_id);
+	if (error) {
+		return (error);
+	}
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	SCTP_INP_WLOCK(inp);
+	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
+	ip_inp = &inp->ip_inp.inp;
+	ip_inp->inp_vflag |= INP_IPV4;
+	ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+	SCTP_INP_WUNLOCK(inp);
+	return (0);
+}
+
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+#elif defined(__FreeBSD__) || defined(__APPLE__)
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, struct proc *p) {
+#elif defined(__Panda__) || defined(__Userspace__)
+int
+sctp_bind(struct socket *so, struct sockaddr *addr) {
+	void *p = NULL;
+#elif defined(__Windows__)
+static int
+sctp_bind(struct socket *so, struct sockaddr *addr, PKTHREAD p) {
+#else
+static int
+sctp_bind(struct socket *so, struct mbuf *nam, struct proc *p)
+{
+	struct sockaddr *addr = nam ? mtod(nam, struct sockaddr *): NULL;
+
+#endif
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	if (addr != NULL) {
+#ifdef HAVE_SA_LEN
+		if ((addr->sa_family != AF_INET) ||
+		    (addr->sa_len != sizeof(struct sockaddr_in))) {
+#else
+		if (addr->sa_family != AF_INET) {
+#endif
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+	}
+	return (sctp_inpcb_bind(so, addr, NULL, p));
+}
+
+#endif
+#if defined(__Userspace__)
+
+int
+sctpconn_attach(struct socket *so, int proto SCTP_UNUSED, uint32_t vrf_id)
+{
+	struct sctp_inpcb *inp;
+	struct inpcb *ip_inp;
+	int error;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp != NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+		error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
+		if (error) {
+			return (error);
+		}
+	}
+	error = sctp_inpcb_alloc(so, vrf_id);
+	if (error) {
+		return (error);
+	}
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	SCTP_INP_WLOCK(inp);
+	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;
+	inp->sctp_flags |= SCTP_PCB_FLAGS_BOUND_CONN;
+	ip_inp = &inp->ip_inp.inp;
+	ip_inp->inp_vflag |= INP_CONN;
+	ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
+	SCTP_INP_WUNLOCK(inp);
+	return (0);
+}
+
+int
+sctpconn_bind(struct socket *so, struct sockaddr *addr)
+{
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	if (addr != NULL) {
+#ifdef HAVE_SA_LEN
+		if ((addr->sa_family != AF_CONN) ||
+		    (addr->sa_len != sizeof(struct sockaddr_conn))) {
+#else
+		if (addr->sa_family != AF_CONN) {
+#endif
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+	}
+	return (sctp_inpcb_bind(so, addr, NULL, NULL));
+}
+
+#endif
+#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) || defined(__Userspace__)
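+/*
+ * Close a socket: if SO_LINGER with a zero timeout is set, or unread data
+ * remains, the endpoint is torn down with an ABORT; otherwise a graceful
+ * close is started.  The socket buffers are cleared and the protocol
+ * reference is dropped regardless of the association state.
+ */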
+void
+sctp_close(struct socket *so)
+{
+	struct sctp_inpcb *inp;
+	uint32_t flags;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL)
+		return;
+
+	/* Inform all the lower layer assoc that we
+	 * are done.
+	 */
+ sctp_must_try_again:
+	flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, NULL, 17);
+#endif
+	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#if defined(__Userspace__)
+		if (((so->so_options & SCTP_SO_LINGER) && (so->so_linger == 0)) ||
+		    (so->so_rcv.sb_cc > 0)) {
+#else
+		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
+		    (so->so_rcv.sb_cc > 0)) {
+#endif
+#ifdef SCTP_LOG_CLOSING
+			sctp_log_closing(inp, NULL, 13);
+#endif
+			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+					SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+		} else {
+#ifdef SCTP_LOG_CLOSING
+			sctp_log_closing(inp, NULL, 14);
+#endif
+			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+					SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+		}
+		/* The socket is now detached, no matter what
+		 * the state of the SCTP association.
+		 */
+		SOCK_LOCK(so);
+		SCTP_SB_CLEAR(so->so_snd);
+		/* same for the rcv ones, they are only
+		 * here for the accounting/select.
+		 */
+		SCTP_SB_CLEAR(so->so_rcv);
+
+#if !defined(__APPLE__)
+		/* Now null out the reference, we are completely detached. */
+		so->so_pcb = NULL;
+#endif
+		SOCK_UNLOCK(so);
+	} else {
+		flags = inp->sctp_flags;
+		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+			goto sctp_must_try_again;
+		}
+	}
+	return;
+}
+
+#else
+
+
+int
+sctp_detach(struct socket *so)
+{
+	struct sctp_inpcb *inp;
+	uint32_t flags;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+#if defined(__FreeBSD__) && __FreeBSD_version > 690000
+		return;
+#else
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+#endif
+	}
+ sctp_must_try_again:
+	flags = inp->sctp_flags;
+#ifdef SCTP_LOG_CLOSING
+	sctp_log_closing(inp, NULL, 17);
+#endif
+	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
+#if defined(__Userspace__)
+		if (((so->so_options & SCTP_SO_LINGER) && (so->so_linger == 0)) ||
+		    (so->so_rcv.sb_cc > 0)) {
+#else
+		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
+		    (so->so_rcv.sb_cc > 0)) {
+#endif
+#ifdef SCTP_LOG_CLOSING
+			sctp_log_closing(inp, NULL, 13);
+#endif
+			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+					SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+		} else {
+#ifdef SCTP_LOG_CLOSING
+			sctp_log_closing(inp, NULL, 13);
+#endif
+			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
+					SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
+		}
+		/* The socket is now detached, no matter what
+		 * the state of the SCTP association.
+		 */
+		SCTP_SB_CLEAR(so->so_snd);
+		/* same for the rcv ones, they are only
+		 * here for the accounting/select.
+		 */
+		SCTP_SB_CLEAR(so->so_rcv);
+#if !defined(__APPLE__)
+		/* Now disconnect */
+		so->so_pcb = NULL;
+#endif
+	} else {
+		flags = inp->sctp_flags;
+		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+			goto sctp_must_try_again;
+		}
+	}
+#if defined(__FreeBSD__) && __FreeBSD_version > 690000
+	return;
+#else
+	return (0);
+#endif
+}
+#endif
+
+#if defined(__Userspace__)
+/* __Userspace__ is not calling sctp_sendm */
+#endif
+#if !(defined(__Panda__) || defined(__Windows__))
+int
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+    struct mbuf *control, struct thread *p);
+
+#else
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+    struct mbuf *control, struct proc *p);
+
+#endif
+
+int
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+    struct mbuf *control, struct thread *p)
+{
+#else
+sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
+    struct mbuf *control, struct proc *p)
+{
+#endif
+	struct sctp_inpcb *inp;
+	int error;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		if (control) {
+			sctp_m_freem(control);
+			control = NULL;
+		}
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		sctp_m_freem(m);
+		return (EINVAL);
+	}
+	/* Got to have a to-address if we are NOT a connected socket */
+	if ((addr == NULL) &&
+	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) {
+		goto connected_type;
+	} else if (addr == NULL) {
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
+		error = EDESTADDRREQ;
+		sctp_m_freem(m);
+		if (control) {
+			sctp_m_freem(control);
+			control = NULL;
+		}
+		return (error);
+	}
+#ifdef INET6
+	if (addr->sa_family != AF_INET) {
+		/* must be a v4 address! */
+		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
+		sctp_m_freem(m);
+		if (control) {
+			sctp_m_freem(control);
+			control = NULL;
+		}
+		error = EDESTADDRREQ;
+		return (error);
+	}
+#endif				/* INET6 */
+connected_type:
+	/* now what about control */
+	if (control) {
+		if (inp->control) {
+			SCTP_PRINTF("huh? control set?\n");
+			sctp_m_freem(inp->control);
+			inp->control = NULL;
+		}
+		inp->control = control;
+	}
+	/* Place the data */
+	if (inp->pkt) {
+		SCTP_BUF_NEXT(inp->pkt_last) = m;
+		inp->pkt_last = m;
+	} else {
+		inp->pkt_last = inp->pkt = m;
+	}
+	if (
+#if defined(__FreeBSD__) || defined(__APPLE__)
+	/* FreeBSD uses a flag passed */
+	    ((flags & PRUS_MORETOCOME) == 0)
+#else
+	    1			/* Open BSD does not have any "more to come"
+				 * indication */
+#endif
+	    ) {
+		/*
+		 * note with the current version this code will only be used
+		 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
+		 * re-defining sosend to use the sctp_sosend. One can
+		 * optionally switch back to this code (by changing back the
+		 * definitions) but this is not advisable. This code is used
+		 * by FreeBSD when sending a file with sendfile() though.
+		 */
+		int ret;
+
+		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
+		inp->pkt = NULL;
+		inp->control = NULL;
+		return (ret);
+	} else {
+		return (0);
+	}
+}
+#endif
+
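+/*
+ * Disconnect (TCP-model sockets only): if SO_LINGER with a zero timeout
+ * is set or unread data remains, the association is aborted; otherwise a
+ * SHUTDOWN is sent once the queues are empty, or SHUTDOWN_PENDING is set
+ * while queued data drains.  One-to-many sockets return EOPNOTSUPP.
+ */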
+int
+sctp_disconnect(struct socket *so)
+{
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+		return (ENOTCONN);
+	}
+	SCTP_INP_RLOCK(inp);
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+			/* No connection */
+			SCTP_INP_RUNLOCK(inp);
+			return (0);
+		} else {
+			struct sctp_association *asoc;
+			struct sctp_tcb *stcb;
+
+			stcb = LIST_FIRST(&inp->sctp_asoc_list);
+			if (stcb == NULL) {
+				SCTP_INP_RUNLOCK(inp);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				return (EINVAL);
+			}
+			SCTP_TCB_LOCK(stcb);
+			asoc = &stcb->asoc;
+			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				/* We are about to be freed, out of here */
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_INP_RUNLOCK(inp);
+				return (0);
+			}
+#if defined(__Userspace__)
+			if (((so->so_options & SCTP_SO_LINGER) &&
+			     (so->so_linger == 0)) ||
+			    (so->so_rcv.sb_cc > 0)) {
+#else
+			if (((so->so_options & SO_LINGER) &&
+			     (so->so_linger == 0)) ||
+			    (so->so_rcv.sb_cc > 0)) {
+#endif
+				if (SCTP_GET_STATE(asoc) !=
+				    SCTP_STATE_COOKIE_WAIT) {
+					/* Left with Data unread */
+					struct mbuf *err;
+
+					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
+					if (err) {
+						/*
+						 * Fill in the user
+						 * initiated abort
+						 */
+						struct sctp_paramhdr *ph;
+
+						ph = mtod(err, struct sctp_paramhdr *);
+						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
+						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
+						ph->param_length = htons(SCTP_BUF_LEN(err));
+					}
+					sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
+					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+				}
+				SCTP_INP_RUNLOCK(inp);
+				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+				}
+				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+				                      SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
+				/* No unlock tcb assoc is gone */
+				return (0);
+			}
+			if (TAILQ_EMPTY(&asoc->send_queue) &&
+			    TAILQ_EMPTY(&asoc->sent_queue) &&
+			    (asoc->stream_queue_cnt == 0)) {
+				/* there is nothing queued to send, so done */
+				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+					goto abort_anyway;
+				}
+				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
+				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+					/* only send SHUTDOWN 1st time thru */
+					struct sctp_nets *netp;
+
+					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
+					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+					}
+					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+					sctp_stop_timers_for_shutdown(stcb);
+					if (stcb->asoc.alternate) {
+						netp = stcb->asoc.alternate;
+					} else {
+						netp = stcb->asoc.primary_destination;
+					}
+					sctp_send_shutdown(stcb,netp);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+							 stcb->sctp_ep, stcb, netp);
+					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
+							 stcb->sctp_ep, stcb, netp);
+					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
+				}
+			} else {
+				/*
+				 * we still got (or just got) data to send,
+				 * so set SHUTDOWN_PENDING
+				 */
+				/*
+				 * XXX sockets draft says that SCTP_EOF
+				 * should be sent with no data. currently,
+				 * we will allow user data to be sent first
+				 * and move to SHUTDOWN-PENDING
+				 */
+				struct sctp_nets *netp;
+				if (stcb->asoc.alternate) {
+					netp = stcb->asoc.alternate;
+				} else {
+					netp = stcb->asoc.primary_destination;
+				}
+
+				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
+				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
+						 netp);
+				if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
+				}
+				if (TAILQ_EMPTY(&asoc->send_queue) &&
+				    TAILQ_EMPTY(&asoc->sent_queue) &&
+				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+					struct mbuf *op_err;
+				abort_anyway:
+					op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
+					sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
+					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+					}
+					SCTP_INP_RUNLOCK(inp);
+					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+					                      SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
+					return (0);
+				} else {
+					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+				}
+			}
+			soisdisconnecting(so);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_INP_RUNLOCK(inp);
+			return (0);
+		}
+		/* not reached */
+	} else {
+		/* UDP model does not support this */
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		return (EOPNOTSUPP);
+	}
+}
+
+#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__)
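+/*
+ * Flush a socket's buffers: flushing the read side sets
+ * SCTP_PCB_FLAGS_SOCKET_CANT_READ so no further data is queued to the
+ * socket, then the receive and/or send buffer counters are cleared.  For
+ * one-to-many sockets this is a no-op.
+ */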
+int
+sctp_flush(struct socket *so, int how)
+{
+        /*
+	 * We will just clear out the values and let
+	 * the subsequent close clear out the data, if any.
+	 * Note that if the user did a shutdown(SHUT_RD), they
+	 * will not be able to read the data; the socket
+	 * will block that from happening.
+	 */
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	SCTP_INP_RLOCK(inp);
+	/* For the 1 to many model this does nothing */
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+		SCTP_INP_RUNLOCK(inp);
+		return (0);
+	}
+	SCTP_INP_RUNLOCK(inp);
+        if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
+		/* First make sure the sb will be happy, we don't
+		 * use these except maybe the count
+		 */
+		SCTP_INP_WLOCK(inp);
+		SCTP_INP_READ_LOCK(inp);
+		inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
+		SCTP_INP_READ_UNLOCK(inp);
+		SCTP_INP_WUNLOCK(inp);
+		so->so_rcv.sb_cc = 0;
+		so->so_rcv.sb_mbcnt = 0;
+		so->so_rcv.sb_mb = NULL;
+	}
+        if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
+		/* First make sure the sb will be happy, we don't
+		 * use these except maybe the count
+		 */
+		so->so_snd.sb_cc = 0;
+		so->so_snd.sb_mbcnt = 0;
+		so->so_snd.sb_mb = NULL;
+
+	}
+	return (0);
+}
+#endif
+
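+/*
+ * shutdown(2) handler: only meaningful for TCP-model sockets, where a
+ * SHUT_WR/SHUT_RDWR either sends a SHUTDOWN right away (queues empty) or
+ * sets SHUTDOWN_PENDING until queued data drains.  For one-to-many
+ * sockets the CANTRCVMORE state set by soshutdown() is undone and
+ * EOPNOTSUPP is returned.
+ */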
+int
+sctp_shutdown(struct socket *so)
+{
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	SCTP_INP_RLOCK(inp);
+	/* For the UDP model this is an invalid call */
+	if (!((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	      (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+		/* Restore the flags that the soshutdown took away. */
+#if (defined(__FreeBSD__) && __FreeBSD_version >= 502115) || defined(__Windows__)
+		SOCKBUF_LOCK(&so->so_rcv);
+		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
+		SOCKBUF_UNLOCK(&so->so_rcv);
+#else
+		so->so_state &= ~SS_CANTRCVMORE;
+#endif
+		/* This proc will wakeup for read and do nothing (I hope) */
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		return (EOPNOTSUPP);
+	} else {
+		/*
+		 * Ok, if we reach here it's the TCP model and it is either
+		 * a SHUT_WR or SHUT_RDWR.
+		 * This means we put the shutdown flag against it.
+		 */
+		struct sctp_tcb *stcb;
+		struct sctp_association *asoc;
+		struct sctp_nets *netp;
+
+		if ((so->so_state &
+		     (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) == 0) {
+			SCTP_INP_RUNLOCK(inp);
+			return (ENOTCONN);
+		}
+		socantsendmore(so);
+
+		stcb = LIST_FIRST(&inp->sctp_asoc_list);
+		if (stcb == NULL) {
+			/*
+			 * Ok, we hit the case that the shutdown call was
+			 * made after an abort or something. Nothing to do
+			 * now.
+			 */
+			SCTP_INP_RUNLOCK(inp);
+			return (0);
+		}
+		SCTP_TCB_LOCK(stcb);
+		asoc = &stcb->asoc;
+		if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_INP_RUNLOCK(inp);
+			return (0);
+		}
+		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
+		    (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_ECHOED) &&
+		    (SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN)) {
+			/* If we are not in or before ESTABLISHED, there is
+			 * no protocol action required.
+			 */
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_INP_RUNLOCK(inp);
+			return (0);
+		}
+		if (stcb->asoc.alternate) {
+			netp = stcb->asoc.alternate;
+		} else {
+			netp = stcb->asoc.primary_destination;
+		}
+		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) &&
+		    TAILQ_EMPTY(&asoc->send_queue) &&
+		    TAILQ_EMPTY(&asoc->sent_queue) &&
+		    (asoc->stream_queue_cnt == 0)) {
+			if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+				goto abort_anyway;
+			}
+			/* there is nothing queued to send, so I'm done... */
+			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
+			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+			sctp_stop_timers_for_shutdown(stcb);
+			sctp_send_shutdown(stcb, netp);
+			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
+			                 stcb->sctp_ep, stcb, netp);
+		} else {
+			/*
+			 * We still got (or just got) data to send, so set
+			 * SHUTDOWN_PENDING.
+			 */
+			SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
+			if ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete)(stcb, asoc)) {
+				SCTP_ADD_SUBSTATE(asoc, SCTP_STATE_PARTIAL_MSG_LEFT);
+			}
+			if (TAILQ_EMPTY(&asoc->send_queue) &&
+			    TAILQ_EMPTY(&asoc->sent_queue) &&
+			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
+				struct mbuf *op_err;
+			abort_anyway:
+				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
+				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
+				sctp_abort_an_association(stcb->sctp_ep, stcb,
+							  op_err, SCTP_SO_LOCKED);
+				SCTP_INP_RUNLOCK(inp);
+				return (0);
+			}
+		}
+		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb, netp);
+		/* XXX: Why do this in the case where we still have data queued? */
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_INP_RUNLOCK(inp);
+		return (0);
+	}
+}
+
+/*
+ * copies a "user" presentable address and removes embedded scope, etc.
+ * returns 0 on success, 1 on error
+ */
+static uint32_t
+sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
+{
+#ifdef INET6
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+	struct sockaddr_in6 lsa6;
+
+	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
+	    &lsa6);
+#endif
+#endif
+#ifdef HAVE_SA_LEN
+	memcpy(ss, sa, sa->sa_len);
+#else
+	switch (sa->sa_family) {
+#ifdef INET
+	case AF_INET:
+		memcpy(ss, sa, sizeof(struct sockaddr_in));
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		memcpy(ss, sa, sizeof(struct sockaddr_in6));
+		break;
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		memcpy(ss, sa, sizeof(struct sockaddr_conn));
+		break;
+#endif
+	default:
+		/* TSNH */
+		break;
+	}
+#endif
+	return (0);
+}
+
+
+
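+/*
+ * Pack the endpoint's (or association's) usable local addresses into the
+ * caller-supplied sockaddr_storage buffer, honoring the applicable
+ * address scopes, and stop once 'limit' bytes have been produced.  The
+ * return value is the number of bytes written.
+ */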
+/*
+ * NOTE: assumes addr lock is held
+ */
+static size_t
+sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
+			   struct sctp_tcb *stcb,
+			   size_t limit,
+			   struct sockaddr_storage *sas,
+			   uint32_t vrf_id)
+{
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+	size_t actual;
+	int loopback_scope;
+#if defined(INET)
+	int ipv4_local_scope, ipv4_addr_legal;
+#endif
+#if defined(INET6)
+	int local_scope, site_scope, ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+	int conn_addr_legal;
+#endif
+	struct sctp_vrf *vrf;
+
+	actual = 0;
+	if (limit == 0)
+		return (actual);
+
+	if (stcb) {
+		/* Turn on all the appropriate scope */
+		loopback_scope = stcb->asoc.scope.loopback_scope;
+#if defined(INET)
+		ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+		ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+#endif
+#if defined(INET6)
+		local_scope = stcb->asoc.scope.local_scope;
+		site_scope = stcb->asoc.scope.site_scope;
+		ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+		conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+	} else {
+		/* Use generic values for endpoints. */
+		loopback_scope = 1;
+#if defined(INET)
+		ipv4_local_scope = 1;
+#endif
+#if defined(INET6)
+		local_scope = 1;
+		site_scope = 1;
+#endif
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+#if defined(INET6)
+			ipv6_addr_legal = 1;
+#endif
+#if defined(INET)
+			if (SCTP_IPV6_V6ONLY(inp)) {
+				ipv4_addr_legal = 0;
+			} else {
+				ipv4_addr_legal = 1;
+			}
+#endif
+#if defined(__Userspace__)
+			conn_addr_legal = 0;
+#endif
+		} else {
+#if defined(INET6)
+			ipv6_addr_legal = 0;
+#endif
+#if defined(__Userspace__)
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+				conn_addr_legal = 1;
+#if defined(INET)
+				ipv4_addr_legal = 0;
+#endif
+			} else {
+				conn_addr_legal = 0;
+#if defined(INET)
+				ipv4_addr_legal = 1;
+#endif
+			}
+#else
+#if defined(INET)
+			ipv4_addr_legal = 1;
+#endif
+#endif
+		}
+	}
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		return (0);
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+			if ((loopback_scope == 0) &&
+			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+				/* Skip loopback if loopback_scope not set */
+				continue;
+			}
+			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+				if (stcb) {
+					/*
+					 * For the BOUND-ALL case, the list
+					 * associated with a TCB is always a
+					 * reverse list, i.e. it lists
+					 * addresses that are NOT part of
+					 * the association. If this is one
+					 * of those, we must skip it.
+					 */
+					if (sctp_is_addr_restricted(stcb,
+								    sctp_ifa)) {
+						continue;
+					}
+				}
+				switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					if (ipv4_addr_legal) {
+						struct sockaddr_in *sin;
+
+						sin = &sctp_ifa->address.sin;
+						if (sin->sin_addr.s_addr == 0) {
+							/*
+							 * we skip unspecified
+							 * addresses
+							 */
+							continue;
+						}
+#if defined(__FreeBSD__)
+						if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+						                     &sin->sin_addr) != 0) {
+							continue;
+						}
+#endif
+						if ((ipv4_local_scope == 0) &&
+						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+							continue;
+						}
+#ifdef INET6
+						if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+							in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
+							((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
+							actual += sizeof(struct sockaddr_in6);
+						} else {
+#endif
+							memcpy(sas, sin, sizeof(*sin));
+							((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
+							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
+							actual += sizeof(*sin);
+#ifdef INET6
+						}
+#endif
+						if (actual >= limit) {
+							return (actual);
+						}
+					} else {
+						continue;
+					}
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					if (ipv6_addr_legal) {
+						struct sockaddr_in6 *sin6;
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
+						struct sockaddr_in6 lsa6;
+#endif
+						sin6 = &sctp_ifa->address.sin6;
+						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+							/*
+							 * we skip unspecified
+							 * addresses
+							 */
+							continue;
+						}
+#if defined(__FreeBSD__)
+						if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+						                     &sin6->sin6_addr) != 0) {
+							continue;
+						}
+#endif
+						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+							if (local_scope == 0)
+								continue;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+							if (sin6->sin6_scope_id == 0) {
+#ifdef SCTP_KAME
+								if (sa6_recoverscope(sin6) != 0)
+									/*
+									 * bad link
+									 * local
+									 * address
+									 */
+									continue;
+#else
+								lsa6 = *sin6;
+								if (in6_recoverscope(&lsa6,
+										     &lsa6.sin6_addr,
+										     NULL))
+									/*
+									 * bad link
+									 * local
+									 * address
+									 */
+								continue;
+								sin6 = &lsa6;
+#endif				/* SCTP_KAME */
+							}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+						}
+						if ((site_scope == 0) &&
+						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+							continue;
+						}
+						memcpy(sas, sin6, sizeof(*sin6));
+						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
+						actual += sizeof(*sin6);
+						if (actual >= limit) {
+							return (actual);
+						}
+					} else {
+						continue;
+					}
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					if (conn_addr_legal) {
+						memcpy(sas, &sctp_ifa->address.sconn, sizeof(struct sockaddr_conn));
+						((struct sockaddr_conn *)sas)->sconn_port = inp->sctp_lport;
+						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_conn));
+						actual += sizeof(struct sockaddr_conn);
+						if (actual >= limit) {
+							return (actual);
+						}
+					} else {
+						continue;
+					}
+					break;
+#endif
+				default:
+					/* TSNH */
+					break;
+				}
+			}
+		}
+	} else {
+		struct sctp_laddr *laddr;
+#ifndef HAVE_SA_LEN
+		uint32_t sa_len = 0;
+#endif
+
+		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+			if (stcb) {
+				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
+					continue;
+				}
+			}
+			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
+				continue;
+			switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
+				break;
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+				((struct sockaddr_conn *)sas)->sconn_port = inp->sctp_lport;
+				break;
+#endif
+			default:
+				/* TSNH */
+				break;
+			}
+#ifdef HAVE_SA_LEN
+			sas = (struct sockaddr_storage *)((caddr_t)sas +
+							  laddr->ifa->address.sa.sa_len);
+			actual += laddr->ifa->address.sa.sa_len;
+#else
+			switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				sa_len = sizeof(struct sockaddr_in);
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				sa_len = sizeof(struct sockaddr_in6);
+				break;
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+				sa_len = sizeof(struct sockaddr_conn);
+				break;
+#endif
+			default:
+				/* TSNH */
+				break;
+			}
+			sas = (struct sockaddr_storage *)((caddr_t)sas + sa_len);
+			actual += sa_len;
+#endif
+			if (actual >= limit) {
+				return (actual);
+			}
+		}
+	}
+	return (actual);
+}
+
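+/*
+ * Take the global address read lock and fill the caller supplied buffer
+ * with this endpoint's local addresses: the default VRF only, or every
+ * VRF on the endpoint when SCTP_MVRF is defined.  Returns the number of
+ * bytes written into 'sas'.
+ */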
+static size_t
+sctp_fill_up_addresses(struct sctp_inpcb *inp,
+                       struct sctp_tcb *stcb,
+                       size_t limit,
+                       struct sockaddr_storage *sas)
+{
+	size_t size = 0;
+#ifdef SCTP_MVRF
+	uint32_t id;
+#endif
+
+	SCTP_IPI_ADDR_RLOCK();
+#ifdef SCTP_MVRF
+/*
+ * FIX ME: ?? this WILL report duplicate addresses if they appear
+ * in more than one VRF.
+ */
+	/* fill up addresses for all VRFs on the endpoint */
+	for (id = 0; (id < inp->num_vrfs) && (size < limit); id++) {
+		size += sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
+						   inp->m_vrf_ids[id]);
+		sas = (struct sockaddr_storage *)((caddr_t)sas + size);
+	}
+#else
+	/* fill up addresses for the endpoint's default vrf */
+	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
+					  inp->def_vrf_id);
+#endif
+	SCTP_IPI_ADDR_RUNLOCK();
+	return (size);
+}
+
+/*
+ * NOTE: assumes addr lock is held
+ */
+static int
+sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
+{
+	int cnt = 0;
+	struct sctp_vrf *vrf = NULL;
+
+	/*
+	 * In both the sub-set bound and bound_all cases we return the MAXIMUM
+	 * number of addresses that you COULD get. In reality a sub-set bound
+	 * endpoint may have an exclusion list for a given TCB, and in the
+	 * bound-all case a TCB may not include the loopback or other
+	 * addresses as well.
+	 */
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		return (0);
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		struct sctp_ifn *sctp_ifn;
+		struct sctp_ifa *sctp_ifa;
+
+		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+				/* Count them if they are the right type */
+				switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+#ifdef INET6
+					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
+						cnt += sizeof(struct sockaddr_in6);
+					else
+						cnt += sizeof(struct sockaddr_in);
+#else
+					cnt += sizeof(struct sockaddr_in);
+#endif
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					cnt += sizeof(struct sockaddr_in6);
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					cnt += sizeof(struct sockaddr_conn);
+					break;
+#endif
+				default:
+					break;
+				}
+			}
+		}
+	} else {
+		struct sctp_laddr *laddr;
+
+		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+			switch (laddr->ifa->address.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+#ifdef INET6
+				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
+					cnt += sizeof(struct sockaddr_in6);
+				else
+					cnt += sizeof(struct sockaddr_in);
+#else
+				cnt += sizeof(struct sockaddr_in);
+#endif
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				cnt += sizeof(struct sockaddr_in6);
+				break;
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+				cnt += sizeof(struct sockaddr_conn);
+				break;
+#endif
+			default:
+				break;
+			}
+		}
+	}
+	return (cnt);
+}
+
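+/*
+ * Return an upper bound, in bytes, on the buffer space needed to report
+ * all local addresses for this endpoint (default VRF, or all VRFs when
+ * SCTP_MVRF is defined), taking the global address read lock while
+ * counting.
+ */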
+static int
+sctp_count_max_addresses(struct sctp_inpcb *inp)
+{
+	int cnt = 0;
+#ifdef SCTP_MVRF
+	int id;
+#endif
+
+	SCTP_IPI_ADDR_RLOCK();
+#ifdef SCTP_MVRF
+/*
+ * FIX ME: ?? this WILL count duplicate addresses if they appear
+ * in more than one VRF.
+ */
+	/* count addresses for all VRFs on the endpoint */
+	for (id = 0; id < inp->num_vrfs; id++) {
+		cnt += sctp_count_max_addresses_vrf(inp, inp->m_vrf_ids[id]);
+	}
+#else
+	/* count addresses for the endpoint's default VRF */
+	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
+#endif
+	SCTP_IPI_ADDR_RUNLOCK();
+	return (cnt);
+}
+
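+/*
+ * Common connectx() handler: 'optval' holds an unsigned int address count
+ * followed by a packed list of sockaddrs.  A new association is set up for
+ * those addresses, the INIT is either sent immediately or deferred when
+ * 'delay' is set, and the resulting sctp_assoc_t is written back over the
+ * start of 'optval'.
+ */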
+static int
+sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
+		  size_t optsize, void *p, int delay)
+{
+	int error = 0;
+	int creat_lock_on = 0;
+	struct sctp_tcb *stcb = NULL;
+	struct sockaddr *sa;
+	unsigned int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
+	uint32_t vrf_id;
+	int bad_addresses = 0;
+	sctp_assoc_t *a_id;
+
+	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+		/* We are already connected AND the TCP model */
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+		return (EADDRINUSE);
+	}
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+		SCTP_INP_RLOCK(inp);
+		stcb = LIST_FIRST(&inp->sctp_asoc_list);
+		SCTP_INP_RUNLOCK(inp);
+	}
+	if (stcb) {
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+		return (EALREADY);
+	}
+	SCTP_INP_INCR_REF(inp);
+	SCTP_ASOC_CREATE_LOCK(inp);
+	creat_lock_on = 1;
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+		error = EFAULT;
+		goto out_now;
+	}
+	totaddrp = (unsigned int *)optval;
+	totaddr = *totaddrp;
+	sa = (struct sockaddr *)(totaddrp + 1);
+	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (unsigned int)(optsize - sizeof(int)), &bad_addresses);
+	if ((stcb != NULL) || bad_addresses) {
+		/* Already have, or are bringing up, an association */
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+		creat_lock_on = 0;
+		if (stcb)
+			SCTP_TCB_UNLOCK(stcb);
+		if (bad_addresses == 0) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+			error = EALREADY;
+		}
+		goto out_now;
+	}
+#ifdef INET6
+	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+	    (num_v6 > 0)) {
+		error = EINVAL;
+		goto out_now;
+	}
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+	    (num_v4 > 0)) {
+		struct in6pcb *inp6;
+
+		inp6 = (struct in6pcb *)inp;
+		if (SCTP_IPV6_V6ONLY(inp6)) {
+			/*
+			 * if IPV6_V6ONLY flag, ignore connections destined
+			 * to a v4 addr or v4-mapped addr
+			 */
+			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			goto out_now;
+		}
+	}
+#endif				/* INET6 */
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+	    SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+		error = sctp_inpcb_bind(so, NULL, NULL, p);
+		if (error) {
+			goto out_now;
+		}
+	}
+
+	/* FIX ME: do we want to pass in a vrf on the connect call? */
+	vrf_id = inp->def_vrf_id;
+
+
+	/* We are GOOD to go */
+	stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
+	                       inp->sctp_ep.pre_open_stream_count,
+	                       inp->sctp_ep.port,
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+			       (struct thread *)p
+#elif defined(__Windows__)
+			       (PKTHREAD)p
+#else
+			       (struct proc *)p
+#endif
+		);
+	if (stcb == NULL) {
+		/* Gak! no memory */
+		goto out_now;
+	}
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+		/* Set the connected flag so we can queue data */
+		soisconnecting(so);
+	}
+	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
+	/* move to second address */
+	switch (sa->sa_family) {
+#ifdef INET
+	case AF_INET:
+		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
+		break;
+#endif
+	default:
+		break;
+	}
+
+	error = 0;
+	sctp_connectx_helper_add(stcb, sa, (totaddr-1), &error);
+	/* Fill in the return id */
+	if (error) {
+		(void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE,
+		                      SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
+		goto out_now;
+	}
+	a_id = (sctp_assoc_t *)optval;
+	*a_id = sctp_get_associd(stcb);
+
+	/* initialize authentication parameters for the assoc */
+	sctp_initialize_auth_params(inp, stcb);
+
+	if (delay) {
+		/* doing delayed connection */
+		stcb->asoc.delayed_connection = 1;
+		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
+	} else {
+		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+		sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+	}
+	SCTP_TCB_UNLOCK(stcb);
+ out_now:
+	if (creat_lock_on) {
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+	}
+	SCTP_INP_DECR_REF(inp);
+	return (error);
+}
+
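+/*
+ * Resolve an sctp_assoc_t to a locked TCB: one-to-one style sockets use
+ * their single association, otherwise the id is looked up.  A failed
+ * lookup by id sets 'error' to ENOENT and leaves the enclosing switch
+ * case; otherwise 'stcb' may still be NULL (endpoint-level option).
+ */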
+#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
+		SCTP_INP_RLOCK(inp); \
+		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
+		if (stcb) { \
+			SCTP_TCB_LOCK(stcb); \
+		} \
+		SCTP_INP_RUNLOCK(inp); \
+	} else if (assoc_id > SCTP_ALL_ASSOC) { \
+		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
+		if (stcb == NULL) { \
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
+			error = ENOENT; \
+			break; \
+		} \
+	} else { \
+		stcb = NULL; \
+	} \
+}
+
+
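+/*
+ * Verify that the option buffer is at least sizeof(type) bytes, then cast
+ * it to the requested pointer type; on a short buffer set 'error' to
+ * EINVAL and leave the enclosing switch case.
+ */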
+#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
+	if (size < sizeof(type)) { \
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
+		error = EINVAL; \
+		break; \
+	} else { \
+		destp = (type *)srcp; \
+	} \
+}
+
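+/*
+ * sctp_getopt() backs getsockopt() for IPPROTO_SCTP level options.  As a
+ * rough usage sketch from the application side (assuming the userland
+ * wrapper usrsctp_getsockopt() declared in usrsctp.h and an already opened
+ * usrsctp socket 'sock'; neither is defined in this file):
+ *
+ *	struct sctp_initmsg init;
+ *	socklen_t len = (socklen_t)sizeof(init);
+ *
+ *	memset(&init, 0, sizeof(init));
+ *	if (usrsctp_getsockopt(sock, IPPROTO_SCTP, SCTP_INITMSG,
+ *	                       &init, &len) == 0) {
+ *		... init.sinit_num_ostreams etc. now hold the endpoint
+ *		    defaults reported by the SCTP_INITMSG case below ...
+ *	}
+ */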
+#if defined(__Panda__) || defined(__Userspace__)
+int
+#else
+static int
+#endif
+sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
+	    void *p)
+{
+	struct sctp_inpcb *inp = NULL;
+	int error, val = 0;
+	struct sctp_tcb *stcb = NULL;
+
+	if (optval == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	error = 0;
+
+	switch (optname) {
+	case SCTP_NODELAY:
+	case SCTP_AUTOCLOSE:
+	case SCTP_EXPLICIT_EOR:
+	case SCTP_AUTO_ASCONF:
+	case SCTP_DISABLE_FRAGMENTS:
+	case SCTP_I_WANT_MAPPED_V4_ADDR:
+	case SCTP_USE_EXT_RCVINFO:
+		SCTP_INP_RLOCK(inp);
+		switch (optname) {
+		case SCTP_DISABLE_FRAGMENTS:
+			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
+			break;
+		case SCTP_I_WANT_MAPPED_V4_ADDR:
+			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
+			break;
+		case SCTP_AUTO_ASCONF:
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+				/* only valid for bound all sockets */
+				val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				goto flags_out;
+			}
+			break;
+		case SCTP_EXPLICIT_EOR:
+			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+			break;
+		case SCTP_NODELAY:
+			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
+			break;
+		case SCTP_USE_EXT_RCVINFO:
+			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
+			break;
+		case SCTP_AUTOCLOSE:
+			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
+				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
+			else
+				val = 0;
+			break;
+
+		default:
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+			error = ENOPROTOOPT;
+		} /* end switch (optname) */
+		if (*optsize < sizeof(val)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+	flags_out:
+		SCTP_INP_RUNLOCK(inp);
+		if (error == 0) {
+			/* return the option value */
+			*(int *)optval = val;
+			*optsize = sizeof(val);
+		}
+		break;
+	case SCTP_GET_PACKET_LOG:
+	{
+#ifdef  SCTP_PACKET_LOGGING
+		uint8_t *target;
+		int ret;
+
+		SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
+		ret = sctp_copy_out_packet_log(target, (int)*optsize);
+		*optsize = ret;
+#else
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		error = EOPNOTSUPP;
+#endif
+		break;
+	}
+	case SCTP_REUSE_PORT:
+	{
+		uint32_t *value;
+
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+			/* Can't do this for a 1-m socket */
+			error = EINVAL;
+			break;
+		}
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+		*value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_PARTIAL_DELIVERY_POINT:
+	{
+		uint32_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+		*value = inp->partial_delivery_point;
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_FRAGMENT_INTERLEAVE:
+	{
+		uint32_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
+			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
+				*value = SCTP_FRAG_LEVEL_2;
+			} else {
+				*value = SCTP_FRAG_LEVEL_1;
+			}
+		} else {
+			*value = SCTP_FRAG_LEVEL_0;
+		}
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_INTERLEAVING_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.idata_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				if (inp->idata_supported) {
+					av->assoc_value = 1;
+				} else {
+					av->assoc_value = 0;
+				}
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_CMT_ON_OFF:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			av->assoc_value = stcb->asoc.sctp_cmt_on_off;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->sctp_cmt_on_off;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_PLUGGABLE_CC:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			av->assoc_value = stcb->asoc.congestion_control_module;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_CC_OPTION:
+	{
+		struct sctp_cc_option *cc_opt;
+
+		SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, *optsize);
+		SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id);
+		if (stcb == NULL) {
+			error = EINVAL;
+		} else {
+			if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) {
+				error = ENOTSUP;
+			} else {
+				error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option)(stcb, 0, cc_opt);
+				*optsize = sizeof(struct sctp_cc_option);
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		}
+		break;
+	}
+	case SCTP_PLUGGABLE_SS:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			av->assoc_value = stcb->asoc.stream_scheduling_module;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->sctp_ep.sctp_default_ss_module;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_SS_VALUE:
+	{
+		struct sctp_stream_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			if ((av->stream_id >= stcb->asoc.streamoutcnt) ||
+			    (stcb->asoc.ss_functions.sctp_ss_get_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id],
+			                                               &av->stream_value) < 0)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			} else {
+				*optsize = sizeof(struct sctp_stream_value);
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			/* Can't get stream value without association */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+	}
+	case SCTP_GET_ADDR_LEN:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		error = EINVAL;
+#ifdef INET
+		if (av->assoc_value == AF_INET) {
+			av->assoc_value = sizeof(struct sockaddr_in);
+			error = 0;
+		}
+#endif
+#ifdef INET6
+		if (av->assoc_value == AF_INET6) {
+			av->assoc_value = sizeof(struct sockaddr_in6);
+			error = 0;
+		}
+#endif
+#if defined(__Userspace__)
+		if (av->assoc_value == AF_CONN) {
+			av->assoc_value = sizeof(struct sockaddr_conn);
+			error = 0;
+		}
+#endif
+		if (error) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+		} else {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_GET_ASSOC_NUMBER:
+	{
+		uint32_t *value, cnt;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+		SCTP_INP_RLOCK(inp);
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+			/* Can't do this for a 1-1 socket */
+			error = EINVAL;
+			SCTP_INP_RUNLOCK(inp);
+			break;
+		}
+		cnt = 0;
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			cnt++;
+		}
+		SCTP_INP_RUNLOCK(inp);
+		*value = cnt;
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_GET_ASSOC_ID_LIST:
+	{
+		struct sctp_assoc_ids *ids;
+		uint32_t at;
+		size_t limit;
+
+		SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
+		SCTP_INP_RLOCK(inp);
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+			/* Can't do this for a 1-1 socket */
+			error = EINVAL;
+			SCTP_INP_RUNLOCK(inp);
+			break;
+		}
+		at = 0;
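+		/* ids that fit after the leading gaids_number_of_ids field */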
+		limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			if (at < limit) {
+				ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
+				if (at == 0) {
+					error = EINVAL;
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else {
+				error = EINVAL;
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+		}
+		SCTP_INP_RUNLOCK(inp);
+		if (error == 0) {
+			ids->gaids_number_of_ids = at;
+			*optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
+		}
+		break;
+	}
+	case SCTP_CONTEXT:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.context;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->sctp_context;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_VRF_ID:
+	{
+		uint32_t *default_vrfid;
+
+		SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
+		*default_vrfid = inp->def_vrf_id;
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_GET_ASOC_VRF:
+	{
+		struct sctp_assoc_value *id;
+
+		SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, id->assoc_id);
+		if (stcb == NULL) {
+			error = EINVAL;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+		} else {
+			id->assoc_value = stcb->asoc.vrf_id;
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_GET_VRF_IDS:
+	{
+#ifdef SCTP_MVRF
+		int siz_needed;
+		uint32_t *vrf_ids;
+
+		SCTP_CHECK_AND_CAST(vrf_ids, optval, uint32_t, *optsize);
+		siz_needed = inp->num_vrfs * sizeof(uint32_t);
+		if (*optsize < siz_needed) {
+			error = EINVAL;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+		} else {
+			memcpy(vrf_ids, inp->m_vrf_ids, siz_needed);
+			*optsize = siz_needed;
+		}
+#else
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		error = EOPNOTSUPP;
+#endif
+		break;
+	}
+	case SCTP_GET_NONCE_VALUES:
+	{
+		struct sctp_get_nonce_values *gnv;
+
+		SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
+		SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
+
+		if (stcb) {
+			gnv->gn_peers_tag = stcb->asoc.peer_vtag;
+			gnv->gn_local_tag = stcb->asoc.my_vtag;
+			SCTP_TCB_UNLOCK(stcb);
+			*optsize = sizeof(struct sctp_get_nonce_values);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+			error = ENOTCONN;
+		}
+		break;
+	}
+	case SCTP_DELAYED_SACK:
+	{
+		struct sctp_sack_info *sack;
+
+		SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
+		SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
+		if (stcb) {
+			sack->sack_delay = stcb->asoc.delayed_ack;
+			sack->sack_freq = stcb->asoc.sack_freq;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (sack->sack_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+				sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_sack_info);
+		}
+		break;
+	}
+	case SCTP_GET_SNDBUF_USE:
+	{
+		struct sctp_sockstat *ss;
+
+		SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
+		SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
+
+		if (stcb) {
+			ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
+			ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
+						 stcb->asoc.size_on_all_streams);
+			SCTP_TCB_UNLOCK(stcb);
+			*optsize = sizeof(struct sctp_sockstat);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+			error = ENOTCONN;
+		}
+		break;
+	}
+	case SCTP_MAX_BURST:
+	{
+#if defined(__FreeBSD__) && __FreeBSD_version < 900000
+		uint8_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
+
+		SCTP_INP_RLOCK(inp);
+		if (inp->sctp_ep.max_burst < 256) {
+			*value = inp->sctp_ep.max_burst;
+		} else {
+			*value = 255;
+		}
+		SCTP_INP_RUNLOCK(inp);
+		*optsize = sizeof(uint8_t);
+#else
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.max_burst;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->sctp_ep.max_burst;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+#endif
+		break;
+	}
+	case SCTP_MAXSEG:
+	{
+		struct sctp_assoc_value *av;
+		int ovh;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+					ovh = SCTP_MED_OVERHEAD;
+				} else {
+					ovh = SCTP_MED_V4_OVERHEAD;
+				}
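+				/*
+				 * A frag point at or above the default maximum
+				 * means no user imposed limit; the socket API
+				 * reports that as 0.
+				 */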
+				if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
+					av->assoc_value = 0;
+				else
+					av->assoc_value = inp->sctp_frag_point - ovh;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_GET_STAT_LOG:
+		error = sctp_fill_stat_log(optval, optsize);
+		break;
+	case SCTP_EVENTS:
+	{
+		struct sctp_event_subscribe *events;
+
+		SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
+		memset(events, 0, sizeof(struct sctp_event_subscribe));
+		SCTP_INP_RLOCK(inp);
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
+			events->sctp_data_io_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
+			events->sctp_association_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
+			events->sctp_address_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
+			events->sctp_send_failure_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
+			events->sctp_peer_error_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
+			events->sctp_shutdown_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
+			events->sctp_partial_delivery_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
+			events->sctp_adaptation_layer_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
+			events->sctp_authentication_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
+			events->sctp_sender_dry_event = 1;
+
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
+			events->sctp_stream_reset_event = 1;
+		SCTP_INP_RUNLOCK(inp);
+		*optsize = sizeof(struct sctp_event_subscribe);
+		break;
+	}
+	case SCTP_ADAPTATION_LAYER:
+	{
+		uint32_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+
+		SCTP_INP_RLOCK(inp);
+		*value = inp->sctp_ep.adaptation_layer_indicator;
+		SCTP_INP_RUNLOCK(inp);
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_SET_INITIAL_DBG_SEQ:
+	{
+		uint32_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+		SCTP_INP_RLOCK(inp);
+		*value = inp->sctp_ep.initial_sequence_debug;
+		SCTP_INP_RUNLOCK(inp);
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_GET_LOCAL_ADDR_SIZE:
+	{
+		uint32_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+		SCTP_INP_RLOCK(inp);
+		*value = sctp_count_max_addresses(inp);
+		SCTP_INP_RUNLOCK(inp);
+		*optsize = sizeof(uint32_t);
+		break;
+	}
+	case SCTP_GET_REMOTE_ADDR_SIZE:
+	{
+		uint32_t *value;
+		size_t size;
+		struct sctp_nets *net;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
+		/* FIXME MT: change to sctp_assoc_value? */
+		SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) *value);
+
+		if (stcb) {
+			size = 0;
+			/* Count the sizes */
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+#ifdef INET6
+					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+						size += sizeof(struct sockaddr_in6);
+					} else {
+						size += sizeof(struct sockaddr_in);
+					}
+#else
+					size += sizeof(struct sockaddr_in);
+#endif
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					size += sizeof(struct sockaddr_in6);
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					size += sizeof(struct sockaddr_conn);
+					break;
+#endif
+				default:
+					break;
+				}
+			}
+			SCTP_TCB_UNLOCK(stcb);
+			*value = (uint32_t) size;
+			*optsize = sizeof(uint32_t);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+			error = ENOTCONN;
+		}
+		break;
+	}
+	case SCTP_GET_PEER_ADDRESSES:
+		/*
+		 * Get the peer address information; an array is passed in
+		 * and we pack the addresses into it.
+		 */
+	{
+		size_t cpsz, left;
+		struct sockaddr_storage *sas;
+		struct sctp_nets *net;
+		struct sctp_getaddresses *saddr;
+
+		SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
+		SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
+
+		if (stcb) {
+			left = (*optsize) - sizeof(struct sctp_getaddresses);
+			*optsize = sizeof(struct sctp_getaddresses);
+			sas = (struct sockaddr_storage *)&saddr->addr[0];
+
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+#ifdef INET6
+					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+						cpsz = sizeof(struct sockaddr_in6);
+					} else {
+						cpsz = sizeof(struct sockaddr_in);
+					}
+#else
+					cpsz = sizeof(struct sockaddr_in);
+#endif
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					cpsz = sizeof(struct sockaddr_in6);
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					cpsz = sizeof(struct sockaddr_conn);
+					break;
+#endif
+				default:
+					cpsz = 0;
+					break;
+				}
+				if (cpsz == 0) {
+					break;
+				}
+				if (left < cpsz) {
+					/* not enough room. */
+					break;
+				}
+#if defined(INET) && defined(INET6)
+				if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
+				    (net->ro._l_addr.sa.sa_family == AF_INET)) {
+					/* Must map the address */
+					in6_sin_2_v4mapsin6(&net->ro._l_addr.sin,
+							    (struct sockaddr_in6 *)sas);
+				} else {
+					memcpy(sas, &net->ro._l_addr, cpsz);
+				}
+#else
+				memcpy(sas, &net->ro._l_addr, cpsz);
+#endif
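+				/*
+				 * sin_port, sin6_port and sconn_port share the
+				 * same offset, so one cast covers all families.
+				 */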
+				((struct sockaddr_in *)sas)->sin_port = stcb->rport;
+
+				sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
+				left -= cpsz;
+				*optsize += cpsz;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			error = ENOENT;
+		}
+		break;
+	}
+	case SCTP_GET_LOCAL_ADDRESSES:
+	{
+		size_t limit, actual;
+		struct sockaddr_storage *sas;
+		struct sctp_getaddresses *saddr;
+
+		SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
+		SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
+
+		sas = (struct sockaddr_storage *)&saddr->addr[0];
+		limit = *optsize - sizeof(sctp_assoc_t);
+		actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
+		if (stcb) {
+			SCTP_TCB_UNLOCK(stcb);
+		}
+		*optsize = sizeof(struct sockaddr_storage) + actual;
+		break;
+	}
+	case SCTP_PEER_ADDR_PARAMS:
+	{
+		struct sctp_paddrparams *paddrp;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
+		SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (paddrp->spp_address.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&paddrp->spp_address;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&paddrp->spp_address;
+			}
+		} else {
+			addr = (struct sockaddr *)&paddrp->spp_address;
+		}
+#else
+		addr = (struct sockaddr *)&paddrp->spp_address;
+#endif
+		if (stcb != NULL) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the refcount here since
+			 * sctp_findassociation_ep_addr() will decrement it if it
+			 * finds the stcb, as long as the locked tcb (last
+			 * argument) is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+		if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+			if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+				if (sin->sin_addr.s_addr != INADDR_ANY) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+#ifdef INET6
+			if (addr->sa_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)addr;
+				if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+#if defined(__Userspace__)
+			if (addr->sa_family == AF_CONN) {
+				struct sockaddr_conn *sconn;
+
+				sconn = (struct sockaddr_conn *)addr;
+				if (sconn->sconn_addr != NULL) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+			{
+				error = EAFNOSUPPORT;
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+		}
+
+		if (stcb != NULL) {
+			/* Applies to the specific association */
+			paddrp->spp_flags = 0;
+			if (net != NULL) {
+				paddrp->spp_hbinterval = net->heart_beat_delay;
+				paddrp->spp_pathmaxrxt = net->failure_threshold;
+				paddrp->spp_pathmtu = net->mtu;
+				switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					paddrp->spp_pathmtu -= SCTP_MIN_V4_OVERHEAD;
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					paddrp->spp_pathmtu -= SCTP_MIN_OVERHEAD;
+					break;
+#endif
+				default:
+					break;
+				}
+				/* get flags for HB */
+				if (net->dest_state & SCTP_ADDR_NOHB) {
+					paddrp->spp_flags |= SPP_HB_DISABLE;
+				} else {
+					paddrp->spp_flags |= SPP_HB_ENABLE;
+				}
+				/* get flags for PMTU */
+				if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
+					paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+				} else {
+					paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+				}
+				if (net->dscp & 0x01) {
+					paddrp->spp_dscp = net->dscp & 0xfc;
+					paddrp->spp_flags |= SPP_DSCP;
+				}
+#ifdef INET6
+				if ((net->ro._l_addr.sa.sa_family == AF_INET6) &&
+				    (net->flowlabel & 0x80000000)) {
+					paddrp->spp_ipv6_flowlabel = net->flowlabel & 0x000fffff;
+					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+				}
+#endif
+			} else {
+				/*
+				 * No destination so return default
+				 * value
+				 */
+				paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
+				paddrp->spp_pathmtu = 0;
+				if (stcb->asoc.default_dscp & 0x01) {
+					paddrp->spp_dscp = stcb->asoc.default_dscp & 0xfc;
+					paddrp->spp_flags |= SPP_DSCP;
+				}
+#ifdef INET6
+				if (stcb->asoc.default_flowlabel & 0x80000000) {
+					paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel & 0x000fffff;
+					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+				}
+#endif
+				/* default settings should be these */
+				if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
+					paddrp->spp_flags |= SPP_HB_DISABLE;
+				} else {
+					paddrp->spp_flags |= SPP_HB_ENABLE;
+				}
+				if (sctp_stcb_is_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
+					paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+				} else {
+					paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+				}
+				paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
+			}
+			paddrp->spp_assoc_id = sctp_get_associd(stcb);
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC)) {
+				/* Use endpoint defaults */
+				SCTP_INP_RLOCK(inp);
+				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
+				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+				paddrp->spp_assoc_id = SCTP_FUTURE_ASSOC;
+				/* get inp's default */
+				if (inp->sctp_ep.default_dscp & 0x01) {
+					paddrp->spp_dscp = inp->sctp_ep.default_dscp & 0xfc;
+					paddrp->spp_flags |= SPP_DSCP;
+				}
+#ifdef INET6
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+				    (inp->sctp_ep.default_flowlabel & 0x80000000)) {
+					paddrp->spp_ipv6_flowlabel = inp->sctp_ep.default_flowlabel & 0x000fffff;
+					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
+				}
+#endif
+				/* can't return this */
+				paddrp->spp_pathmtu = 0;
+
+				if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
+					paddrp->spp_flags |= SPP_HB_ENABLE;
+				} else {
+					paddrp->spp_flags |= SPP_HB_DISABLE;
+				}
+				if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD)) {
+					paddrp->spp_flags |= SPP_PMTUD_ENABLE;
+				} else {
+					paddrp->spp_flags |= SPP_PMTUD_DISABLE;
+				}
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_paddrparams);
+		}
+		break;
+	}
+	case SCTP_GET_PEER_ADDR_INFO:
+	{
+		struct sctp_paddrinfo *paddri;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
+		SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (paddri->spinfo_address.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&paddri->spinfo_address;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&paddri->spinfo_address;
+			}
+		} else {
+			addr = (struct sockaddr *)&paddri->spinfo_address;
+		}
+#else
+		addr = (struct sockaddr *)&paddri->spinfo_address;
+#endif
+		if (stcb != NULL) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the refcount here since
+			 * sctp_findassociation_ep_addr() will decrement it if it
+			 * finds the stcb, as long as the locked tcb (last
+			 * argument) is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+
+		if ((stcb != NULL) && (net != NULL)) {
+			if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+				/* It's unconfirmed */
+				paddri->spinfo_state = SCTP_UNCONFIRMED;
+			} else if (net->dest_state & SCTP_ADDR_REACHABLE) {
+				/* It's active */
+				paddri->spinfo_state = SCTP_ACTIVE;
+			} else {
+				/* It's inactive */
+				paddri->spinfo_state = SCTP_INACTIVE;
+			}
+			paddri->spinfo_cwnd = net->cwnd;
+			paddri->spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT;
+			paddri->spinfo_rto = net->RTO;
+			paddri->spinfo_assoc_id = sctp_get_associd(stcb);
+			paddri->spinfo_mtu = net->mtu;
+			switch (addr->sa_family) {
+#if defined(INET)
+			case AF_INET:
+				paddri->spinfo_mtu -= SCTP_MIN_V4_OVERHEAD;
+				break;
+#endif
+#if defined(INET6)
+			case AF_INET6:
+				paddri->spinfo_mtu -= SCTP_MIN_OVERHEAD;
+				break;
+#endif
+			default:
+				break;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+			*optsize = sizeof(struct sctp_paddrinfo);
+		} else {
+			if (stcb != NULL) {
+				SCTP_TCB_UNLOCK(stcb);
+			}
+		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			error = ENOENT;
+		}
+		break;
+	}
+	case SCTP_PCB_STATUS:
+	{
+		struct sctp_pcbinfo *spcb;
+
+		SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
+		sctp_fill_pcbinfo(spcb);
+		*optsize = sizeof(struct sctp_pcbinfo);
+		break;
+	}
+	case SCTP_STATUS:
+	{
+		struct sctp_nets *net;
+		struct sctp_status *sstat;
+
+		SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
+		SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
+
+		if (stcb == NULL) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		sstat->sstat_state = sctp_map_assoc_state(stcb->asoc.state);
+		sstat->sstat_assoc_id = sctp_get_associd(stcb);
+		sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
+		sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
+		/*
+		 * We can't include chunks that have been passed to
+		 * the socket layer. Only things in queue.
+		 */
+		sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
+					 stcb->asoc.cnt_on_all_streams);
+
+
+		sstat->sstat_instrms = stcb->asoc.streamincnt;
+		sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
+		sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
+#ifdef HAVE_SA_LEN
+		memcpy(&sstat->sstat_primary.spinfo_address,
+		       &stcb->asoc.primary_destination->ro._l_addr,
+		       ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
+#else
+		if (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family == AF_INET) {
+			memcpy(&sstat->sstat_primary.spinfo_address,
+			       &stcb->asoc.primary_destination->ro._l_addr,
+			       sizeof(struct sockaddr_in));
+		} else {
+			memcpy(&sstat->sstat_primary.spinfo_address,
+			       &stcb->asoc.primary_destination->ro._l_addr,
+			       sizeof(struct sockaddr_in6));
+		}
+#endif
+		net = stcb->asoc.primary_destination;
+		((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
+		/*
+		 * Again, the user can consult sctp_constants.h for the
+		 * meaning of the network state.
+		 */
+		if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
+			/* It's unconfirmed */
+			sstat->sstat_primary.spinfo_state = SCTP_UNCONFIRMED;
+		} else if (net->dest_state & SCTP_ADDR_REACHABLE) {
+			/* It's active */
+			sstat->sstat_primary.spinfo_state = SCTP_ACTIVE;
+		} else {
+			/* It's inactive */
+			sstat->sstat_primary.spinfo_state = SCTP_INACTIVE;
+		}
+		sstat->sstat_primary.spinfo_cwnd = net->cwnd;
+		sstat->sstat_primary.spinfo_srtt = net->lastsa >> SCTP_RTT_SHIFT;
+		sstat->sstat_primary.spinfo_rto = net->RTO;
+		sstat->sstat_primary.spinfo_mtu = net->mtu;
+		switch (stcb->asoc.primary_destination->ro._l_addr.sa.sa_family) {
+#if defined(INET)
+		case AF_INET:
+			sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_V4_OVERHEAD;
+			break;
+#endif
+#if defined(INET6)
+		case AF_INET6:
+			sstat->sstat_primary.spinfo_mtu -= SCTP_MIN_OVERHEAD;
+			break;
+#endif
+		default:
+			break;
+		}
+		sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
+		SCTP_TCB_UNLOCK(stcb);
+		*optsize = sizeof(struct sctp_status);
+		break;
+	}
+	case SCTP_RTOINFO:
+	{
+		struct sctp_rtoinfo *srto;
+
+		SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
+		SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
+
+		if (stcb) {
+			srto->srto_initial = stcb->asoc.initial_rto;
+			srto->srto_max = stcb->asoc.maxrto;
+			srto->srto_min = stcb->asoc.minrto;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (srto->srto_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				srto->srto_initial = inp->sctp_ep.initial_rto;
+				srto->srto_max = inp->sctp_ep.sctp_maxrto;
+				srto->srto_min = inp->sctp_ep.sctp_minrto;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_rtoinfo);
+		}
+		break;
+	}
+	case SCTP_TIMEOUTS:
+	{
+		struct sctp_timeouts *stimo;
+
+		SCTP_CHECK_AND_CAST(stimo, optval, struct sctp_timeouts, *optsize);
+		SCTP_FIND_STCB(inp, stcb, stimo->stimo_assoc_id);
+
+		if (stcb) {
+			stimo->stimo_init = stcb->asoc.timoinit;
+			stimo->stimo_data = stcb->asoc.timodata;
+			stimo->stimo_sack = stcb->asoc.timosack;
+			stimo->stimo_shutdown = stcb->asoc.timoshutdown;
+			stimo->stimo_heartbeat = stcb->asoc.timoheartbeat;
+			stimo->stimo_cookie = stcb->asoc.timocookie;
+			stimo->stimo_shutdownack = stcb->asoc.timoshutdownack;
+			SCTP_TCB_UNLOCK(stcb);
+			*optsize = sizeof(struct sctp_timeouts);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+	}
+	case SCTP_ASSOCINFO:
+	{
+		struct sctp_assocparams *sasoc;
+
+		SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
+		SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
+
+		if (stcb) {
+			sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
+			sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
+			sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
+			sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
+			sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
+				sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
+				sasoc->sasoc_number_peer_destinations = 0;
+				sasoc->sasoc_peer_rwnd = 0;
+				sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assocparams);
+		}
+		break;
+	}
+	case SCTP_DEFAULT_SEND_PARAM:
+	{
+		struct sctp_sndrcvinfo *s_info;
+
+		SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
+		SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
+
+		if (stcb) {
+			memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_sndrcvinfo);
+		}
+		break;
+	}
+	case SCTP_INITMSG:
+	{
+		struct sctp_initmsg *sinit;
+
+		SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
+		SCTP_INP_RLOCK(inp);
+		sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
+		sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
+		sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
+		sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
+		SCTP_INP_RUNLOCK(inp);
+		*optsize = sizeof(struct sctp_initmsg);
+		break;
+	}
+	case SCTP_PRIMARY_ADDR:
+		/* we allow a "get" operation on this */
+	{
+		struct sctp_setprim *ssp;
+
+		SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
+		SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
+
+		if (stcb) {
+			union sctp_sockstore *addr;
+
+			addr = &stcb->asoc.primary_destination->ro._l_addr;
+			switch (addr->sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+#ifdef INET6
+				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+					in6_sin_2_v4mapsin6(&addr->sin,
+					                    (struct sockaddr_in6 *)&ssp->ssp_addr);
+				} else {
+					memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in));
+				}
+#else
+				memcpy(&ssp->ssp_addr, &addr->sin, sizeof(struct sockaddr_in));
+#endif
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				memcpy(&ssp->ssp_addr, &addr->sin6, sizeof(struct sockaddr_in6));
+				break;
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+				memcpy(&ssp->ssp_addr, &addr->sconn, sizeof(struct sockaddr_conn));
+				break;
+#endif
+			default:
+				break;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+			*optsize = sizeof(struct sctp_setprim);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+	}
+	case SCTP_HMAC_IDENT:
+	{
+		struct sctp_hmacalgo *shmac;
+		sctp_hmaclist_t *hmaclist;
+		uint32_t size;
+		int i;
+
+		SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
+
+		SCTP_INP_RLOCK(inp);
+		hmaclist = inp->sctp_ep.local_hmacs;
+		if (hmaclist == NULL) {
+			/* no HMACs to return */
+			*optsize = sizeof(*shmac);
+			SCTP_INP_RUNLOCK(inp);
+			break;
+		}
+		/* is there room for all of the hmac ids? */
+		size = sizeof(*shmac) + (hmaclist->num_algo *
+					 sizeof(shmac->shmac_idents[0]));
+		if ((size_t)(*optsize) < size) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_INP_RUNLOCK(inp);
+			break;
+		}
+		/* copy in the list */
+		shmac->shmac_number_of_idents = hmaclist->num_algo;
+		for (i = 0; i < hmaclist->num_algo; i++) {
+			shmac->shmac_idents[i] = hmaclist->hmac[i];
+		}
+		SCTP_INP_RUNLOCK(inp);
+		*optsize = size;
+		break;
+	}
+	case SCTP_AUTH_ACTIVE_KEY:
+	{
+		struct sctp_authkeyid *scact;
+
+		SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
+		SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
+
+		if (stcb) {
+			/* get the active key on the assoc */
+			scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (scact->scact_assoc_id == SCTP_FUTURE_ASSOC)) {
+				/* get the endpoint active key */
+				SCTP_INP_RLOCK(inp);
+				scact->scact_keynumber = inp->sctp_ep.default_keyid;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_authkeyid);
+		}
+		break;
+	}
+	case SCTP_LOCAL_AUTH_CHUNKS:
+	{
+		struct sctp_authchunks *sac;
+		sctp_auth_chklist_t *chklist = NULL;
+		size_t size = 0;
+
+		SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
+		SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
+
+		if (stcb) {
+			/* get off the assoc */
+			chklist = stcb->asoc.local_auth_chunks;
+			/* is there enough space? */
+			size = sctp_auth_get_chklist_size(chklist);
+			if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+				error = EINVAL;
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+			} else {
+				/* copy in the chunks */
+				(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+				sac->gauth_number_of_chunks = (uint32_t)size;
+				*optsize = sizeof(struct sctp_authchunks) + size;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (sac->gauth_assoc_id == SCTP_FUTURE_ASSOC)) {
+				/* get off the endpoint */
+				SCTP_INP_RLOCK(inp);
+				chklist = inp->sctp_ep.local_auth_chunks;
+				/* is there enough space? */
+				size = sctp_auth_get_chklist_size(chklist);
+				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+					error = EINVAL;
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				} else {
+					/* copy in the chunks */
+					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+					sac->gauth_number_of_chunks = (uint32_t)size;
+					*optsize = sizeof(struct sctp_authchunks) + size;
+				}
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_PEER_AUTH_CHUNKS:
+	{
+		struct sctp_authchunks *sac;
+		sctp_auth_chklist_t *chklist = NULL;
+		size_t size = 0;
+
+		SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
+		SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
+
+		if (stcb) {
+			/* get off the assoc */
+			chklist = stcb->asoc.peer_auth_chunks;
+			/* is there enough space? */
+			size = sctp_auth_get_chklist_size(chklist);
+			if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
+				error = EINVAL;
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+			} else {
+				/* copy in the chunks */
+				(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
+				sac->gauth_number_of_chunks = (uint32_t)size;
+				*optsize = sizeof(struct sctp_authchunks) + size;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			error = ENOENT;
+		}
+		break;
+	}
+#if defined(HAVE_SCTP_PEELOFF_SOCKOPT)
+	case SCTP_PEELOFF:
+	{
+		struct sctp_peeloff_opt *peeloff;
+
+		SCTP_CHECK_AND_CAST(peeloff, optval, struct sctp_peeloff_opt, *optsize);
+		/* do the peeloff */
+		error = sctp_peeloff_option(p, peeloff);
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_peeloff_opt);
+		}
+	}
+	break;
+#endif /* HAVE_SCTP_PEELOFF_SOCKOPT */
+	case SCTP_EVENT:
+	{
+		struct sctp_event *event;
+		uint32_t event_type;
+
+		SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, *optsize);
+		SCTP_FIND_STCB(inp, stcb, event->se_assoc_id);
+
+		switch (event->se_type) {
+		case SCTP_ASSOC_CHANGE:
+			event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT;
+			break;
+		case SCTP_PEER_ADDR_CHANGE:
+			event_type = SCTP_PCB_FLAGS_RECVPADDREVNT;
+			break;
+		case SCTP_REMOTE_ERROR:
+			event_type = SCTP_PCB_FLAGS_RECVPEERERR;
+			break;
+		case SCTP_SEND_FAILED:
+			event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
+			break;
+		case SCTP_SHUTDOWN_EVENT:
+			event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
+			break;
+		case SCTP_ADAPTATION_INDICATION:
+			event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT;
+			break;
+		case SCTP_PARTIAL_DELIVERY_EVENT:
+			event_type = SCTP_PCB_FLAGS_PDAPIEVNT;
+			break;
+		case SCTP_AUTHENTICATION_EVENT:
+			event_type = SCTP_PCB_FLAGS_AUTHEVNT;
+			break;
+		case SCTP_STREAM_RESET_EVENT:
+			event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT;
+			break;
+		case SCTP_SENDER_DRY_EVENT:
+			event_type = SCTP_PCB_FLAGS_DRYEVNT;
+			break;
+		case SCTP_NOTIFICATIONS_STOPPED_EVENT:
+			event_type = 0;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP);
+			error = ENOTSUP;
+			break;
+		case SCTP_ASSOC_RESET_EVENT:
+			event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT;
+			break;
+		case SCTP_STREAM_CHANGE_EVENT:
+			event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT;
+			break;
+		case SCTP_SEND_FAILED_EVENT:
+			event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT;
+			break;
+		default:
+			event_type = 0;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		if (event_type > 0) {
+			if (stcb) {
+				event->se_on = sctp_stcb_is_feature_on(inp, stcb, event_type);
+				SCTP_TCB_UNLOCK(stcb);
+			} else {
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+				    (event->se_assoc_id == SCTP_FUTURE_ASSOC)) {
+					SCTP_INP_RLOCK(inp);
+					event->se_on = sctp_is_feature_on(inp, event_type);
+					SCTP_INP_RUNLOCK(inp);
+				} else {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				}
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_event);
+		}
+		break;
+	}
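+	/*
+	 * SCTP_RECVRCVINFO / SCTP_RECVNXTINFO: report whether SCTP_RCVINFO
+	 * resp. SCTP_NXTINFO ancillary data is delivered with received
+	 * messages. The option value is a plain int used as a boolean.
+	 */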
+	case SCTP_RECVRCVINFO:
+	{
+		int onoff;
+
+		if (*optsize < sizeof(int)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		} else {
+			SCTP_INP_RLOCK(inp);
+			onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO);
+			SCTP_INP_RUNLOCK(inp);
+		}
+		if (error == 0) {
+			/* return the option value */
+			*(int *)optval = onoff;
+			*optsize = sizeof(int);
+		}
+		break;
+	}
+	case SCTP_RECVNXTINFO:
+	{
+		int onoff;
+
+		if (*optsize < sizeof(int)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		} else {
+			SCTP_INP_RLOCK(inp);
+			onoff = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO);
+			SCTP_INP_RUNLOCK(inp);
+		}
+		if (error == 0) {
+			/* return the option value */
+			*(int *)optval = onoff;
+			*optsize = sizeof(int);
+		}
+		break;
+	}
+	case SCTP_DEFAULT_SNDINFO:
+	{
+		struct sctp_sndinfo *info;
+
+		SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, *optsize);
+		SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id);
+
+		if (stcb) {
+			info->snd_sid = stcb->asoc.def_send.sinfo_stream;
+			info->snd_flags = stcb->asoc.def_send.sinfo_flags;
+			info->snd_flags &= 0xfff0;
+			info->snd_ppid = stcb->asoc.def_send.sinfo_ppid;
+			info->snd_context = stcb->asoc.def_send.sinfo_context;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (info->snd_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				info->snd_sid = inp->def_send.sinfo_stream;
+				info->snd_flags = inp->def_send.sinfo_flags;
+				info->snd_flags &= 0xfff0;
+				info->snd_ppid = inp->def_send.sinfo_ppid;
+				info->snd_context = inp->def_send.sinfo_context;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_sndinfo);
+		}
+		break;
+	}
+	case SCTP_DEFAULT_PRINFO:
+	{
+		struct sctp_default_prinfo *info;
+
+		SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, *optsize);
+		SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id);
+
+		if (stcb) {
+			info->pr_policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags);
+			info->pr_value = stcb->asoc.def_send.sinfo_timetolive;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (info->pr_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				info->pr_policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags);
+				info->pr_value = inp->def_send.sinfo_timetolive;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_default_prinfo);
+		}
+		break;
+	}
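+	/*
+	 * SCTP_PEER_ADDR_THLDS: per-destination error threshold
+	 * (spt_pathmaxrxt) and potentially-failed threshold (spt_pathpfthld).
+	 * A wildcard address selects the association or endpoint defaults.
+	 * spt_pathcpthld is always reported as 0xffff here.
+	 */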
+	case SCTP_PEER_ADDR_THLDS:
+	{
+		struct sctp_paddrthlds *thlds;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, *optsize);
+		SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (thlds->spt_address.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&thlds->spt_address;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&thlds->spt_address;
+			}
+		} else {
+			addr = (struct sockaddr *)&thlds->spt_address;
+		}
+#else
+		addr = (struct sockaddr *)&thlds->spt_address;
+#endif
+		if (stcb != NULL) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the ref count here since
+			 * sctp_findassociation_ep_addr() will decrement it if it
+			 * finds the stcb, as long as the locked tcb (last argument) is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+		if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+			if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+				if (sin->sin_addr.s_addr != INADDR_ANY) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+#ifdef INET6
+			if (addr->sa_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)addr;
+				if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+#if defined(__Userspace__)
+			if (addr->sa_family == AF_CONN) {
+				struct sockaddr_conn *sconn;
+
+				sconn = (struct sockaddr_conn *)addr;
+				if (sconn->sconn_addr != NULL) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+			{
+				error = EAFNOSUPPORT;
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+		}
+
+		if (stcb != NULL) {
+			if (net != NULL) {
+				thlds->spt_pathmaxrxt = net->failure_threshold;
+				thlds->spt_pathpfthld = net->pf_threshold;
+				thlds->spt_pathcpthld = 0xffff;
+			} else {
+				thlds->spt_pathmaxrxt = stcb->asoc.def_net_failure;
+				thlds->spt_pathpfthld = stcb->asoc.def_net_pf_threshold;
+				thlds->spt_pathcpthld = 0xffff;
+			}
+			thlds->spt_assoc_id = sctp_get_associd(stcb);
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC)) {
+				/* Use endpoint defaults */
+				SCTP_INP_RLOCK(inp);
+				thlds->spt_pathmaxrxt = inp->sctp_ep.def_net_failure;
+				thlds->spt_pathpfthld = inp->sctp_ep.def_net_pf_threshold;
+				thlds->spt_pathcpthld = 0xffff;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_paddrthlds);
+		}
+		break;
+	}
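+	/*
+	 * SCTP_REMOTE_UDP_ENCAPS_PORT: report the remote UDP encapsulation
+	 * port (RFC 6951) for a specific peer address, or the association or
+	 * endpoint wide value. A port of 0 means no UDP encapsulation is used.
+	 */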
+	case SCTP_REMOTE_UDP_ENCAPS_PORT:
+	{
+		struct sctp_udpencaps *encaps;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, *optsize);
+		SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (encaps->sue_address.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&encaps->sue_address;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&encaps->sue_address;
+			}
+		} else {
+			addr = (struct sockaddr *)&encaps->sue_address;
+		}
+#else
+		addr = (struct sockaddr *)&encaps->sue_address;
+#endif
+		if (stcb) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the ref count here since
+			 * sctp_findassociation_ep_addr() will decrement it if it
+			 * finds the stcb, as long as the locked tcb (last argument) is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+		if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+			if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+				if (sin->sin_addr.s_addr != INADDR_ANY) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+#ifdef INET6
+			if (addr->sa_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)addr;
+				if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+#if defined(__Userspace__)
+			if (addr->sa_family == AF_CONN) {
+				struct sockaddr_conn *sconn;
+
+				sconn = (struct sockaddr_conn *)addr;
+				if (sconn->sconn_addr != NULL) {
+					error = EINVAL;
+					SCTP_TCB_UNLOCK(stcb);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+					break;
+				}
+			} else
+#endif
+			{
+				error = EAFNOSUPPORT;
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+		}
+
+		if (stcb != NULL) {
+			if (net) {
+				encaps->sue_port = net->port;
+			} else {
+				encaps->sue_port = stcb->asoc.port;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				encaps->sue_port = inp->sctp_ep.port;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_udpencaps);
+		}
+		break;
+	}
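+	/*
+	 * The *_SUPPORTED options below (ECN, PR-SCTP, AUTH, ASCONF, RECONFIG,
+	 * NR-SACK, packet drop) all follow the same pattern: report the value
+	 * in effect for an existing association, otherwise the endpoint
+	 * default that will be used for future associations.
+	 */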
+	case SCTP_ECN_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.ecn_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->ecn_supported;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_PR_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.prsctp_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->prsctp_supported;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_AUTH_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.auth_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->auth_supported;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_ASCONF_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.asconf_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->asconf_supported;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_RECONFIG_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.reconfig_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->reconfig_supported;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_NRSACK_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.nrsack_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->nrsack_supported;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_PKTDROP_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.pktdrop_supported;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->pktdrop_supported;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	case SCTP_ENABLE_STREAM_RESET:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = (uint32_t)stcb->asoc.local_strreset_support;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = (uint32_t)inp->local_strreset_support;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
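+	/*
+	 * SCTP_PR_STREAM_STATUS / SCTP_PR_ASSOC_STATUS: abandoned-message
+	 * counters for PR-SCTP, per outgoing stream resp. per association.
+	 * Per-policy counters are only kept when built with
+	 * SCTP_DETAILED_STR_STATS; otherwise only the SCTP_PR_SCTP_ALL
+	 * aggregate is available for the per-stream query.
+	 */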
+	case SCTP_PR_STREAM_STATUS:
+	{
+		struct sctp_prstatus *sprstat;
+		uint16_t sid;
+		uint16_t policy;
+
+		SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize);
+		SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id);
+
+		sid = sprstat->sprstat_sid;
+		policy = sprstat->sprstat_policy;
+#if defined(SCTP_DETAILED_STR_STATS)
+		if ((stcb != NULL) &&
+		    (sid < stcb->asoc.streamoutcnt) &&
+		    (policy != SCTP_PR_SCTP_NONE) &&
+		    ((policy <= SCTP_PR_SCTP_MAX) ||
+		     (policy == SCTP_PR_SCTP_ALL))) {
+			if (policy == SCTP_PR_SCTP_ALL) {
+				sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0];
+				sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0];
+			} else {
+				sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[policy];
+				sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[policy];
+			}
+#else
+		if ((stcb != NULL) &&
+		    (sid < stcb->asoc.streamoutcnt) &&
+		    (policy == SCTP_PR_SCTP_ALL)) {
+			sprstat->sprstat_abandoned_unsent = stcb->asoc.strmout[sid].abandoned_unsent[0];
+			sprstat->sprstat_abandoned_sent = stcb->asoc.strmout[sid].abandoned_sent[0];
+#endif
+			SCTP_TCB_UNLOCK(stcb);
+			*optsize = sizeof(struct sctp_prstatus);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+	}
+	case SCTP_PR_ASSOC_STATUS:
+	{
+		struct sctp_prstatus *sprstat;
+		uint16_t policy;
+
+		SCTP_CHECK_AND_CAST(sprstat, optval, struct sctp_prstatus, *optsize);
+		SCTP_FIND_STCB(inp, stcb, sprstat->sprstat_assoc_id);
+
+		policy = sprstat->sprstat_policy;
+		if ((stcb != NULL) &&
+		    (policy != SCTP_PR_SCTP_NONE) &&
+		    ((policy <= SCTP_PR_SCTP_MAX) ||
+		     (policy == SCTP_PR_SCTP_ALL))) {
+			if (policy == SCTP_PR_SCTP_ALL) {
+				sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[0];
+				sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[0];
+			} else {
+				sprstat->sprstat_abandoned_unsent = stcb->asoc.abandoned_unsent[policy];
+				sprstat->sprstat_abandoned_sent = stcb->asoc.abandoned_sent[policy];
+			}
+			SCTP_TCB_UNLOCK(stcb);
+			*optsize = sizeof(struct sctp_prstatus);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+	}
+	case SCTP_MAX_CWND:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			av->assoc_value = stcb->asoc.max_cwnd;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				av->assoc_value = inp->max_cwnd;
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		if (error == 0) {
+			*optsize = sizeof(struct sctp_assoc_value);
+		}
+		break;
+	}
+	default:
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+		error = ENOPROTOOPT;
+		break;
+	} /* end switch (sopt->sopt_name) */
+	if (error) {
+		*optsize = 0;
+	}
+	return (error);
+}
+
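+/*
+ * sctp_setopt() handles all SCTP level setsockopt() processing. optval points
+ * to the caller supplied option buffer of optsize bytes; SCTP_CHECK_AND_CAST()
+ * validates the size before each cast and SCTP_FIND_STCB() resolves an
+ * association id, returning the TCB locked when one is found. Options carrying
+ * an assoc_id generally accept SCTP_FUTURE_ASSOC (endpoint defaults),
+ * SCTP_CURRENT_ASSOC (all existing associations) and SCTP_ALL_ASSOC (both) on
+ * one-to-many sockets.
+ */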
+#if defined(__Panda__) || defined(__Userspace__)
+int
+#else
+static int
+#endif
+sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
+	    void *p)
+{
+	int error, set_opt;
+	uint32_t *mopt;
+	struct sctp_tcb *stcb = NULL;
+	struct sctp_inpcb *inp = NULL;
+	uint32_t vrf_id;
+
+	if (optval == NULL) {
+		SCTP_PRINTF("optval is NULL\n");
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_PRINTF("inp is NULL?\n");
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (EINVAL);
+	}
+	vrf_id = inp->def_vrf_id;
+
+	error = 0;
+	switch (optname) {
+	case SCTP_NODELAY:
+	case SCTP_AUTOCLOSE:
+	case SCTP_AUTO_ASCONF:
+	case SCTP_EXPLICIT_EOR:
+	case SCTP_DISABLE_FRAGMENTS:
+	case SCTP_USE_EXT_RCVINFO:
+	case SCTP_I_WANT_MAPPED_V4_ADDR:
+		/* copy in the option value */
+		SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
+		set_opt = 0;
+		if (error)
+			break;
+		switch (optname) {
+		case SCTP_DISABLE_FRAGMENTS:
+			set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
+			break;
+		case SCTP_AUTO_ASCONF:
+			/*
+			 * NOTE: we don't really support this flag
+			 */
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+				/* only valid for bound all sockets */
+				if ((SCTP_BASE_SYSCTL(sctp_auto_asconf) == 0) &&
+				    (*mopt != 0)) {
+					/* forbidden by admin */
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPERM);
+					return (EPERM);
+				}
+				set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				return (EINVAL);
+			}
+			break;
+		case SCTP_EXPLICIT_EOR:
+			set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
+			break;
+		case SCTP_USE_EXT_RCVINFO:
+			set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
+			break;
+		case SCTP_I_WANT_MAPPED_V4_ADDR:
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				return (EINVAL);
+			}
+			break;
+		case SCTP_NODELAY:
+			set_opt = SCTP_PCB_FLAGS_NODELAY;
+			break;
+		case SCTP_AUTOCLOSE:
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				return (EINVAL);
+			}
+			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
+			/*
+			 * The value is given in seconds and stored in ticks.
+			 * Note this does not affect existing associations,
+			 * only new ones.
+			 */
+			inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
+			break;
+		}
+		SCTP_INP_WLOCK(inp);
+		if (*mopt != 0) {
+			sctp_feature_on(inp, set_opt);
+		} else {
+			sctp_feature_off(inp, set_opt);
+		}
+		SCTP_INP_WUNLOCK(inp);
+		break;
+	case SCTP_REUSE_PORT:
+	{
+		SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND)  == 0) {
+			/* Can't set it after we are bound */
+			error = EINVAL;
+			break;
+		}
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
+			/* Can't do this for a 1-m socket */
+			error = EINVAL;
+			break;
+		}
+		/* optval is always non-NULL here; test the value itself */
+		if (*mopt != 0)
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
+		else
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
+		break;
+	}
+	case SCTP_PARTIAL_DELIVERY_POINT:
+	{
+		uint32_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
+		if (*value > SCTP_SB_LIMIT_RCV(so)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		inp->partial_delivery_point = *value;
+		break;
+	}
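+	/*
+	 * SCTP_FRAGMENT_INTERLEAVE levels: level 0 disables both fragment and
+	 * stream interleaving, level 1 enables fragment interleaving only and
+	 * level 2 enables both (level 2 is required before
+	 * SCTP_INTERLEAVING_SUPPORTED below can be switched on).
+	 */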
+	case SCTP_FRAGMENT_INTERLEAVE:
+		/* not yet until we re-write sctp_recvmsg() */
+	{
+		uint32_t *level;
+
+		SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
+		if (*level == SCTP_FRAG_LEVEL_2) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+		} else if (*level == SCTP_FRAG_LEVEL_1) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+		} else if (*level == SCTP_FRAG_LEVEL_0) {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
+
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+	}
+	case SCTP_INTERLEAVING_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (av->assoc_value == 0) {
+					inp->idata_supported = 0;
+				} else {
+					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))  &&
+					    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS))) {
+						inp->idata_supported = 1;
+					} else {
+						/* Must have Frag interleave and stream interleave on */
+						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+						error = EINVAL;
+					}
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_CMT_ON_OFF:
+		if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
+			struct sctp_assoc_value *av;
+
+			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+			if (av->assoc_value > SCTP_CMT_MAX) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+			if (stcb) {
+				stcb->asoc.sctp_cmt_on_off = av->assoc_value;
+				SCTP_TCB_UNLOCK(stcb);
+			} else {
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+				    (av->assoc_id == SCTP_FUTURE_ASSOC) ||
+				    (av->assoc_id == SCTP_ALL_ASSOC)) {
+					SCTP_INP_WLOCK(inp);
+					inp->sctp_cmt_on_off = av->assoc_value;
+					SCTP_INP_WUNLOCK(inp);
+				}
+				if ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+				    (av->assoc_id == SCTP_ALL_ASSOC)) {
+					SCTP_INP_RLOCK(inp);
+					LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+						SCTP_TCB_LOCK(stcb);
+						stcb->asoc.sctp_cmt_on_off = av->assoc_value;
+						SCTP_TCB_UNLOCK(stcb);
+					}
+					SCTP_INP_RUNLOCK(inp);
+				}
+			}
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+			error = ENOPROTOOPT;
+		}
+		break;
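+	/*
+	 * SCTP_PLUGGABLE_CC: select the congestion control module (RFC 2581
+	 * style default, HSTCP, HTCP or RTCC). When applied to an existing
+	 * association, the new module's initial parameters are re-applied to
+	 * every destination address.
+	 */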
+	case SCTP_PLUGGABLE_CC:
+	{
+		struct sctp_assoc_value *av;
+		struct sctp_nets *net;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		if ((av->assoc_value != SCTP_CC_RFC2581) &&
+		    (av->assoc_value != SCTP_CC_HSTCP) &&
+		    (av->assoc_value != SCTP_CC_HTCP) &&
+		    (av->assoc_value != SCTP_CC_RTCC)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value];
+			stcb->asoc.congestion_control_module = av->assoc_value;
+			if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) {
+				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+					stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+				}
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value];
+					stcb->asoc.congestion_control_module = av->assoc_value;
+					if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) {
+						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+							stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
+						}
+					}
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_CC_OPTION:
+	{
+		struct sctp_cc_option *cc_opt;
+
+		SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, optsize);
+		SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id);
+		if (stcb == NULL) {
+			if (cc_opt->aid_value.assoc_id == SCTP_CURRENT_ASSOC) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					if (stcb->asoc.cc_functions.sctp_cwnd_socket_option) {
+						(*stcb->asoc.cc_functions.sctp_cwnd_socket_option)(stcb, 1, cc_opt);
+					}
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				error = EINVAL;
+			}
+		} else {
+			if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) {
+				error = ENOTSUP;
+			} else {
+				error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option)(stcb, 1,
+											   cc_opt);
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		}
+		break;
+	}
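+	/*
+	 * SCTP_PLUGGABLE_SS: select the stream scheduler. For an existing
+	 * association the current scheduler state is cleared and the new one
+	 * initialized; SCTP_FUTURE_ASSOC only changes the default used for
+	 * new associations.
+	 */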
+	case SCTP_PLUGGABLE_SS:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		if ((av->assoc_value != SCTP_SS_DEFAULT) &&
+		    (av->assoc_value != SCTP_SS_ROUND_ROBIN) &&
+		    (av->assoc_value != SCTP_SS_ROUND_ROBIN_PACKET) &&
+		    (av->assoc_value != SCTP_SS_PRIORITY) &&
+		    (av->assoc_value != SCTP_SS_FAIR_BANDWITH) &&
+		    (av->assoc_value != SCTP_SS_FIRST_COME)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1);
+			stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value];
+			stcb->asoc.stream_scheduling_module = av->assoc_value;
+			stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->sctp_ep.sctp_default_ss_module = av->assoc_value;
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					stcb->asoc.ss_functions.sctp_ss_clear(stcb, &stcb->asoc, 1, 1);
+					stcb->asoc.ss_functions = sctp_ss_functions[av->assoc_value];
+					stcb->asoc.stream_scheduling_module = av->assoc_value;
+					stcb->asoc.ss_functions.sctp_ss_init(stcb, &stcb->asoc, 1);
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_SS_VALUE:
+	{
+		struct sctp_stream_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_stream_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			if ((av->stream_id >= stcb->asoc.streamoutcnt) ||
+			    (stcb->asoc.ss_functions.sctp_ss_set_value(stcb, &stcb->asoc, &stcb->asoc.strmout[av->stream_id],
+			                                               av->stream_value) < 0)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if (av->assoc_id == SCTP_CURRENT_ASSOC) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					if (av->stream_id < stcb->asoc.streamoutcnt) {
+						stcb->asoc.ss_functions.sctp_ss_set_value(stcb,
+						                                          &stcb->asoc,
+						                                          &stcb->asoc.strmout[av->stream_id],
+						                                          av->stream_value);
+					}
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			} else {
+				/* Can't set stream value without association */
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_CLR_STAT_LOG:
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		error = EOPNOTSUPP;
+		break;
+	case SCTP_CONTEXT:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			stcb->asoc.context = av->assoc_value;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->sctp_context = av->assoc_value;
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					stcb->asoc.context = av->assoc_value;
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_VRF_ID:
+	{
+		uint32_t *default_vrfid;
+#ifdef SCTP_MVRF
+		int i;
+#endif
+		SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
+		if (*default_vrfid > SCTP_MAX_VRF_ID) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+#ifdef SCTP_MVRF
+		for (i = 0; i < inp->num_vrfs; i++) {
+			/* The VRF must be in the VRF list */
+			if (*default_vrfid == inp->m_vrf_ids[i]) {
+				SCTP_INP_WLOCK(inp);
+				inp->def_vrf_id = *default_vrfid;
+				SCTP_INP_WUNLOCK(inp);
+				goto sctp_done;
+			}
+		}
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+#else
+		inp->def_vrf_id = *default_vrfid;
+#endif
+#ifdef SCTP_MVRF
+	sctp_done:
+#endif
+		break;
+	}
+	case SCTP_DEL_VRF_ID:
+	{
+#ifdef SCTP_MVRF
+		uint32_t *del_vrfid;
+		int i, fnd = 0;
+
+		SCTP_CHECK_AND_CAST(del_vrfid, optval, uint32_t, optsize);
+		if (*del_vrfid > SCTP_MAX_VRF_ID) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		if (inp->num_vrfs == 1) {
+			/* Can't delete last one */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+			/* Can't add more once you are bound */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		SCTP_INP_WLOCK(inp);
+		for (i = 0; i < inp->num_vrfs; i++) {
+			if (*del_vrfid == inp->m_vrf_ids[i]) {
+				fnd = 1;
+				break;
+			}
+		}
+		if (!fnd) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_INP_WUNLOCK(inp);
+			break;
+		}
+		if (i != (inp->num_vrfs - 1)) {
+			/* Move the last entry into the freed slot */
+			inp->m_vrf_ids[i] = inp->m_vrf_ids[(inp->num_vrfs - 1)];
+		}
+		if (*del_vrfid == inp->def_vrf_id) {
+			/* Take the first one as the new default */
+			inp->def_vrf_id = inp->m_vrf_ids[0];
+		}
+		/* Drop the count by one, discarding the last slot */
+		inp->num_vrfs--;
+		SCTP_INP_WUNLOCK(inp);
+#else
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		error = EOPNOTSUPP;
+#endif
+		break;
+	}
+	case SCTP_ADD_VRF_ID:
+	{
+#ifdef SCTP_MVRF
+		uint32_t *add_vrfid;
+		int i;
+
+		SCTP_CHECK_AND_CAST(add_vrfid, optval, uint32_t, optsize);
+		if (*add_vrfid > SCTP_MAX_VRF_ID) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
+			/* Can't add more once you are bound */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		SCTP_INP_WLOCK(inp);
+		/* Verify it's not already here */
+		for (i = 0; i < inp->num_vrfs; i++) {
+			if (*add_vrfid == inp->m_vrf_ids[i]) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+				error = EALREADY;
+				SCTP_INP_WUNLOCK(inp);
+				break;
+			}
+		}
+		if (error) {
+			/* a duplicate was found; the lock was already released above */
+			break;
+		}
+		if ((inp->num_vrfs + 1) > inp->vrf_size) {
+			/* need to grow array */
+			uint32_t *tarray;
+			SCTP_MALLOC(tarray, uint32_t *,
+				    (sizeof(uint32_t) * (inp->vrf_size + SCTP_DEFAULT_VRF_SIZE)),
+				    SCTP_M_MVRF);
+			if (tarray == NULL) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+				error = ENOMEM;
+				SCTP_INP_WUNLOCK(inp);
+				break;
+			}
+			memcpy(tarray, inp->m_vrf_ids, (sizeof(uint32_t) * inp->vrf_size));
+			SCTP_FREE(inp->m_vrf_ids, SCTP_M_MVRF);
+			inp->m_vrf_ids = tarray;
+			inp->vrf_size += SCTP_DEFAULT_VRF_SIZE;
+		}
+		inp->m_vrf_ids[inp->num_vrfs] = *add_vrfid;
+		inp->num_vrfs++;
+		SCTP_INP_WUNLOCK(inp);
+#else
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		error = EOPNOTSUPP;
+#endif
+		break;
+	}
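+	/*
+	 * SCTP_DELAYED_SACK: sack_delay is given in milliseconds and clamped
+	 * to SCTP_MAX_SACK_DELAY and to at least one clock tick; sack_freq is
+	 * the number of packets received before a SACK is sent. A value of 0
+	 * leaves the corresponding setting unchanged.
+	 */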
+	case SCTP_DELAYED_SACK:
+	{
+		struct sctp_sack_info *sack;
+
+		SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
+		SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
+		if (sack->sack_delay) {
+			if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
+				sack->sack_delay = SCTP_MAX_SACK_DELAY;
+			if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
+				sack->sack_delay = TICKS_TO_MSEC(1);
+			}
+		}
+		if (stcb) {
+			if (sack->sack_delay) {
+				stcb->asoc.delayed_ack = sack->sack_delay;
+			}
+			if (sack->sack_freq) {
+				stcb->asoc.sack_freq = sack->sack_freq;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (sack->sack_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (sack->sack_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (sack->sack_delay) {
+					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
+				}
+				if (sack->sack_freq) {
+					inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((sack->sack_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (sack->sack_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					if (sack->sack_delay) {
+						stcb->asoc.delayed_ack = sack->sack_delay;
+					}
+					if (sack->sack_freq) {
+						stcb->asoc.sack_freq = sack->sack_freq;
+					}
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_AUTH_CHUNK:
+	{
+		struct sctp_authchunk *sauth;
+
+		SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
+
+		SCTP_INP_WLOCK(inp);
+		if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		SCTP_INP_WUNLOCK(inp);
+		break;
+	}
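+	/*
+	 * SCTP_AUTH_KEY: install a shared key (RFC 4895). The key material
+	 * follows the sctp_authkey header; a zero sca_keylength means the
+	 * remainder of the option buffer is the key. Cached keys for the key
+	 * id are cleared before the new key is inserted, replacing any
+	 * existing one.
+	 */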
+	case SCTP_AUTH_KEY:
+	{
+		struct sctp_authkey *sca;
+		struct sctp_keyhead *shared_keys;
+		sctp_sharedkey_t *shared_key;
+		sctp_key_t *key = NULL;
+		size_t size;
+
+		SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
+		if (sca->sca_keylength == 0) {
+			size = optsize - sizeof(struct sctp_authkey);
+		} else {
+		        if (sca->sca_keylength + sizeof(struct sctp_authkey) <= optsize) {
+				size = sca->sca_keylength;
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+		}
+		SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
+
+		if (stcb) {
+			shared_keys = &stcb->asoc.shared_keys;
+			/* clear the cached keys for this key id */
+			sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
+			/*
+			 * create the new shared key and
+			 * insert/replace it
+			 */
+			if (size > 0) {
+				key = sctp_set_key(sca->sca_key, (uint32_t) size);
+				if (key == NULL) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+					error = ENOMEM;
+					SCTP_TCB_UNLOCK(stcb);
+					break;
+				}
+			}
+			shared_key = sctp_alloc_sharedkey();
+			if (shared_key == NULL) {
+				sctp_free_key(key);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+				error = ENOMEM;
+				SCTP_TCB_UNLOCK(stcb);
+				break;
+			}
+			shared_key->key = key;
+			shared_key->keyid = sca->sca_keynumber;
+			error = sctp_insert_sharedkey(shared_keys, shared_key);
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (sca->sca_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (sca->sca_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				shared_keys = &inp->sctp_ep.shared_keys;
+				/*
+				 * clear the cached keys on all assocs for
+				 * this key id
+				 */
+				sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
+				/*
+				 * create the new shared key and
+				 * insert/replace it
+				 */
+				if (size > 0) {
+					key = sctp_set_key(sca->sca_key, (uint32_t) size);
+					if (key == NULL) {
+						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+						error = ENOMEM;
+						SCTP_INP_WUNLOCK(inp);
+						break;
+					}
+				}
+				shared_key = sctp_alloc_sharedkey();
+				if (shared_key == NULL) {
+					sctp_free_key(key);
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+					error = ENOMEM;
+					SCTP_INP_WUNLOCK(inp);
+					break;
+				}
+				shared_key->key = key;
+				shared_key->keyid = sca->sca_keynumber;
+				error = sctp_insert_sharedkey(shared_keys, shared_key);
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((sca->sca_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (sca->sca_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					shared_keys = &stcb->asoc.shared_keys;
+					/* clear the cached keys for this key id */
+					sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
+					/*
+					 * create the new shared key and
+					 * insert/replace it
+					 */
+					if (size > 0) {
+						key = sctp_set_key(sca->sca_key, (uint32_t) size);
+						if (key == NULL) {
+							SCTP_TCB_UNLOCK(stcb);
+							continue;
+						}
+					}
+					shared_key = sctp_alloc_sharedkey();
+					if (shared_key == NULL) {
+						sctp_free_key(key);
+						SCTP_TCB_UNLOCK(stcb);
+						continue;
+					}
+					shared_key->key = key;
+					shared_key->keyid = sca->sca_keynumber;
+					error = sctp_insert_sharedkey(shared_keys, shared_key);
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_HMAC_IDENT:
+	{
+		struct sctp_hmacalgo *shmac;
+		sctp_hmaclist_t *hmaclist;
+		uint16_t hmacid;
+		uint32_t i;
+
+		SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
+		if ((optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) ||
+		    (shmac->shmac_number_of_idents > 0xffff)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+
+		hmaclist = sctp_alloc_hmaclist((uint16_t)shmac->shmac_number_of_idents);
+		if (hmaclist == NULL) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+			error = ENOMEM;
+			break;
+		}
+		for (i = 0; i < shmac->shmac_number_of_idents; i++) {
+			hmacid = shmac->shmac_idents[i];
+			if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
+				/* invalid HMACs were found */
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				sctp_free_hmaclist(hmaclist);
+				goto sctp_set_hmac_done;
+			}
+		}
+		for (i = 0; i < hmaclist->num_algo; i++) {
+			if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
+				/* already in list */
+				break;
+			}
+		}
+		if (i == hmaclist->num_algo) {
+			/* not found in list */
+			sctp_free_hmaclist(hmaclist);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		/* set it on the endpoint */
+		SCTP_INP_WLOCK(inp);
+		if (inp->sctp_ep.local_hmacs)
+			sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
+		inp->sctp_ep.local_hmacs = hmaclist;
+		SCTP_INP_WUNLOCK(inp);
+	sctp_set_hmac_done:
+		break;
+	}
+	case SCTP_AUTH_ACTIVE_KEY:
+	{
+		struct sctp_authkeyid *scact;
+
+		SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
+		SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
+
+		/* set the active key on the right place */
+		if (stcb) {
+			/* set the active key on the assoc */
+			if (sctp_auth_setactivekey(stcb,
+						   scact->scact_keynumber)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
+						    SCTP_FROM_SCTP_USRREQ,
+						    EINVAL);
+				error = EINVAL;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (scact->scact_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (scact->scact_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((scact->scact_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (scact->scact_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					sctp_auth_setactivekey(stcb, scact->scact_keynumber);
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_AUTH_DELETE_KEY:
+	{
+		struct sctp_authkeyid *scdel;
+
+		SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
+		SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
+
+		/* delete the key from the right place */
+		if (stcb) {
+			if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (scdel->scact_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (scdel->scact_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((scdel->scact_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (scdel->scact_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					sctp_delete_sharedkey(stcb, scdel->scact_keynumber);
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_AUTH_DEACTIVATE_KEY:
+	{
+		struct sctp_authkeyid *keyid;
+
+		SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid, optsize);
+		SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
+
+		/* deactivate the key from the right place */
+		if (stcb) {
+			if (sctp_deact_sharedkey(stcb, keyid->scact_keynumber)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (keyid->scact_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (keyid->scact_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (sctp_deact_sharedkey_ep(inp, keyid->scact_keynumber)) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((keyid->scact_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (keyid->scact_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					sctp_deact_sharedkey(stcb, keyid->scact_keynumber);
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
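+	/*
+	 * SCTP_ENABLE_STREAM_RESET: enable or disable the RFC 6525
+	 * reconfiguration features; assoc_value is a bit mask restricted to
+	 * SCTP_ENABLE_VALUE_MASK.
+	 */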
+	case SCTP_ENABLE_STREAM_RESET:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		if (av->assoc_value & (~SCTP_ENABLE_VALUE_MASK)) {
+		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+		if (stcb) {
+			stcb->asoc.local_strreset_support = (uint8_t)av->assoc_value;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->local_strreset_support = (uint8_t)av->assoc_value;
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					stcb->asoc.local_strreset_support = (uint8_t)av->assoc_value;
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+
+		}
+		break;
+	}
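+	/*
+	 * SCTP_RESET_STREAMS: request an incoming and/or outgoing stream reset
+	 * (RFC 6525). Requires an existing association whose peer supports
+	 * RECONFIG and no reset already in progress; an empty stream list
+	 * means all streams.
+	 *
+	 * Illustrative application-side usage (a sketch, not part of this
+	 * file; "sock" and "assoc_id" are assumed to exist in the caller):
+	 *
+	 *   size_t len = sizeof(struct sctp_reset_streams) + sizeof(uint16_t);
+	 *   struct sctp_reset_streams *srs = calloc(1, len);
+	 *   srs->srs_assoc_id = assoc_id;
+	 *   srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
+	 *   srs->srs_number_streams = 1;
+	 *   srs->srs_stream_list[0] = 0;   // reset outgoing stream 0
+	 *   usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_RESET_STREAMS,
+	 *                      srs, (socklen_t)len);
+	 *   free(srs);
+	 */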
+	case SCTP_RESET_STREAMS:
+	{
+		struct sctp_reset_streams *strrst;
+		int i, send_out = 0;
+		int send_in = 0;
+
+		SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_reset_streams, optsize);
+		SCTP_FIND_STCB(inp, stcb, strrst->srs_assoc_id);
+		if (stcb == NULL) {
+		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			error = ENOENT;
+			break;
+		}
+		if (stcb->asoc.reconfig_supported == 0) {
+			/*
+			 * Peer does not support the chunk type.
+			 */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+			error = EOPNOTSUPP;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		if (sizeof(struct sctp_reset_streams) +
+		    strrst->srs_number_streams * sizeof(uint16_t) > optsize) {
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		if (strrst->srs_flags & SCTP_STREAM_RESET_INCOMING) {
+			send_in = 1;
+			if (stcb->asoc.stream_reset_outstanding) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+				error = EALREADY;
+				SCTP_TCB_UNLOCK(stcb);
+				break;
+			}
+		}
+		if (strrst->srs_flags & SCTP_STREAM_RESET_OUTGOING) {
+			send_out = 1;
+		}
+		if ((strrst->srs_number_streams > SCTP_MAX_STREAMS_AT_ONCE_RESET) && send_in) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
+			error = ENOMEM;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		if ((send_in == 0) && (send_out == 0)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		for (i = 0; i < strrst->srs_number_streams; i++) {
+			if ((send_in) &&
+			    (strrst->srs_stream_list[i] >= stcb->asoc.streamincnt)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+			if ((send_out) &&
+			    (strrst->srs_stream_list[i] >= stcb->asoc.streamoutcnt)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+		}
+		if (error) {
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		if (send_out) {
+			int cnt;
+			uint16_t strm;
+			if (strrst->srs_number_streams) {
+				for (i = 0, cnt = 0; i < strrst->srs_number_streams; i++) {
+					strm = strrst->srs_stream_list[i];
+					if (stcb->asoc.strmout[strm].state == SCTP_STREAM_OPEN) {
+						stcb->asoc.strmout[strm].state = SCTP_STREAM_RESET_PENDING;
+						cnt++;
+					}
+				}
+			} else {
+				/* It's all of them: mark every open outgoing stream */
+				for (i = 0, cnt = 0; i < stcb->asoc.streamoutcnt; i++) {
+					if (stcb->asoc.strmout[i].state == SCTP_STREAM_OPEN) {
+						stcb->asoc.strmout[i].state = SCTP_STREAM_RESET_PENDING;
+						cnt++;
+					}
+				}
+			}
+		}
+		if (send_in) {
+			error = sctp_send_str_reset_req(stcb, strrst->srs_number_streams,
+							strrst->srs_stream_list,
+							send_in, 0, 0, 0, 0, 0);
+		} else {
+			error = sctp_send_stream_reset_out_if_possible(stcb, SCTP_SO_LOCKED);
+		}
+		if (error == 0) {
+			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
+		} else {
+			 /*
+			  * For outgoing streams don't report any problems in
+			  * sending the request to the application.
+			  * XXX: Double check resetting incoming streams.
+			  */
+			error = 0;
+		}
+		SCTP_TCB_UNLOCK(stcb);
+		break;
+	}
+	case SCTP_ADD_STREAMS:
+	{
+		struct sctp_add_streams *stradd;
+		uint8_t addstream = 0;
+		uint16_t add_o_strmcnt = 0;
+		uint16_t add_i_strmcnt = 0;
+
+		SCTP_CHECK_AND_CAST(stradd, optval, struct sctp_add_streams, optsize);
+		SCTP_FIND_STCB(inp, stcb, stradd->sas_assoc_id);
+		if (stcb == NULL) {
+		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			error = ENOENT;
+			break;
+		}
+		if (stcb->asoc.reconfig_supported == 0) {
+			/*
+			 * Peer does not support the chunk type.
+			 */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+			error = EOPNOTSUPP;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		if (stcb->asoc.stream_reset_outstanding) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+			error = EALREADY;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		if ((stradd->sas_outstrms == 0) &&
+		    (stradd->sas_instrms == 0)) {
+			error = EINVAL;
+			goto skip_stuff;
+		}
+		if (stradd->sas_outstrms) {
+			addstream = 1;
+			/* We allocate here */
+			add_o_strmcnt = stradd->sas_outstrms;
+			if ((((int)add_o_strmcnt) + ((int)stcb->asoc.streamoutcnt)) > 0x0000ffff) {
+				/* You can't have more than 64k */
+				error = EINVAL;
+				goto skip_stuff;
+			}
+		}
+		if (stradd->sas_instrms) {
+			int cnt;
+
+			addstream |= 2;
+			/* We allocate inside sctp_send_str_reset_req() */
+			add_i_strmcnt = stradd->sas_instrms;
+			cnt = add_i_strmcnt;
+			cnt += stcb->asoc.streamincnt;
+			if (cnt > 0x0000ffff) {
+				/* You can't have more than 64k */
+				error = EINVAL;
+				goto skip_stuff;
+			}
+			if (cnt > (int)stcb->asoc.max_inbound_streams) {
+				/* More than you are allowed */
+				error = EINVAL;
+				goto skip_stuff;
+			}
+		}
+		error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, addstream, add_o_strmcnt, add_i_strmcnt, 0);
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
+	skip_stuff:
+		SCTP_TCB_UNLOCK(stcb);
+		break;
+	}
+	case SCTP_RESET_ASSOC:
+	{
+		int i;
+		uint32_t *value;
+
+		SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
+		SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) *value);
+		if (stcb == NULL) {
+		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			error = ENOENT;
+			break;
+		}
+		if (stcb->asoc.reconfig_supported == 0) {
+			/*
+			 * Peer does not support the chunk type.
+			 */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+			error = EOPNOTSUPP;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		if (stcb->asoc.stream_reset_outstanding) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+			error = EALREADY;
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		/* Is there any data pending in the send or sent queues? */
+		if (!TAILQ_EMPTY(&stcb->asoc.send_queue) ||
+		    !TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
+		busy_out:
+			error = EBUSY;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+			SCTP_TCB_UNLOCK(stcb);
+			break;
+		}
+		/* Do any streams have data queued? */
+		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
+			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
+				goto busy_out;
+			}
+		}
+		error = sctp_send_str_reset_req(stcb, 0, NULL, 0, 1, 0, 0, 0, 0);
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
+		SCTP_TCB_UNLOCK(stcb);
+		break;
+	}
+	case SCTP_CONNECT_X:
+		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
+		break;
+	case SCTP_CONNECT_X_DELAYED:
+		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
+		break;
+	case SCTP_CONNECT_X_COMPLETE:
+	{
+		struct sockaddr *sa;
+
+		/* FIXME MT: Is this check correct? */
+		SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
+
+		/* find tcb */
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+			SCTP_INP_RLOCK(inp);
+			stcb = LIST_FIRST(&inp->sctp_asoc_list);
+			if (stcb) {
+				SCTP_TCB_LOCK(stcb);
+			}
+			SCTP_INP_RUNLOCK(inp);
+		} else {
+			/* We increment the refcount here since
+			 * sctp_findassociation_ep_addr() will decrement it again if it
+			 * finds the stcb, as long as the locked tcb (last argument)
+			 * is NULL.
+			 */
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+
+		if (stcb == NULL) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			error = ENOENT;
+			break;
+		}
+		if (stcb->asoc.delayed_connection == 1) {
+			stcb->asoc.delayed_connection = 0;
+			(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
+					stcb->asoc.primary_destination,
+					SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
+			sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+		} else {
+			/*
+			 * already expired or did not use delayed
+			 * connectx
+			 */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+			error = EALREADY;
+		}
+		SCTP_TCB_UNLOCK(stcb);
+		break;
+	}
+	case SCTP_MAX_BURST:
+	{
+#if defined(__FreeBSD__) && __FreeBSD_version < 900000
+		uint8_t *burst;
+
+		SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
+
+		SCTP_INP_WLOCK(inp);
+		inp->sctp_ep.max_burst = *burst;
+		SCTP_INP_WUNLOCK(inp);
+#else
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			stcb->asoc.max_burst = av->assoc_value;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->sctp_ep.max_burst = av->assoc_value;
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((av->assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (av->assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					stcb->asoc.max_burst = av->assoc_value;
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+#endif
+		break;
+	}
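+	/*
+	 * Illustrative userland sketch (not part of this file), assuming a
+	 * build where SCTP_MAX_BURST takes a struct sctp_assoc_value (the
+	 * non-legacy branch above): limit bursts to four packets for future
+	 * associations. "sock" is hypothetical.
+	 *
+	 *   struct sctp_assoc_value av;
+	 *
+	 *   memset(&av, 0, sizeof(av));
+	 *   av.assoc_id = SCTP_FUTURE_ASSOC;
+	 *   av.assoc_value = 4;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_MAX_BURST,
+	 *                            &av, (socklen_t)sizeof(av));
+	 */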
+	case SCTP_MAXSEG:
+	{
+		struct sctp_assoc_value *av;
+		int ovh;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+			ovh = SCTP_MED_OVERHEAD;
+		} else {
+			ovh = SCTP_MED_V4_OVERHEAD;
+		}
+		if (stcb) {
+			if (av->assoc_value) {
+				stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
+			} else {
+				stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				/* FIXME MT: I think this is not in tune with the API I-D */
+				if (av->assoc_value) {
+					inp->sctp_frag_point = (av->assoc_value + ovh);
+				} else {
+					inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
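+	/*
+	 * Illustrative userland sketch (not part of this file): assoc_value
+	 * is the largest user-data segment; the code above adds the
+	 * per-family overhead (ovh) itself, and 0 restores
+	 * SCTP_DEFAULT_MAXSEGMENT. "sock" is hypothetical.
+	 *
+	 *   struct sctp_assoc_value av;
+	 *
+	 *   memset(&av, 0, sizeof(av));
+	 *   av.assoc_id = SCTP_FUTURE_ASSOC;
+	 *   av.assoc_value = 1200;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_MAXSEG,
+	 *                            &av, (socklen_t)sizeof(av));
+	 */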
+	case SCTP_EVENTS:
+	{
+		struct sctp_event_subscribe *events;
+
+		SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
+
+		SCTP_INP_WLOCK(inp);
+		if (events->sctp_data_io_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
+		}
+
+		if (events->sctp_association_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+		}
+
+		if (events->sctp_address_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
+		}
+
+		if (events->sctp_send_failure_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+		}
+
+		if (events->sctp_peer_error_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
+		}
+
+		if (events->sctp_shutdown_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+		}
+
+		if (events->sctp_partial_delivery_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
+		}
+
+		if (events->sctp_adaptation_layer_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+		}
+
+		if (events->sctp_authentication_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
+		}
+
+		if (events->sctp_sender_dry_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
+		}
+
+		if (events->sctp_stream_reset_event) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+		}
+		SCTP_INP_WUNLOCK(inp);
+
+		SCTP_INP_RLOCK(inp);
+		LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+			SCTP_TCB_LOCK(stcb);
+			if (events->sctp_association_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT);
+			}
+			if (events->sctp_address_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT);
+			}
+			if (events->sctp_send_failure_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
+			}
+			if (events->sctp_peer_error_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVPEERERR);
+			}
+			if (events->sctp_shutdown_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
+			}
+			if (events->sctp_partial_delivery_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_PDAPIEVNT);
+			}
+			if (events->sctp_adaptation_layer_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
+			}
+			if (events->sctp_authentication_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_AUTHEVNT);
+			}
+			if (events->sctp_sender_dry_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DRYEVNT);
+			}
+			if (events->sctp_stream_reset_event) {
+				sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+			} else {
+				sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		}
+		/* Send up the sender dry event only for 1-to-1 style sockets. */
+		if (events->sctp_sender_dry_event) {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+				stcb = LIST_FIRST(&inp->sctp_asoc_list);
+				if (stcb) {
+					SCTP_TCB_LOCK(stcb);
+					if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+					    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+					    (stcb->asoc.stream_queue_cnt == 0)) {
+						sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
+					}
+					SCTP_TCB_UNLOCK(stcb);
+				}
+			}
+		}
+		SCTP_INP_RUNLOCK(inp);
+		break;
+	}
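+	/*
+	 * Illustrative userland sketch (not part of this file): the legacy
+	 * way to subscribe to notifications; every member of struct
+	 * sctp_event_subscribe is an on/off flag mirroring the feature bits
+	 * toggled above. "sock" is hypothetical.
+	 *
+	 *   struct sctp_event_subscribe es;
+	 *
+	 *   memset(&es, 0, sizeof(es));
+	 *   es.sctp_data_io_event = 1;
+	 *   es.sctp_association_event = 1;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_EVENTS,
+	 *                            &es, (socklen_t)sizeof(es));
+	 */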
+	case SCTP_ADAPTATION_LAYER:
+	{
+		struct sctp_setadaptation *adap_bits;
+
+		SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
+		SCTP_INP_WLOCK(inp);
+		inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
+		inp->sctp_ep.adaptation_layer_indicator_provided = 1;
+		SCTP_INP_WUNLOCK(inp);
+		break;
+	}
+#ifdef SCTP_DEBUG
+	case SCTP_SET_INITIAL_DBG_SEQ:
+	{
+		uint32_t *vvv;
+
+		SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
+		SCTP_INP_WLOCK(inp);
+		inp->sctp_ep.initial_sequence_debug = *vvv;
+		SCTP_INP_WUNLOCK(inp);
+		break;
+	}
+#endif
+	case SCTP_DEFAULT_SEND_PARAM:
+	{
+		struct sctp_sndrcvinfo *s_info;
+
+		SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
+		SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
+
+		if (stcb) {
+			if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) {
+				memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (s_info->sinfo_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((s_info->sinfo_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (s_info->sinfo_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					if (s_info->sinfo_stream < stcb->asoc.streamoutcnt) {
+						memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
+					}
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_PEER_ADDR_PARAMS:
+	{
+		struct sctp_paddrparams *paddrp;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
+		SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (paddrp->spp_address.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&paddrp->spp_address;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&paddrp->spp_address;
+			}
+		} else {
+			addr = (struct sockaddr *)&paddrp->spp_address;
+		}
+#else
+		addr = (struct sockaddr *)&paddrp->spp_address;
+#endif
+		if (stcb != NULL) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the refcount here since
+			 * sctp_findassociation_ep_addr() will decrement it again if it
+			 * finds the stcb, as long as the locked tcb (last argument)
+			 * is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr,
+			                                    &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+		if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+			if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+				if (sin->sin_addr.s_addr != INADDR_ANY) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+#ifdef INET6
+			if (addr->sa_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)addr;
+				if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+#if defined(__Userspace__)
+			if (addr->sa_family == AF_CONN) {
+				struct sockaddr_conn *sconn;
+
+				sconn = (struct sockaddr_conn *)addr;
+				if (sconn->sconn_addr != NULL) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+			{
+				error = EAFNOSUPPORT;
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+		}
+		/* sanity checks */
+		if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
+			if (stcb)
+				SCTP_TCB_UNLOCK(stcb);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+
+		if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
+			if (stcb)
+				SCTP_TCB_UNLOCK(stcb);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+
+		if (stcb != NULL) {
+			/************************TCB SPECIFIC SET ******************/
+			if (net != NULL) {
+				/************************NET SPECIFIC SET ******************/
+				if (paddrp->spp_flags & SPP_HB_DISABLE) {
+					if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+					    !(net->dest_state & SCTP_ADDR_NOHB)) {
+						sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+								SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
+					}
+					net->dest_state |= SCTP_ADDR_NOHB;
+				}
+				if (paddrp->spp_flags & SPP_HB_ENABLE) {
+					if (paddrp->spp_hbinterval) {
+						net->heart_beat_delay = paddrp->spp_hbinterval;
+					} else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+						net->heart_beat_delay = 0;
+					}
+					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+					                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
+					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+					net->dest_state &= ~SCTP_ADDR_NOHB;
+				}
+				if (paddrp->spp_flags & SPP_HB_DEMAND) {
+					/* on demand HB */
+					sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED);
+					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+				}
+				if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
+					if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+						sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+								SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
+					}
+					net->dest_state |= SCTP_ADDR_NO_PMTUD;
+					net->mtu = paddrp->spp_pathmtu;
+					switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+					case AF_INET:
+						net->mtu += SCTP_MIN_V4_OVERHEAD;
+						break;
+#endif
+#ifdef INET6
+					case AF_INET6:
+						net->mtu += SCTP_MIN_OVERHEAD;
+						break;
+#endif
+					default:
+						break;
+					}
+					if (net->mtu < stcb->asoc.smallest_mtu) {
+						sctp_pathmtu_adjustment(stcb, net->mtu);
+					}
+				}
+				if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+					if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+						sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+					}
+					net->dest_state &= ~SCTP_ADDR_NO_PMTUD;
+				}
+				if (paddrp->spp_pathmaxrxt) {
+					if (net->dest_state & SCTP_ADDR_PF) {
+						if (net->error_count > paddrp->spp_pathmaxrxt) {
+							net->dest_state &= ~SCTP_ADDR_PF;
+						}
+					} else {
+						if ((net->error_count <= paddrp->spp_pathmaxrxt) &&
+						    (net->error_count > net->pf_threshold)) {
+							net->dest_state |= SCTP_ADDR_PF;
+							sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+							sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+							                stcb->sctp_ep, stcb, net,
+							                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
+							sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+						}
+					}
+					if (net->dest_state & SCTP_ADDR_REACHABLE) {
+						if (net->error_count > paddrp->spp_pathmaxrxt) {
+							net->dest_state &= ~SCTP_ADDR_REACHABLE;
+							sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+						}
+					} else {
+						if (net->error_count <= paddrp->spp_pathmaxrxt) {
+							net->dest_state |= SCTP_ADDR_REACHABLE;
+							sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+						}
+					}
+					net->failure_threshold = paddrp->spp_pathmaxrxt;
+				}
+				if (paddrp->spp_flags & SPP_DSCP) {
+					net->dscp = paddrp->spp_dscp & 0xfc;
+					net->dscp |= 0x01;
+				}
+#ifdef INET6
+				if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+					if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+						net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+						net->flowlabel |= 0x80000000;
+					}
+				}
+#endif
+			} else {
+				/************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
+				if (paddrp->spp_pathmaxrxt != 0) {
+					stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
+					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+						if (net->dest_state & SCTP_ADDR_PF) {
+							if (net->error_count > paddrp->spp_pathmaxrxt) {
+								net->dest_state &= ~SCTP_ADDR_PF;
+							}
+						} else {
+							if ((net->error_count <= paddrp->spp_pathmaxrxt) &&
+							    (net->error_count > net->pf_threshold)) {
+								net->dest_state |= SCTP_ADDR_PF;
+								sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+								sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+								                stcb->sctp_ep, stcb, net,
+								                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_13);
+								sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+							}
+						}
+						if (net->dest_state & SCTP_ADDR_REACHABLE) {
+							if (net->error_count > paddrp->spp_pathmaxrxt) {
+								net->dest_state &= ~SCTP_ADDR_REACHABLE;
+								sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+							}
+						} else {
+							if (net->error_count <= paddrp->spp_pathmaxrxt) {
+								net->dest_state |= SCTP_ADDR_REACHABLE;
+								sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+							}
+						}
+						net->failure_threshold = paddrp->spp_pathmaxrxt;
+					}
+				}
+
+				if (paddrp->spp_flags & SPP_HB_ENABLE) {
+					if (paddrp->spp_hbinterval != 0) {
+						stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
+					} else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+						stcb->asoc.heart_beat_delay = 0;
+					}
+					/* Turn back on the timer */
+					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+						if (paddrp->spp_hbinterval != 0) {
+							net->heart_beat_delay = paddrp->spp_hbinterval;
+						} else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+							net->heart_beat_delay = 0;
+						}
+						if (net->dest_state & SCTP_ADDR_NOHB) {
+							net->dest_state &= ~SCTP_ADDR_NOHB;
+						}
+						sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
+								SCTP_FROM_SCTP_USRREQ + SCTP_LOC_14);
+						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+					}
+					sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+				}
+				if (paddrp->spp_flags & SPP_HB_DISABLE) {
+					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+						if (!(net->dest_state & SCTP_ADDR_NOHB)) {
+							net->dest_state |= SCTP_ADDR_NOHB;
+							if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+								sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+								                inp, stcb, net,
+								                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_15);
+							}
+						}
+					}
+					sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+				}
+				if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
+					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+							sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
+									SCTP_FROM_SCTP_USRREQ + SCTP_LOC_16);
+						}
+						net->dest_state |= SCTP_ADDR_NO_PMTUD;
+						net->mtu = paddrp->spp_pathmtu;
+						switch (net->ro._l_addr.sa.sa_family) {
+#ifdef INET
+						case AF_INET:
+							net->mtu += SCTP_MIN_V4_OVERHEAD;
+							break;
+#endif
+#ifdef INET6
+						case AF_INET6:
+							net->mtu += SCTP_MIN_OVERHEAD;
+							break;
+#endif
+						default:
+							break;
+						}
+						if (net->mtu < stcb->asoc.smallest_mtu) {
+							sctp_pathmtu_adjustment(stcb, net->mtu);
+						}
+					}
+					sctp_stcb_feature_on(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+				}
+				if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+						if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
+							sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
+						}
+						net->dest_state &= ~SCTP_ADDR_NO_PMTUD;
+					}
+					sctp_stcb_feature_off(inp, stcb, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+				}
+				if (paddrp->spp_flags & SPP_DSCP) {
+					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+						net->dscp = paddrp->spp_dscp & 0xfc;
+						net->dscp |= 0x01;
+					}
+					stcb->asoc.default_dscp = paddrp->spp_dscp & 0xfc;
+					stcb->asoc.default_dscp |= 0x01;
+				}
+#ifdef INET6
+				if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+						if (net->ro._l_addr.sa.sa_family == AF_INET6) {
+							net->flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+							net->flowlabel |= 0x80000000;
+						}
+					}
+					stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+					stcb->asoc.default_flowlabel |= 0x80000000;
+				}
+#endif
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			/************************NO TCB, SET TO default stuff ******************/
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (paddrp->spp_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				/*
+				 * For TOS/FLOWLABEL, use the corresponding
+				 * IP-level options on the socket.
+				 */
+				if (paddrp->spp_pathmaxrxt != 0) {
+					inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
+				}
+
+				if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
+					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
+				else if (paddrp->spp_hbinterval != 0) {
+					if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
+						paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
+					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
+				}
+
+				if (paddrp->spp_flags & SPP_HB_ENABLE) {
+					if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO) {
+						inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
+					} else if (paddrp->spp_hbinterval) {
+						inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
+					}
+					sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+				} else if (paddrp->spp_flags & SPP_HB_DISABLE) {
+					sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
+				}
+				if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
+					sctp_feature_off(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+				} else if (paddrp->spp_flags & SPP_PMTUD_DISABLE) {
+					sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_NOT_PMTUD);
+				}
+				if (paddrp->spp_flags & SPP_DSCP) {
+					inp->sctp_ep.default_dscp = paddrp->spp_dscp & 0xfc;
+					inp->sctp_ep.default_dscp |= 0x01;
+				}
+#ifdef INET6
+				if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
+					if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+						inp->sctp_ep.default_flowlabel = paddrp->spp_ipv6_flowlabel & 0x000fffff;
+						inp->sctp_ep.default_flowlabel |= 0x80000000;
+					}
+				}
+#endif
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_RTOINFO:
+	{
+		struct sctp_rtoinfo *srto;
+		uint32_t new_init, new_min, new_max;
+
+		SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
+		SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
+
+		if (stcb) {
+			if (srto->srto_initial)
+				new_init = srto->srto_initial;
+			else
+				new_init = stcb->asoc.initial_rto;
+			if (srto->srto_max)
+				new_max = srto->srto_max;
+			else
+				new_max = stcb->asoc.maxrto;
+			if (srto->srto_min)
+				new_min = srto->srto_min;
+			else
+				new_min = stcb->asoc.minrto;
+			if ((new_min <= new_init) && (new_init <= new_max)) {
+				stcb->asoc.initial_rto = new_init;
+				stcb->asoc.maxrto = new_max;
+				stcb->asoc.minrto = new_min;
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (srto->srto_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (srto->srto_initial)
+					new_init = srto->srto_initial;
+				else
+					new_init = inp->sctp_ep.initial_rto;
+				if (srto->srto_max)
+					new_max = srto->srto_max;
+				else
+					new_max = inp->sctp_ep.sctp_maxrto;
+				if (srto->srto_min)
+					new_min = srto->srto_min;
+				else
+					new_min = inp->sctp_ep.sctp_minrto;
+				if ((new_min <= new_init) && (new_init <= new_max)) {
+					inp->sctp_ep.initial_rto = new_init;
+					inp->sctp_ep.sctp_maxrto = new_max;
+					inp->sctp_ep.sctp_minrto = new_min;
+				} else {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
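+	/*
+	 * Illustrative userland sketch (not part of this file): a zero field
+	 * leaves that RTO bound unchanged, and min <= initial <= max must
+	 * hold, as enforced above. Values are in milliseconds; "sock" is
+	 * hypothetical.
+	 *
+	 *   struct sctp_rtoinfo rto;
+	 *
+	 *   memset(&rto, 0, sizeof(rto));
+	 *   rto.srto_assoc_id = SCTP_FUTURE_ASSOC;
+	 *   rto.srto_min = 500;
+	 *   rto.srto_initial = 1000;
+	 *   rto.srto_max = 3000;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_RTOINFO,
+	 *                            &rto, (socklen_t)sizeof(rto));
+	 */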
+	case SCTP_ASSOCINFO:
+	{
+		struct sctp_assocparams *sasoc;
+
+		SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
+		SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
+		if (sasoc->sasoc_cookie_life) {
+			/* boundary check the cookie life */
+			if (sasoc->sasoc_cookie_life < 1000)
+				sasoc->sasoc_cookie_life = 1000;
+			if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
+				sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
+			}
+		}
+		if (stcb) {
+			if (sasoc->sasoc_asocmaxrxt)
+				stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
+			if (sasoc->sasoc_cookie_life) {
+				stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (sasoc->sasoc_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (sasoc->sasoc_asocmaxrxt)
+					inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
+				if (sasoc->sasoc_cookie_life) {
+					inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
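+	/*
+	 * Illustrative userland sketch (not part of this file):
+	 * sasoc_cookie_life is in milliseconds and is clamped above to the
+	 * range [1000, SCTP_MAX_COOKIE_LIFE]; zero fields stay unchanged.
+	 * "sock" is hypothetical.
+	 *
+	 *   struct sctp_assocparams ap;
+	 *
+	 *   memset(&ap, 0, sizeof(ap));
+	 *   ap.sasoc_assoc_id = SCTP_FUTURE_ASSOC;
+	 *   ap.sasoc_asocmaxrxt = 8;
+	 *   ap.sasoc_cookie_life = 60000;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_ASSOCINFO,
+	 *                            &ap, (socklen_t)sizeof(ap));
+	 */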
+	case SCTP_INITMSG:
+	{
+		struct sctp_initmsg *sinit;
+
+		SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
+		SCTP_INP_WLOCK(inp);
+		if (sinit->sinit_num_ostreams)
+			inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
+
+		if (sinit->sinit_max_instreams)
+			inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
+
+		if (sinit->sinit_max_attempts)
+			inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
+
+		if (sinit->sinit_max_init_timeo)
+			inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
+		SCTP_INP_WUNLOCK(inp);
+		break;
+	}
+	case SCTP_PRIMARY_ADDR:
+	{
+		struct sctp_setprim *spa;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
+		SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (spa->ssp_addr.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&spa->ssp_addr;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&spa->ssp_addr;
+			}
+		} else {
+			addr = (struct sockaddr *)&spa->ssp_addr;
+		}
+#else
+		addr = (struct sockaddr *)&spa->ssp_addr;
+#endif
+		if (stcb != NULL) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the refcount here since
+			 * sctp_findassociation_ep_addr() will decrement it again if it
+			 * finds the stcb, as long as the locked tcb (last argument)
+			 * is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr,
+			                                    &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+
+		if ((stcb != NULL) && (net != NULL)) {
+			if (net != stcb->asoc.primary_destination) {
+				if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+					/* Ok we need to set it */
+					if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
+						if ((stcb->asoc.alternate) &&
+						    (!(net->dest_state & SCTP_ADDR_PF)) &&
+						    (net->dest_state & SCTP_ADDR_REACHABLE)) {
+							sctp_free_remote_addr(stcb->asoc.alternate);
+							stcb->asoc.alternate = NULL;
+						}
+					} else {
+						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+						error = EINVAL;
+					}
+				} else {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				}
+			}
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		if (stcb != NULL) {
+			SCTP_TCB_UNLOCK(stcb);
+		}
+		break;
+	}
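+	/*
+	 * Illustrative userland sketch (not part of this file): selecting a
+	 * confirmed peer address as the primary path. "sock", "aid" and
+	 * "peer" (a struct sockaddr_in already holding one of the peer's
+	 * addresses) are hypothetical.
+	 *
+	 *   struct sctp_setprim prim;
+	 *
+	 *   memset(&prim, 0, sizeof(prim));
+	 *   prim.ssp_assoc_id = aid;
+	 *   memcpy(&prim.ssp_addr, &peer, sizeof(peer));
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
+	 *                            &prim, (socklen_t)sizeof(prim));
+	 */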
+	case SCTP_SET_DYNAMIC_PRIMARY:
+	{
+		union sctp_sockstore *ss;
+#ifdef SCTP_MVRF
+		int i, fnd = 0;
+#endif
+#if !defined(__Windows__) && !defined(__Userspace__)
+#if defined(__APPLE__)
+		struct proc *proc;
+#endif
+#ifdef __FreeBSD__
+#if __FreeBSD_version > 602000
+		error = priv_check(curthread,
+				   PRIV_NETINET_RESERVEDPORT);
+#elif __FreeBSD_version >= 500000
+		error = suser((struct thread *)p);
+#else
+		error = suser(p);
+#endif
+#elif defined(__APPLE__)
+		proc = (struct proc *)p;
+		if (p) {
+			error = suser(proc->p_ucred, &proc->p_acflag);
+		} else {
+			break;
+		}
+#else
+		error = suser(p, 0);
+#endif
+#endif
+		if (error)
+			break;
+
+		SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
+		/* SUPER USER CHECK? */
+#ifdef SCTP_MVRF
+		for (i = 0; i < inp->num_vrfs; i++) {
+			if (vrf_id == inp->m_vrf_ids[i]) {
+				fnd = 1;
+				break;
+			}
+		}
+		if (!fnd) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+#endif
+		error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
+		break;
+	}
+	case SCTP_SET_PEER_PRIMARY_ADDR:
+	{
+		struct sctp_setpeerprim *sspp;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
+		SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
+		if (stcb != NULL) {
+			struct sctp_ifa *ifa;
+
+#if defined(INET) && defined(INET6)
+			if (sspp->sspp_addr.ss_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)&sspp->sspp_addr;
+				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+					in6_sin6_2_sin(&sin_store, sin6);
+					addr = (struct sockaddr *)&sin_store;
+				} else {
+					addr = (struct sockaddr *)&sspp->sspp_addr;
+				}
+			} else {
+				addr = (struct sockaddr *)&sspp->sspp_addr;
+			}
+#else
+			addr = (struct sockaddr *)&sspp->sspp_addr;
+#endif
+			ifa = sctp_find_ifa_by_addr(addr, stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
+			if (ifa == NULL) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				goto out_of_it;
+			}
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+				/* Must validate that the ifa found is in our ep */
+				struct sctp_laddr *laddr;
+				int found = 0;
+
+				LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+					if (laddr->ifa == NULL) {
+						SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
+							__func__);
+						continue;
+					}
+					if ((sctp_is_addr_restricted(stcb, laddr->ifa)) &&
+					    (!sctp_is_addr_pending(stcb, laddr->ifa))) {
+						continue;
+					}
+					if (laddr->ifa == ifa) {
+						found = 1;
+						break;
+					}
+				}
+				if (!found) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+					goto out_of_it;
+				}
+#if defined(__FreeBSD__)
+			} else {
+				switch (addr->sa_family) {
+#ifdef INET
+				case AF_INET:
+				{
+					struct sockaddr_in *sin;
+
+					sin = (struct sockaddr_in *)addr;
+					if (prison_check_ip4(inp->ip_inp.inp.inp_cred,
+					                     &sin->sin_addr) != 0) {
+						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+						error = EINVAL;
+						goto out_of_it;
+					}
+					break;
+				}
+#endif
+#ifdef INET6
+				case AF_INET6:
+				{
+					struct sockaddr_in6 *sin6;
+
+					sin6 = (struct sockaddr_in6 *)addr;
+					if (prison_check_ip6(inp->ip_inp.inp.inp_cred,
+					                     &sin6->sin6_addr) != 0) {
+						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+						error = EINVAL;
+						goto out_of_it;
+					}
+					break;
+				}
+#endif
+				default:
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+					goto out_of_it;
+				}
+#endif
+			}
+			if (sctp_set_primary_ip_address_sa(stcb, addr) != 0) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SOCKOPT, SCTP_SO_LOCKED);
+		out_of_it:
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+	}
+	case SCTP_BINDX_ADD_ADDR:
+	{
+		struct sctp_getaddresses *addrs;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+		struct thread *td;
+
+		td = (struct thread *)p;
+#endif
+		SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
+				    optsize);
+#ifdef INET
+		if (addrs->addr->sa_family == AF_INET) {
+			if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+#endif
+		} else
+#endif
+#ifdef INET6
+		if (addrs->addr->sa_family == AF_INET6) {
+			if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			if (td != NULL &&
+			    (error = prison_local_ip6(td->td_ucred,
+			                              &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
+			                              (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+#endif
+		} else
+#endif
+		{
+			error = EAFNOSUPPORT;
+			break;
+		}
+		sctp_bindx_add_address(so, inp, addrs->addr,
+				       addrs->sget_assoc_id, vrf_id,
+				       &error, p);
+		break;
+	}
+	case SCTP_BINDX_REM_ADDR:
+	{
+		struct sctp_getaddresses *addrs;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+		struct thread *td;
+
+		td = (struct thread *)p;
+#endif
+		SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
+#ifdef INET
+		if (addrs->addr->sa_family == AF_INET) {
+			if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+#endif
+		} else
+#endif
+#ifdef INET6
+		if (addrs->addr->sa_family == AF_INET6) {
+			if (optsize < sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6)) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+				break;
+			}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+			if (td != NULL &&
+			    (error = prison_local_ip6(td->td_ucred,
+			                              &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
+			                              (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
+				SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+#endif
+		} else
+#endif
+		{
+			error = EAFNOSUPPORT;
+			break;
+		}
+		sctp_bindx_delete_address(inp, addrs->addr,
+					  addrs->sget_assoc_id, vrf_id,
+					  &error);
+		break;
+	}
+#ifdef __APPLE__
+	case SCTP_LISTEN_FIX:
+		/* only applies to one-to-many sockets */
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+			/* make sure the ACCEPTCONN flag is OFF */
+			so->so_options &= ~SO_ACCEPTCONN;
+		} else {
+			/* otherwise, not allowed */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+		}
+		break;
+#endif				/* __APPLE__ */
+	case SCTP_EVENT:
+	{
+		struct sctp_event *event;
+		uint32_t event_type;
+
+		SCTP_CHECK_AND_CAST(event, optval, struct sctp_event, optsize);
+		SCTP_FIND_STCB(inp, stcb, event->se_assoc_id);
+		switch (event->se_type) {
+		case SCTP_ASSOC_CHANGE:
+			event_type = SCTP_PCB_FLAGS_RECVASSOCEVNT;
+			break;
+		case SCTP_PEER_ADDR_CHANGE:
+			event_type = SCTP_PCB_FLAGS_RECVPADDREVNT;
+			break;
+		case SCTP_REMOTE_ERROR:
+			event_type = SCTP_PCB_FLAGS_RECVPEERERR;
+			break;
+		case SCTP_SEND_FAILED:
+			event_type = SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
+			break;
+		case SCTP_SHUTDOWN_EVENT:
+			event_type = SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
+			break;
+		case SCTP_ADAPTATION_INDICATION:
+			event_type = SCTP_PCB_FLAGS_ADAPTATIONEVNT;
+			break;
+		case SCTP_PARTIAL_DELIVERY_EVENT:
+			event_type = SCTP_PCB_FLAGS_PDAPIEVNT;
+			break;
+		case SCTP_AUTHENTICATION_EVENT:
+			event_type = SCTP_PCB_FLAGS_AUTHEVNT;
+			break;
+		case SCTP_STREAM_RESET_EVENT:
+			event_type = SCTP_PCB_FLAGS_STREAM_RESETEVNT;
+			break;
+		case SCTP_SENDER_DRY_EVENT:
+			event_type = SCTP_PCB_FLAGS_DRYEVNT;
+			break;
+		case SCTP_NOTIFICATIONS_STOPPED_EVENT:
+			event_type = 0;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP);
+			error = ENOTSUP;
+			break;
+		case SCTP_ASSOC_RESET_EVENT:
+			event_type = SCTP_PCB_FLAGS_ASSOC_RESETEVNT;
+			break;
+		case SCTP_STREAM_CHANGE_EVENT:
+			event_type = SCTP_PCB_FLAGS_STREAM_CHANGEEVNT;
+			break;
+		case SCTP_SEND_FAILED_EVENT:
+			event_type = SCTP_PCB_FLAGS_RECVNSENDFAILEVNT;
+			break;
+		default:
+			event_type = 0;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		if (event_type > 0) {
+			if (stcb) {
+				if (event->se_on) {
+					sctp_stcb_feature_on(inp, stcb, event_type);
+					if (event_type == SCTP_PCB_FLAGS_DRYEVNT) {
+						if (TAILQ_EMPTY(&stcb->asoc.send_queue) &&
+						    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
+						    (stcb->asoc.stream_queue_cnt == 0)) {
+							sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
+						}
+					}
+				} else {
+					sctp_stcb_feature_off(inp, stcb, event_type);
+				}
+				SCTP_TCB_UNLOCK(stcb);
+			} else {
+				/*
+				 * We don't want to send up a storm of events,
+				 * so return an error for sender dry events
+				 */
+				if ((event_type == SCTP_PCB_FLAGS_DRYEVNT) &&
+				    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) == 0) &&
+				    ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) &&
+				    ((event->se_assoc_id == SCTP_ALL_ASSOC) ||
+				     (event->se_assoc_id == SCTP_CURRENT_ASSOC))) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTSUP);
+					error = ENOTSUP;
+					break;
+				}
+				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+				    (event->se_assoc_id == SCTP_FUTURE_ASSOC) ||
+				    (event->se_assoc_id == SCTP_ALL_ASSOC)) {
+					SCTP_INP_WLOCK(inp);
+					if (event->se_on) {
+						sctp_feature_on(inp, event_type);
+					} else {
+						sctp_feature_off(inp, event_type);
+					}
+					SCTP_INP_WUNLOCK(inp);
+				}
+				if ((event->se_assoc_id == SCTP_CURRENT_ASSOC) ||
+				    (event->se_assoc_id == SCTP_ALL_ASSOC)) {
+					SCTP_INP_RLOCK(inp);
+					LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+						SCTP_TCB_LOCK(stcb);
+						if (event->se_on) {
+							sctp_stcb_feature_on(inp, stcb, event_type);
+						} else {
+							sctp_stcb_feature_off(inp, stcb, event_type);
+						}
+						SCTP_TCB_UNLOCK(stcb);
+					}
+					SCTP_INP_RUNLOCK(inp);
+				}
+			}
+		}
+		break;
+	}
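+	/*
+	 * Illustrative userland sketch (not part of this file): the
+	 * RFC 6458 replacement for SCTP_EVENTS, enabling one notification
+	 * type per call. "sock" is hypothetical.
+	 *
+	 *   struct sctp_event ev;
+	 *
+	 *   memset(&ev, 0, sizeof(ev));
+	 *   ev.se_assoc_id = SCTP_FUTURE_ASSOC;
+	 *   ev.se_type = SCTP_ASSOC_CHANGE;
+	 *   ev.se_on = 1;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_EVENT,
+	 *                            &ev, (socklen_t)sizeof(ev));
+	 */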
+	case SCTP_RECVRCVINFO:
+	{
+		int *onoff;
+
+		SCTP_CHECK_AND_CAST(onoff, optval, int, optsize);
+		SCTP_INP_WLOCK(inp);
+		if (*onoff != 0) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO);
+		}
+		SCTP_INP_WUNLOCK(inp);
+		break;
+	}
+	case SCTP_RECVNXTINFO:
+	{
+		int *onoff;
+
+		SCTP_CHECK_AND_CAST(onoff, optval, int, optsize);
+		SCTP_INP_WLOCK(inp);
+		if (*onoff != 0) {
+			sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO);
+		} else {
+			sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO);
+		}
+		SCTP_INP_WUNLOCK(inp);
+		break;
+	}
+	case SCTP_DEFAULT_SNDINFO:
+	{
+		struct sctp_sndinfo *info;
+		uint16_t policy;
+
+		SCTP_CHECK_AND_CAST(info, optval, struct sctp_sndinfo, optsize);
+		SCTP_FIND_STCB(inp, stcb, info->snd_assoc_id);
+
+		if (stcb) {
+			if (info->snd_sid < stcb->asoc.streamoutcnt) {
+				stcb->asoc.def_send.sinfo_stream = info->snd_sid;
+				policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags);
+				stcb->asoc.def_send.sinfo_flags = info->snd_flags;
+				stcb->asoc.def_send.sinfo_flags |= policy;
+				stcb->asoc.def_send.sinfo_ppid = info->snd_ppid;
+				stcb->asoc.def_send.sinfo_context = info->snd_context;
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (info->snd_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (info->snd_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->def_send.sinfo_stream = info->snd_sid;
+				policy = PR_SCTP_POLICY(inp->def_send.sinfo_flags);
+				inp->def_send.sinfo_flags = info->snd_flags;
+				inp->def_send.sinfo_flags |= policy;
+				inp->def_send.sinfo_ppid = info->snd_ppid;
+				inp->def_send.sinfo_context = info->snd_context;
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((info->snd_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (info->snd_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					if (info->snd_sid < stcb->asoc.streamoutcnt) {
+						stcb->asoc.def_send.sinfo_stream = info->snd_sid;
+						policy = PR_SCTP_POLICY(stcb->asoc.def_send.sinfo_flags);
+						stcb->asoc.def_send.sinfo_flags = info->snd_flags;
+						stcb->asoc.def_send.sinfo_flags |= policy;
+						stcb->asoc.def_send.sinfo_ppid = info->snd_ppid;
+						stcb->asoc.def_send.sinfo_context = info->snd_context;
+					}
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
+	case SCTP_DEFAULT_PRINFO:
+	{
+		struct sctp_default_prinfo *info;
+
+		SCTP_CHECK_AND_CAST(info, optval, struct sctp_default_prinfo, optsize);
+		SCTP_FIND_STCB(inp, stcb, info->pr_assoc_id);
+
+		if (info->pr_policy > SCTP_PR_SCTP_MAX) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			break;
+		}
+		if (stcb) {
+			stcb->asoc.def_send.sinfo_flags &= 0xfff0;
+			stcb->asoc.def_send.sinfo_flags |= info->pr_policy;
+			stcb->asoc.def_send.sinfo_timetolive = info->pr_value;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (info->pr_assoc_id == SCTP_FUTURE_ASSOC) ||
+			    (info->pr_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->def_send.sinfo_flags &= 0xfff0;
+				inp->def_send.sinfo_flags |= info->pr_policy;
+				inp->def_send.sinfo_timetolive = info->pr_value;
+				SCTP_INP_WUNLOCK(inp);
+			}
+			if ((info->pr_assoc_id == SCTP_CURRENT_ASSOC) ||
+			    (info->pr_assoc_id == SCTP_ALL_ASSOC)) {
+				SCTP_INP_RLOCK(inp);
+				LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
+					SCTP_TCB_LOCK(stcb);
+					stcb->asoc.def_send.sinfo_flags &= 0xfff0;
+					stcb->asoc.def_send.sinfo_flags |= info->pr_policy;
+					stcb->asoc.def_send.sinfo_timetolive = info->pr_value;
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_INP_RUNLOCK(inp);
+			}
+		}
+		break;
+	}
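+	/*
+	 * Illustrative userland sketch (not part of this file): making timed
+	 * reliability (abandon user messages after 3000 ms) the default
+	 * PR-SCTP policy. "sock" is hypothetical.
+	 *
+	 *   struct sctp_default_prinfo pri;
+	 *
+	 *   memset(&pri, 0, sizeof(pri));
+	 *   pri.pr_assoc_id = SCTP_FUTURE_ASSOC;
+	 *   pri.pr_policy = SCTP_PR_SCTP_TTL;
+	 *   pri.pr_value = 3000;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_DEFAULT_PRINFO,
+	 *                            &pri, (socklen_t)sizeof(pri));
+	 */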
+	case SCTP_PEER_ADDR_THLDS:
+		/* Applies to the specific association */
+	{
+		struct sctp_paddrthlds *thlds;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(thlds, optval, struct sctp_paddrthlds, optsize);
+		SCTP_FIND_STCB(inp, stcb, thlds->spt_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (thlds->spt_address.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&thlds->spt_address;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&thlds->spt_address;
+			}
+		} else {
+			addr = (struct sockaddr *)&thlds->spt_address;
+		}
+#else
+		addr = (struct sockaddr *)&thlds->spt_address;
+#endif
+		if (stcb != NULL) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the refcount here since
+			 * sctp_findassociation_ep_addr() will decrement it again if it
+			 * finds the stcb, as long as the locked tcb (last argument)
+			 * is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr,
+			                                    &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+		if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+			if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+				if (sin->sin_addr.s_addr != INADDR_ANY) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+#ifdef INET6
+			if (addr->sa_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)addr;
+				if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+#if defined(__Userspace__)
+			if (addr->sa_family == AF_CONN) {
+				struct sockaddr_conn *sconn;
+
+				sconn = (struct sockaddr_conn *)addr;
+				if (sconn->sconn_addr != NULL) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+			{
+				error = EAFNOSUPPORT;
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+		}
+		if (thlds->spt_pathcpthld != 0xffff) {
+			/* The confirmed-path threshold is not supported. */
+			if (stcb != NULL) {
+				SCTP_TCB_UNLOCK(stcb);
+			}
+			error = EINVAL;
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+			break;
+		}
+		if (stcb != NULL) {
+			if (net != NULL) {
+				net->failure_threshold = thlds->spt_pathmaxrxt;
+				net->pf_threshold = thlds->spt_pathpfthld;
+				if (net->dest_state & SCTP_ADDR_PF) {
+					if ((net->error_count > net->failure_threshold) ||
+					    (net->error_count <= net->pf_threshold)) {
+						net->dest_state &= ~SCTP_ADDR_PF;
+					}
+				} else {
+					if ((net->error_count > net->pf_threshold) &&
+					    (net->error_count <= net->failure_threshold)) {
+						net->dest_state |= SCTP_ADDR_PF;
+						sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+						sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+						                stcb->sctp_ep, stcb, net,
+						                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_17);
+						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+					}
+				}
+				if (net->dest_state & SCTP_ADDR_REACHABLE) {
+					if (net->error_count > net->failure_threshold) {
+						net->dest_state &= ~SCTP_ADDR_REACHABLE;
+						sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+					}
+				} else {
+					if (net->error_count <= net->failure_threshold) {
+						net->dest_state |= SCTP_ADDR_REACHABLE;
+						sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+					}
+				}
+			} else {
+				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+					net->failure_threshold = thlds->spt_pathmaxrxt;
+					net->pf_threshold = thlds->spt_pathpfthld;
+					if (net->dest_state & SCTP_ADDR_PF) {
+						if ((net->error_count > net->failure_threshold) ||
+						    (net->error_count <= net->pf_threshold)) {
+							net->dest_state &= ~SCTP_ADDR_PF;
+						}
+					} else {
+						if ((net->error_count > net->pf_threshold) &&
+						    (net->error_count <= net->failure_threshold)) {
+							net->dest_state |= SCTP_ADDR_PF;
+							sctp_send_hb(stcb, net, SCTP_SO_LOCKED);
+							sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
+							                stcb->sctp_ep, stcb, net,
+							                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_18);
+							sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
+						}
+					}
+					if (net->dest_state & SCTP_ADDR_REACHABLE) {
+						if (net->error_count > net->failure_threshold) {
+							net->dest_state &= ~SCTP_ADDR_REACHABLE;
+							sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, stcb, 0, net, SCTP_SO_LOCKED);
+						}
+					} else {
+						if (net->error_count <= net->failure_threshold) {
+							net->dest_state |= SCTP_ADDR_REACHABLE;
+							sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 0, net, SCTP_SO_LOCKED);
+						}
+					}
+				}
+				stcb->asoc.def_net_failure = thlds->spt_pathmaxrxt;
+				stcb->asoc.def_net_pf_threshold = thlds->spt_pathpfthld;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (thlds->spt_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->sctp_ep.def_net_failure = thlds->spt_pathmaxrxt;
+				inp->sctp_ep.def_net_pf_threshold = thlds->spt_pathpfthld;
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
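+	/*
+	 * Illustrative userland sketch (not part of this file): tuning the
+	 * potentially-failed and failure thresholds for every path of an
+	 * association by passing the INADDR_ANY wildcard as the address.
+	 * spt_pathcpthld must be 0xffff, since the confirmed-path threshold
+	 * is not supported (checked above). "sock" and "aid" are
+	 * hypothetical.
+	 *
+	 *   struct sctp_paddrthlds th;
+	 *
+	 *   memset(&th, 0, sizeof(th));
+	 *   th.spt_assoc_id = aid;
+	 *   th.spt_address.ss_family = AF_INET;
+	 *   th.spt_pathpfthld = 2;
+	 *   th.spt_pathmaxrxt = 5;
+	 *   th.spt_pathcpthld = 0xffff;
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
+	 *                            &th, (socklen_t)sizeof(th));
+	 */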
+	case SCTP_REMOTE_UDP_ENCAPS_PORT:
+	{
+		struct sctp_udpencaps *encaps;
+		struct sctp_nets *net;
+		struct sockaddr *addr;
+#if defined(INET) && defined(INET6)
+		struct sockaddr_in sin_store;
+#endif
+
+		SCTP_CHECK_AND_CAST(encaps, optval, struct sctp_udpencaps, optsize);
+		SCTP_FIND_STCB(inp, stcb, encaps->sue_assoc_id);
+
+#if defined(INET) && defined(INET6)
+		if (encaps->sue_address.ss_family == AF_INET6) {
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)&encaps->sue_address;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				in6_sin6_2_sin(&sin_store, sin6);
+				addr = (struct sockaddr *)&sin_store;
+			} else {
+				addr = (struct sockaddr *)&encaps->sue_address;
+			}
+		} else {
+			addr = (struct sockaddr *)&encaps->sue_address;
+		}
+#else
+		addr = (struct sockaddr *)&encaps->sue_address;
+#endif
+		if (stcb != NULL) {
+			net = sctp_findnet(stcb, addr);
+		} else {
+			/* We increment the refcount here since
+			 * sctp_findassociation_ep_addr() will decrement it again if it
+			 * finds the stcb, as long as the locked tcb (last argument)
+			 * is NULL.
+			 */
+			net = NULL;
+			SCTP_INP_INCR_REF(inp);
+			stcb = sctp_findassociation_ep_addr(&inp, addr, &net, NULL, NULL);
+			if (stcb == NULL) {
+				SCTP_INP_DECR_REF(inp);
+			}
+		}
+		if ((stcb != NULL) && (net == NULL)) {
+#ifdef INET
+			if (addr->sa_family == AF_INET) {
+				struct sockaddr_in *sin;
+
+				sin = (struct sockaddr_in *)addr;
+				if (sin->sin_addr.s_addr != INADDR_ANY) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+#ifdef INET6
+			if (addr->sa_family == AF_INET6) {
+				struct sockaddr_in6 *sin6;
+
+				sin6 = (struct sockaddr_in6 *)addr;
+				if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+#if defined(__Userspace__)
+			if (addr->sa_family == AF_CONN) {
+				struct sockaddr_conn *sconn;
+
+				sconn = (struct sockaddr_conn *)addr;
+				if (sconn->sconn_addr != NULL) {
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					SCTP_TCB_UNLOCK(stcb);
+					error = EINVAL;
+					break;
+				}
+			} else
+#endif
+			{
+				error = EAFNOSUPPORT;
+				SCTP_TCB_UNLOCK(stcb);
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+				break;
+			}
+		}
+
+		if (stcb != NULL) {
+			if (net != NULL) {
+				net->port = encaps->sue_port;
+			} else {
+				stcb->asoc.port = encaps->sue_port;
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (encaps->sue_assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->sctp_ep.port = encaps->sue_port;
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
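+	/*
+	 * Illustrative userland sketch (not part of this file): enabling
+	 * UDP encapsulation (RFC 6951) towards future peers by setting the
+	 * remote UDP port on the endpoint; the port travels in network byte
+	 * order and 9899 is the IANA-registered SCTP/UDP port. "sock" is
+	 * hypothetical.
+	 *
+	 *   struct sctp_udpencaps enc;
+	 *
+	 *   memset(&enc, 0, sizeof(enc));
+	 *   enc.sue_assoc_id = SCTP_FUTURE_ASSOC;
+	 *   enc.sue_address.ss_family = AF_INET;
+	 *   enc.sue_port = htons(9899);
+	 *   (void)usrsctp_setsockopt(sock, IPPROTO_SCTP,
+	 *                            SCTP_REMOTE_UDP_ENCAPS_PORT,
+	 *                            &enc, (socklen_t)sizeof(enc));
+	 */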
+	case SCTP_ECN_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (av->assoc_value == 0) {
+					inp->ecn_supported = 0;
+				} else {
+					inp->ecn_supported = 1;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_PR_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (av->assoc_value == 0) {
+					inp->prsctp_supported = 0;
+				} else {
+					inp->prsctp_supported = 1;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_AUTH_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				if ((av->assoc_value == 0) &&
+				    (inp->asconf_supported == 1)) {
+				    	/* AUTH is required for ASCONF */
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				} else {
+					SCTP_INP_WLOCK(inp);
+					if (av->assoc_value == 0) {
+						inp->auth_supported = 0;
+					} else {
+						inp->auth_supported = 1;
+					}
+					SCTP_INP_WUNLOCK(inp);
+				}
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_ASCONF_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				if ((av->assoc_value != 0) &&
+				    (inp->auth_supported == 0)) {
+				    	/* AUTH is required for ASCONF */
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+					error = EINVAL;
+				} else {
+					SCTP_INP_WLOCK(inp);
+					if (av->assoc_value == 0) {
+						inp->asconf_supported = 0;
+						sctp_auth_delete_chunk(SCTP_ASCONF,
+						                       inp->sctp_ep.local_auth_chunks);
+						sctp_auth_delete_chunk(SCTP_ASCONF_ACK,
+						                       inp->sctp_ep.local_auth_chunks);
+					} else {
+						inp->asconf_supported = 1;
+						sctp_auth_add_chunk(SCTP_ASCONF,
+						                    inp->sctp_ep.local_auth_chunks);
+						sctp_auth_add_chunk(SCTP_ASCONF_ACK,
+						                    inp->sctp_ep.local_auth_chunks);
+					}
+					SCTP_INP_WUNLOCK(inp);
+				}
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_RECONFIG_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (av->assoc_value == 0) {
+					inp->reconfig_supported = 0;
+				} else {
+					inp->reconfig_supported = 1;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_NRSACK_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (av->assoc_value == 0) {
+					inp->nrsack_supported = 0;
+				} else {
+					inp->nrsack_supported = 1;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_PKTDROP_SUPPORTED:
+	{
+		struct sctp_assoc_value *av;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			error = EINVAL;
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				if (av->assoc_value == 0) {
+					inp->pktdrop_supported = 0;
+				} else {
+					inp->pktdrop_supported = 1;
+				}
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	case SCTP_MAX_CWND:
+	{
+		struct sctp_assoc_value *av;
+		struct sctp_nets *net;
+
+		SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
+		SCTP_FIND_STCB(inp, stcb, av->assoc_id);
+
+		if (stcb) {
+			stcb->asoc.max_cwnd = av->assoc_value;
+			if (stcb->asoc.max_cwnd > 0) {
+				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+					if ((net->cwnd > stcb->asoc.max_cwnd) &&
+					    (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
+						net->cwnd = stcb->asoc.max_cwnd;
+						if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
+							net->cwnd = net->mtu - sizeof(struct sctphdr);
+						}
+					}
+				}
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+			    (av->assoc_id == SCTP_FUTURE_ASSOC)) {
+				SCTP_INP_WLOCK(inp);
+				inp->max_cwnd = av->assoc_value;
+				SCTP_INP_WUNLOCK(inp);
+			} else {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+				error = EINVAL;
+			}
+		}
+		break;
+	}
+	default:
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
+		error = ENOPROTOOPT;
+		break;
+	} /* end switch (opt) */
+	return (error);
+}
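+
+/*
+ * Illustrative usage sketch for the handlers above: they are reached
+ * through an ordinary setsockopt() call at level IPPROTO_SCTP.  For
+ * example, disabling ECN negotiation for all future associations could
+ * look like this ("fd" is a placeholder for an SCTP socket descriptor):
+ *
+ *	struct sctp_assoc_value av;
+ *
+ *	memset(&av, 0, sizeof(av));
+ *	av.assoc_id = SCTP_FUTURE_ASSOC;
+ *	av.assoc_value = 0;
+ *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_ECN_SUPPORTED,
+ *	                 &av, (socklen_t)sizeof(av));
+ *
+ * As the SCTP_ECN_SUPPORTED case shows, the option is rejected with
+ * EINVAL when it is applied to an already existing association.
+ */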
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+int
+sctp_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+	void *optval = NULL;
+	size_t optsize = 0;
+	void *p;
+	int error = 0;
+#if defined(__FreeBSD__)
+	struct sctp_inpcb *inp;
+#endif
+
+#if defined(__FreeBSD__)
+	if ((sopt->sopt_level == SOL_SOCKET) &&
+	    (sopt->sopt_name == SO_SETFIB)) {
+		inp = (struct sctp_inpcb *)so->so_pcb;
+		if (inp == NULL) {
+			SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+		SCTP_INP_WLOCK(inp);
+		inp->fibnum = so->so_fibnum;
+		SCTP_INP_WUNLOCK(inp);
+		return (0);
+	}
+#endif
+	if (sopt->sopt_level != IPPROTO_SCTP) {
+		/* wrong proto level... send back up to IP */
+#ifdef INET6
+		if (INP_CHECK_SOCKAF(so, AF_INET6))
+			error = ip6_ctloutput(so, sopt);
+#endif				/* INET6 */
+#if defined(INET) && defined(INET6)
+		else
+#endif
+#ifdef INET
+			error = ip_ctloutput(so, sopt);
+#endif
+		return (error);
+	}
+	optsize = sopt->sopt_valsize;
+	if (optsize) {
+		SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
+		if (optval == NULL) {
+			SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
+			return (ENOBUFS);
+		}
+		error = sooptcopyin(sopt, optval, optsize, optsize);
+		if (error) {
+			SCTP_FREE(optval, SCTP_M_SOCKOPT);
+			goto out;
+		}
+	}
+#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__Windows__)
+	p = (void *)sopt->sopt_td;
+#else
+	p = (void *)sopt->sopt_p;
+#endif
+	if (sopt->sopt_dir == SOPT_SET) {
+		error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
+	} else if (sopt->sopt_dir == SOPT_GET) {
+		error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
+	} else {
+		SCTP_LTRACE_ERR_RET(so->so_pcb, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+	}
+	if ((error == 0) && (optval != NULL)) {
+		error = sooptcopyout(sopt, optval, optsize);
+		SCTP_FREE(optval, SCTP_M_SOCKOPT);
+	} else if (optval != NULL) {
+		SCTP_FREE(optval, SCTP_M_SOCKOPT);
+	}
+out:
+	return (error);
+}
+#endif
+
+#ifdef INET
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
+{
+#else
+#if defined(__FreeBSD__) || defined(__APPLE__)
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, struct proc *p)
+{
+#elif defined(__Panda__) || defined(__Userspace__)
+int
+sctp_connect(struct socket *so, struct sockaddr *addr)
+{
+	void *p = NULL;
+#elif defined(__Windows__)
+static int
+sctp_connect(struct socket *so, struct sockaddr *addr, PKTHREAD p)
+{
+#else
+static int
+sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
+{
+	struct sockaddr *addr = mtod(nam, struct sockaddr *);
+
+#endif
+#endif
+#ifdef SCTP_MVRF
+	int i, fnd = 0;
+#endif
+	int error = 0;
+	int create_lock_on = 0;
+	uint32_t vrf_id;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb = NULL;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		/* I made this the same as TCP since we are not set up? */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (ECONNRESET);
+	}
+	if (addr == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return EINVAL;
+	}
+
+#if defined(__Userspace__)
+	/* TODO: __Userspace__ falls into this code for IPv6 stuff at the moment... */
+#endif
+#if !defined(__Windows__) && !defined(__Userspace_os_Linux) && !defined(__Userspace_os_Windows)
+	switch (addr->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+		struct sockaddr_in6 *sin6p;
+
+#endif
+		if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+		sin6p = (struct sockaddr_in6 *)addr;
+		if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+			return (error);
+		}
+#endif
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+		struct sockaddr_in *sinp;
+
+#endif
+#if !defined(__Userspace_os_Windows)
+		if (addr->sa_len != sizeof(struct sockaddr_in)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+		sinp = (struct sockaddr_in *)addr;
+		if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
+			return (error);
+		}
+#endif
+		break;
+	}
+#endif
+	default:
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+		return (EAFNOSUPPORT);
+	}
+#endif
+	SCTP_INP_INCR_REF(inp);
+	SCTP_ASOC_CREATE_LOCK(inp);
+	create_lock_on = 1;
+
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		/* Should I really unlock? */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+		error = EFAULT;
+		goto out_now;
+	}
+#ifdef INET6
+	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+	    (addr->sa_family == AF_INET6)) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+#endif
+#if defined(__Userspace__)
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) &&
+	    (addr->sa_family != AF_CONN)) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+#endif
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
+	    SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+		error = sctp_inpcb_bind(so, NULL, NULL, p);
+		if (error) {
+			goto out_now;
+		}
+	}
+	/* Now do we connect? */
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+		/* We are already connected AND the TCP model */
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+		error = EADDRINUSE;
+		goto out_now;
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+		SCTP_INP_RLOCK(inp);
+		stcb = LIST_FIRST(&inp->sctp_asoc_list);
+		SCTP_INP_RUNLOCK(inp);
+	} else {
+		/* We increment here since sctp_findassociation_ep_addr() will
+		 * do a decrement if it finds the stcb as long as the locked
+		 * tcb (last argument) is NOT a TCB.. aka NULL.
+		 */
+		SCTP_INP_INCR_REF(inp);
+		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+		if (stcb == NULL) {
+			SCTP_INP_DECR_REF(inp);
+		} else {
+			SCTP_TCB_UNLOCK(stcb);
+		}
+	}
+	if (stcb != NULL) {
+		/* Already have or am bringing up an association */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+		error = EALREADY;
+		goto out_now;
+	}
+
+	vrf_id = inp->def_vrf_id;
+#ifdef SCTP_MVRF
+	for (i = 0; i < inp->num_vrfs; i++) {
+		if (vrf_id == inp->m_vrf_ids[i]) {
+			fnd = 1;
+			break;
+		}
+	}
+	if (!fnd) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+#endif
+	/* We are GOOD to go */
+	stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+	                       inp->sctp_ep.pre_open_stream_count,
+	                       inp->sctp_ep.port, p);
+	if (stcb == NULL) {
+		/* Gak! no memory */
+		goto out_now;
+	}
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+		/* Set the connected flag so we can queue data */
+		soisconnecting(so);
+	}
+	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
+	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+	/* initialize authentication parameters for the assoc */
+	sctp_initialize_auth_params(inp, stcb);
+
+	sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+	SCTP_TCB_UNLOCK(stcb);
+ out_now:
+	if (create_lock_on) {
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+	}
+
+	SCTP_INP_DECR_REF(inp);
+	return (error);
+}
+#endif
+
+#if defined(__Userspace__)
+int
+sctpconn_connect(struct socket *so, struct sockaddr *addr)
+{
+#ifdef SCTP_MVRF
+	int i, fnd = 0;
+#endif
+	void *p = NULL;
+	int error = 0;
+	int create_lock_on = 0;
+	uint32_t vrf_id;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb = NULL;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		/* I made this the same as TCP since we are not set up? */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (ECONNRESET);
+	}
+	if (addr == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return EINVAL;
+	}
+	switch (addr->sa_family) {
+#ifdef INET
+	case AF_INET:
+#ifdef HAVE_SA_LEN
+		if (addr->sa_len != sizeof(struct sockaddr_in)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+#endif
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+#ifdef HAVE_SA_LEN
+		if (addr->sa_len != sizeof(struct sockaddr_in6)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+#endif
+		break;
+#endif
+	case AF_CONN:
+#ifdef HAVE_SA_LEN
+		if (addr->sa_len != sizeof(struct sockaddr_conn)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+#endif
+		break;
+	default:
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+		return (EAFNOSUPPORT);
+	}
+	SCTP_INP_INCR_REF(inp);
+	SCTP_ASOC_CREATE_LOCK(inp);
+	create_lock_on = 1;
+
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		/* Should I really unlock? */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
+		error = EFAULT;
+		goto out_now;
+	}
+#ifdef INET6
+	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+	    (addr->sa_family == AF_INET6)) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+#endif
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == SCTP_PCB_FLAGS_UNBOUND) {
+		/* Bind an ephemeral port */
+		error = sctp_inpcb_bind(so, NULL, NULL, p);
+		if (error) {
+			goto out_now;
+		}
+	}
+	/* Now do we connect? */
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
+	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+		/* We are already connected AND the TCP model */
+		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+		error = EADDRINUSE;
+		goto out_now;
+	}
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+		SCTP_INP_RLOCK(inp);
+		stcb = LIST_FIRST(&inp->sctp_asoc_list);
+		SCTP_INP_RUNLOCK(inp);
+	} else {
+		/* We increment here since sctp_findassociation_ep_addr() will
+		 * do a decrement if it finds the stcb as long as the locked
+		 * tcb (last argument) is NOT a TCB.. aka NULL.
+		 */
+		SCTP_INP_INCR_REF(inp);
+		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
+		if (stcb == NULL) {
+			SCTP_INP_DECR_REF(inp);
+		} else {
+			SCTP_TCB_UNLOCK(stcb);
+		}
+	}
+	if (stcb != NULL) {
+		/* Already have or am bringing up an association */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
+		error = EALREADY;
+		goto out_now;
+	}
+
+	vrf_id = inp->def_vrf_id;
+#ifdef SCTP_MVRF
+	for (i = 0; i < inp->num_vrfs; i++) {
+		if (vrf_id == inp->m_vrf_ids[i]) {
+			fnd = 1;
+			break;
+		}
+	}
+	if (!fnd) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		error = EINVAL;
+		goto out_now;
+	}
+#endif
+	/* We are GOOD to go */
+	stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
+	                       inp->sctp_ep.pre_open_stream_count,
+	                       inp->sctp_ep.port, p);
+	if (stcb == NULL) {
+		/* Gak! no memory */
+		goto out_now;
+	}
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
+		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
+		/* Set the connected flag so we can queue data */
+		soisconnecting(so);
+	}
+	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
+	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
+
+	/* initialize authentication parameters for the assoc */
+	sctp_initialize_auth_params(inp, stcb);
+
+	sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
+	SCTP_TCB_UNLOCK(stcb);
+ out_now:
+	if (create_lock_on) {
+		SCTP_ASOC_CREATE_UNLOCK(inp);
+	}
+
+	SCTP_INP_DECR_REF(inp);
+	return (error);
+}
+#endif
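+
+/*
+ * Illustrative usage sketch for the AF_CONN path handled by
+ * sctpconn_connect() above.  "so" and "lower_layer_handle" are
+ * placeholders, the port is assumed to be in network byte order, and the
+ * reading of sconn_addr as an opaque pointer chosen by the application is
+ * an assumption to be checked against the AF_CONN input path:
+ *
+ *	struct sockaddr_conn sconn;
+ *
+ *	memset(&sconn, 0, sizeof(sconn));
+ *	sconn.sconn_family = AF_CONN;
+ *	sconn.sconn_len = sizeof(sconn);	(only when HAVE_SA_LEN is defined)
+ *	sconn.sconn_port = htons(5000);
+ *	sconn.sconn_addr = lower_layer_handle;
+ *	sctpconn_connect(so, (struct sockaddr *)&sconn);
+ */
+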
+int
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+#if __FreeBSD_version >= 700000
+sctp_listen(struct socket *so, int backlog, struct thread *p)
+#else
+sctp_listen(struct socket *so, struct thread *p)
+#endif
+#elif defined(__Windows__)
+sctp_listen(struct socket *so, int backlog, PKTHREAD p)
+#elif defined(__Userspace__)
+sctp_listen(struct socket *so, int backlog, struct proc *p)
+#else
+sctp_listen(struct socket *so, struct proc *p)
+#endif
+{
+	/*
+	 * Note this module depends on the protocol processing being called
+	 * AFTER any socket level flags and backlog are applied to the
+	 * socket. The traditional way that the socket flags are applied is
+	 * AFTER protocol processing. We have made a change to the
+	 * sys/kern/uipc_socket.c module to reverse this but this MUST be in
+	 * place if the socket API for SCTP is to work properly.
+	 */
+
+	int error = 0;
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		/* I made this the same as TCP since we are not set up? */
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (ECONNRESET);
+	}
+	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
+		/* See if we have a listener */
+		struct sctp_inpcb *tinp;
+		union sctp_sockstore store;
+
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
+			/* not bound all */
+			struct sctp_laddr *laddr;
+
+			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+				memcpy(&store, &laddr->ifa->address, sizeof(store));
+				switch (store.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					store.sin.sin_port = inp->sctp_lport;
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					store.sin6.sin6_port = inp->sctp_lport;
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					store.sconn.sconn_port = inp->sctp_lport;
+					break;
+#endif
+				default:
+					break;
+				}
+				tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id);
+				if (tinp && (tinp != inp) &&
+				    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+				    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+				    (tinp->sctp_socket->so_qlimit)) {
+					/* we have a listener already and it's not this inp. */
+					SCTP_INP_DECR_REF(tinp);
+					return (EADDRINUSE);
+				} else if (tinp) {
+					SCTP_INP_DECR_REF(tinp);
+				}
+			}
+		} else {
+			/* Setup a local addr bound all */
+			memset(&store, 0, sizeof(store));
+#ifdef INET6
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+				store.sa.sa_family = AF_INET6;
+#ifdef HAVE_SA_LEN
+				store.sa.sa_len = sizeof(struct sockaddr_in6);
+#endif
+			}
+#endif
+#if defined(__Userspace__)
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+				store.sa.sa_family = AF_CONN;
+#ifdef HAVE_SA_LEN
+				store.sa.sa_len = sizeof(struct sockaddr_conn);
+#endif
+			}
+#endif
+#ifdef INET
+#if defined(__Userspace__)
+			if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
+			    ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) == 0)) {
+#else
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+#endif
+				store.sa.sa_family = AF_INET;
+#ifdef HAVE_SA_LEN
+				store.sa.sa_len = sizeof(struct sockaddr_in);
+#endif
+			}
+#endif
+			switch (store.sa.sa_family) {
+#ifdef INET
+			case AF_INET:
+				store.sin.sin_port = inp->sctp_lport;
+				break;
+#endif
+#ifdef INET6
+			case AF_INET6:
+				store.sin6.sin6_port = inp->sctp_lport;
+				break;
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+				store.sconn.sconn_port = inp->sctp_lport;
+				break;
+#endif
+			default:
+				break;
+			}
+			tinp = sctp_pcb_findep(&store.sa, 0, 0, inp->def_vrf_id);
+			if (tinp && (tinp != inp) &&
+			    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
+			    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
+			    (tinp->sctp_socket->so_qlimit)) {
+				/* we have a listener already and it's not this inp. */
+				SCTP_INP_DECR_REF(tinp);
+				return (EADDRINUSE);
+			} else if (tinp) {
+				SCTP_INP_DECR_REF(tinp);
+			}
+		}
+	}
+	SCTP_INP_RLOCK(inp);
+#ifdef SCTP_LOCK_LOGGING
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
+		sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
+	}
+#endif
+	SOCK_LOCK(so);
+#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Userspace__)
+	error = solisten_proto_check(so);
+	SOCK_UNLOCK(so);
+	if (error) {
+		SCTP_INP_RUNLOCK(inp);
+		return (error);
+	}
+#endif
+	if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/* The unlucky case
+		 * - We are in the tcp pool with this guy.
+		 * - Someone else is in the main inp slot.
+		 * - We must move this guy (the listener) to the main slot
+		 * - We must then move the guy that was in the main slot to the TCP pool.
+		 */
+		if (sctp_swap_inpcb_for_listen(inp)) {
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+			return (EADDRINUSE);
+		}
+	}
+
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
+		/* We are already connected AND the TCP model */
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
+		return (EADDRINUSE);
+	}
+	SCTP_INP_RUNLOCK(inp);
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+		/* We must do a bind. */
+		if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
+			/* bind error, probably perm */
+			return (error);
+		}
+	}
+	SOCK_LOCK(so);
+#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__) || defined(__Userspace__)
+#if __FreeBSD_version >= 700000 || defined(__Windows__) || defined(__Userspace__)
+	/* It appears for 7.0 and on, we must always call this. */
+	solisten_proto(so, backlog);
+#else
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) == 0) {
+		solisten_proto(so);
+	}
+#endif
+#endif
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+		/* remove the ACCEPTCONN flag for one-to-many sockets */
+#if defined(__Userspace__)
+		so->so_options &= ~SCTP_SO_ACCEPTCONN;
+#else
+		so->so_options &= ~SO_ACCEPTCONN;
+#endif
+	}
+
+#if __FreeBSD_version >= 700000 || defined(__Windows__) || defined(__Userspace__)
+	if (backlog == 0) {
+		/* turning off listen */
+#if defined(__Userspace__)
+		so->so_options &= ~SCTP_SO_ACCEPTCONN;
+#else
+		so->so_options &= ~SO_ACCEPTCONN;
+#endif
+	}
+#endif
+	SOCK_UNLOCK(so);
+	return (error);
+}
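+
+/*
+ * Note on the listen handling above: on FreeBSD 7 and newer, Windows and
+ * the userspace build, solisten_proto() is called unconditionally, and a
+ * backlog of 0 is then used to clear the accept flag again, so listening
+ * with a backlog of 0 effectively turns listening off for this socket.
+ */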
+
+static int sctp_defered_wakeup_cnt = 0;
+
+int
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)
+sctp_accept(struct socket *so, struct sockaddr **addr)
+{
+#elif defined(__Panda__)
+sctp_accept(struct socket *so, struct sockaddr *addr, int *namelen,
+	    void *accept_info, int *accept_info_len)
+{
+#else
+sctp_accept(struct socket *so, struct mbuf *nam)
+{
+	struct sockaddr *addr = mtod(nam, struct sockaddr *);
+#endif
+	struct sctp_tcb *stcb;
+	struct sctp_inpcb *inp;
+	union sctp_sockstore store;
+#ifdef INET6
+#ifdef SCTP_KAME
+	int error;
+#endif /* SCTP_KAME */
+#endif
+	inp = (struct sctp_inpcb *)so->so_pcb;
+
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (ECONNRESET);
+	}
+	SCTP_INP_RLOCK(inp);
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
+		return (EOPNOTSUPP);
+	}
+	if (so->so_state & SS_ISDISCONNECTED) {
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
+		return (ECONNABORTED);
+	}
+	stcb = LIST_FIRST(&inp->sctp_asoc_list);
+	if (stcb == NULL) {
+		SCTP_INP_RUNLOCK(inp);
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (ECONNRESET);
+	}
+	SCTP_TCB_LOCK(stcb);
+	SCTP_INP_RUNLOCK(inp);
+	store = stcb->asoc.primary_destination->ro._l_addr;
+	stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
+	SCTP_TCB_UNLOCK(stcb);
+	switch (store.sa.sa_family) {
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)
+		SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+		if (sin == NULL)
+			return (ENOMEM);
+#else
+		sin = (struct sockaddr_in *)addr;
+		bzero((caddr_t)sin, sizeof(*sin));
+#endif
+		sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+		sin->sin_len = sizeof(*sin);
+#endif
+		sin->sin_port = store.sin.sin_port;
+		sin->sin_addr = store.sin.sin_addr;
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)
+		*addr = (struct sockaddr *)sin;
+#elif !defined(__Panda__)
+		SCTP_BUF_LEN(nam) = sizeof(*sin);
+#endif
+		break;
+	}
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)
+		SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
+		if (sin6 == NULL)
+			return (ENOMEM);
+#else
+		sin6 = (struct sockaddr_in6 *)addr;
+		bzero((caddr_t)sin6, sizeof(*sin6));
+#endif
+		sin6->sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+		sin6->sin6_len = sizeof(*sin6);
+#endif
+		sin6->sin6_port = store.sin6.sin6_port;
+		sin6->sin6_addr = store.sin6.sin6_addr;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+#ifdef SCTP_KAME
+		if ((error = sa6_recoverscope(sin6)) != 0) {
+			SCTP_FREE_SONAME(sin6);
+			return (error);
+		}
+#else
+		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
+			/*
+			 * sin6->sin6_scope_id =
+			 * ntohs(sin6->sin6_addr.s6_addr16[1]);
+			 */
+			in6_recoverscope(sin6, &sin6->sin6_addr, NULL);	/* skip ifp check */
+		else
+			sin6->sin6_scope_id = 0;	/* XXX */
+#endif /* SCTP_KAME */
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)
+		*addr = (struct sockaddr *)sin6;
+#elif !defined(__Panda__)
+		SCTP_BUF_LEN(nam) = sizeof(*sin6);
+#endif
+		break;
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
+		struct sockaddr_conn *sconn;
+
+		SCTP_MALLOC_SONAME(sconn, struct sockaddr_conn *, sizeof(struct sockaddr_conn));
+		if (sconn == NULL) {
+			return (ENOMEM);
+		}
+		sconn->sconn_family = AF_CONN;
+#ifdef HAVE_SCONN_LEN
+		sconn->sconn_len = sizeof(struct sockaddr_conn);
+#endif
+		sconn->sconn_port = store.sconn.sconn_port;
+		sconn->sconn_addr = store.sconn.sconn_addr;
+		*addr = (struct sockaddr *)sconn;
+		break;
+	}
+#endif
+	default:
+		/* TSNH */
+		break;
+	}
+	/* Wake any delayed sleep action */
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
+		SCTP_INP_WLOCK(inp);
+		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
+			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
+			SCTP_INP_WUNLOCK(inp);
+			SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
+			if (sowriteable(inp->sctp_socket)) {
+#if defined(__Userspace__)
+				/* __Userspace__: calling sowwakeup_locked() because of the SOCKBUF_LOCK above. */
+#endif
+#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__)
+				sowwakeup_locked(inp->sctp_socket);
+#else
+#if defined(__APPLE__)
+				/* socket is locked */
+#endif
+				sowwakeup(inp->sctp_socket);
+#endif
+			} else {
+				SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
+			}
+			SCTP_INP_WLOCK(inp);
+		}
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
+			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
+			SCTP_INP_WUNLOCK(inp);
+			SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
+			if (soreadable(inp->sctp_socket)) {
+				sctp_defered_wakeup_cnt++;
+#if defined(__Userspace__)
+				/* __Userspace__: calling sorwakeup_locked() because of the SOCKBUF_LOCK above. */
+#endif
+#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__)
+				sorwakeup_locked(inp->sctp_socket);
+#else
+#if defined(__APPLE__)
+				/* socket is locked */
+#endif
+				sorwakeup(inp->sctp_socket);
+#endif
+			} else {
+				SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
+			}
+			SCTP_INP_WLOCK(inp);
+		}
+		SCTP_INP_WUNLOCK(inp);
+	}
+	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+		SCTP_TCB_LOCK(stcb);
+		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+		                SCTP_FROM_SCTP_USRREQ + SCTP_LOC_19);
+	}
+	return (0);
+}
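+
+/*
+ * Note on sctp_accept() above: the address handed back to the caller is
+ * the association's primary destination, and on the FreeBSD, APPLE,
+ * Windows and userspace variants it is allocated with SCTP_MALLOC_SONAME
+ * and returned through *addr rather than copied into a caller-supplied
+ * buffer.
+ */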
+
+#ifdef INET
+int
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
+{
+	struct sockaddr_in *sin;
+#elif defined(__Panda__)
+sctp_ingetaddr(struct socket *so, struct sockaddr *addr)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+#else
+sctp_ingetaddr(struct socket *so, struct mbuf *nam)
+{
+	struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
+#endif
+	uint32_t vrf_id;
+	struct sctp_inpcb *inp;
+	struct sctp_ifa *sctp_ifa;
+
+	/*
+	 * Do the malloc first in case it blocks.
+	 */
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+	if (sin == NULL)
+		return (ENOMEM);
+#elif defined(__Panda__)
+	bzero(sin, sizeof(*sin));
+#else
+	SCTP_BUF_LEN(nam) = sizeof(*sin);
+	memset(sin, 0, sizeof(*sin));
+#endif
+	sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	sin->sin_len = sizeof(*sin);
+#endif
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (!inp) {
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+		SCTP_FREE_SONAME(sin);
+#endif
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (ECONNRESET);
+	}
+	SCTP_INP_RLOCK(inp);
+	sin->sin_port = inp->sctp_lport;
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
+			struct sctp_tcb *stcb;
+			struct sockaddr_in *sin_a;
+			struct sctp_nets *net;
+			int fnd;
+
+			stcb = LIST_FIRST(&inp->sctp_asoc_list);
+			if (stcb == NULL) {
+				goto notConn;
+			}
+			fnd = 0;
+			sin_a = NULL;
+			SCTP_TCB_LOCK(stcb);
+			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+				if (sin_a == NULL)
+					/* this will make coverity happy */
+					continue;
+
+				if (sin_a->sin_family == AF_INET) {
+					fnd = 1;
+					break;
+				}
+			}
+			if ((!fnd) || (sin_a == NULL)) {
+				/* punt */
+				SCTP_TCB_UNLOCK(stcb);
+				goto notConn;
+			}
+
+			vrf_id = inp->def_vrf_id;
+			sctp_ifa = sctp_source_address_selection(inp,
+								 stcb,
+								 (sctp_route_t *)&net->ro,
+								 net, 0, vrf_id);
+			if (sctp_ifa) {
+				sin->sin_addr = sctp_ifa->address.sin.sin_addr;
+				sctp_free_ifa(sctp_ifa);
+			}
+			SCTP_TCB_UNLOCK(stcb);
+		} else {
+			/* For the bound all case you get back 0 */
+	notConn:
+			sin->sin_addr.s_addr = 0;
+		}
+
+	} else {
+		/* Take the first IPv4 address in the list */
+		struct sctp_laddr *laddr;
+		int fnd = 0;
+
+		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+			if (laddr->ifa->address.sa.sa_family == AF_INET) {
+				struct sockaddr_in *sin_a;
+
+				sin_a = &laddr->ifa->address.sin;
+				sin->sin_addr = sin_a->sin_addr;
+				fnd = 1;
+				break;
+			}
+		}
+		if (!fnd) {
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+			SCTP_FREE_SONAME(sin);
+#endif
+			SCTP_INP_RUNLOCK(inp);
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+			return (ENOENT);
+		}
+	}
+	SCTP_INP_RUNLOCK(inp);
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+	(*addr) = (struct sockaddr *)sin;
+#endif
+	return (0);
+}
+
+int
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+sctp_peeraddr(struct socket *so, struct sockaddr **addr)
+{
+	struct sockaddr_in *sin;
+#elif defined(__Panda__)
+sctp_peeraddr(struct socket *so, struct sockaddr *addr)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+#else
+sctp_peeraddr(struct socket *so, struct mbuf *nam)
+{
+	struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
+
+#endif
+	int fnd;
+	struct sockaddr_in *sin_a;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+
+	/* Do the malloc first in case it blocks. */
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
+	if (sin == NULL)
+		return (ENOMEM);
+#elif defined(__Panda__)
+	memset(sin, 0, sizeof(*sin));
+#else
+	SCTP_BUF_LEN(nam) = sizeof(*sin);
+	memset(sin, 0, sizeof(*sin));
+#endif
+	sin->sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	sin->sin_len = sizeof(*sin);
+#endif
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if ((inp == NULL) ||
+	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
+		/* UDP type and listeners will drop out here */
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+		SCTP_FREE_SONAME(sin);
+#endif
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
+		return (ENOTCONN);
+	}
+	SCTP_INP_RLOCK(inp);
+	stcb = LIST_FIRST(&inp->sctp_asoc_list);
+	if (stcb) {
+		SCTP_TCB_LOCK(stcb);
+	}
+	SCTP_INP_RUNLOCK(inp);
+	if (stcb == NULL) {
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+		SCTP_FREE_SONAME(sin);
+#endif
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+		return (ECONNRESET);
+	}
+	fnd = 0;
+	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
+		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
+		if (sin_a->sin_family == AF_INET) {
+			fnd = 1;
+			sin->sin_port = stcb->rport;
+			sin->sin_addr = sin_a->sin_addr;
+			break;
+		}
+	}
+	SCTP_TCB_UNLOCK(stcb);
+	if (!fnd) {
+		/* No IPv4 address */
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+		SCTP_FREE_SONAME(sin);
+#endif
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
+		return (ENOENT);
+	}
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+	(*addr) = (struct sockaddr *)sin;
+#endif
+	return (0);
+}
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+struct pr_usrreqs sctp_usrreqs = {
+#if defined(__FreeBSD__)
+	.pru_abort = sctp_abort,
+	.pru_accept = sctp_accept,
+	.pru_attach = sctp_attach,
+	.pru_bind = sctp_bind,
+	.pru_connect = sctp_connect,
+	.pru_control = in_control,
+#if __FreeBSD_version >= 690000
+	.pru_close = sctp_close,
+	.pru_detach = sctp_close,
+	.pru_sopoll = sopoll_generic,
+	.pru_flush = sctp_flush,
+#else
+	.pru_detach = sctp_detach,
+	.pru_sopoll = sopoll,
+#endif
+	.pru_disconnect = sctp_disconnect,
+	.pru_listen = sctp_listen,
+	.pru_peeraddr = sctp_peeraddr,
+	.pru_send = sctp_sendm,
+	.pru_shutdown = sctp_shutdown,
+	.pru_sockaddr = sctp_ingetaddr,
+	.pru_sosend = sctp_sosend,
+	.pru_soreceive = sctp_soreceive
+#elif defined(__APPLE__)
+	.pru_abort = sctp_abort,
+	.pru_accept = sctp_accept,
+	.pru_attach = sctp_attach,
+	.pru_bind = sctp_bind,
+	.pru_connect = sctp_connect,
+	.pru_connect2 = pru_connect2_notsupp,
+	.pru_control = in_control,
+	.pru_detach = sctp_detach,
+	.pru_disconnect = sctp_disconnect,
+	.pru_listen = sctp_listen,
+	.pru_peeraddr = sctp_peeraddr,
+	.pru_rcvd = NULL,
+	.pru_rcvoob = pru_rcvoob_notsupp,
+	.pru_send = sctp_sendm,
+	.pru_sense = pru_sense_null,
+	.pru_shutdown = sctp_shutdown,
+	.pru_sockaddr = sctp_ingetaddr,
+	.pru_sosend = sctp_sosend,
+	.pru_soreceive = sctp_soreceive,
+	.pru_sopoll = sopoll
+#elif defined(__Windows__)
+	sctp_abort,
+	sctp_accept,
+	sctp_attach,
+	sctp_bind,
+	sctp_connect,
+	pru_connect2_notsupp,
+	NULL,
+	NULL,
+	sctp_disconnect,
+	sctp_listen,
+	sctp_peeraddr,
+	NULL,
+	pru_rcvoob_notsupp,
+	NULL,
+	pru_sense_null,
+	sctp_shutdown,
+	sctp_flush,
+	sctp_ingetaddr,
+	sctp_sosend,
+	sctp_soreceive,
+	sopoll_generic,
+	NULL,
+	sctp_close
+#endif
+};
+#elif !defined(__Panda__) && !defined(__Userspace__)
+int
+sctp_usrreq(so, req, m, nam, control)
+	struct socket *so;
+	int req;
+	struct mbuf *m, *nam, *control;
+{
+	struct proc *p = curproc;
+	uint32_t vrf_id;
+	struct sctp_vrf *vrf;
+	int error;
+	int family;
+	struct sctp_inpcb *inp = (struct sctp_inpcb *)so->so_pcb;
+
+	error = 0;
+	family = so->so_proto->pr_domain->dom_family;
+	if (req == PRU_CONTROL) {
+		switch (family) {
+		case PF_INET:
+			error = in_control(so, (long)m, (caddr_t)nam,
+			    (struct ifnet *)control);
+			break;
+#ifdef INET6
+		case PF_INET6:
+			error = in6_control(so, (long)m, (caddr_t)nam,
+			    (struct ifnet *)control, p);
+			break;
+#endif
+		default:
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+			error = EAFNOSUPPORT;
+		}
+		return (error);
+	}
+	switch (req) {
+	case PRU_ATTACH:
+		error = sctp_attach(so, family, p);
+		break;
+	case PRU_DETACH:
+		error = sctp_detach(so);
+		break;
+	case PRU_BIND:
+		if (nam == NULL) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+		error = sctp_bind(so, nam, p);
+		break;
+	case PRU_LISTEN:
+		error = sctp_listen(so, p);
+		break;
+	case PRU_CONNECT:
+		if (nam == NULL) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+		error = sctp_connect(so, nam, p);
+		break;
+	case PRU_DISCONNECT:
+		error = sctp_disconnect(so);
+		break;
+	case PRU_ACCEPT:
+		if (nam == NULL) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
+			return (EINVAL);
+		}
+		error = sctp_accept(so, nam);
+		break;
+	case PRU_SHUTDOWN:
+		error = sctp_shutdown(so);
+		break;
+
+	case PRU_RCVD:
+		/*
+		 * For OpenBSD and NetBSD, this is really ugly. The mbuf *nam
+		 * that is passed (by soreceive()) is the int flags cast as
+		 * a (mbuf *). Yuck!
+		 */
+		break;
+
+	case PRU_SEND:
+		/* Flags are ignored */
+		{
+			struct sockaddr *addr;
+
+			if (nam == NULL)
+				addr = NULL;
+			else
+				addr = mtod(nam, struct sockaddr *);
+
+			error = sctp_sendm(so, 0, m, addr, control, p);
+		}
+		break;
+	case PRU_ABORT:
+		error = sctp_abort(so);
+		break;
+
+	case PRU_SENSE:
+		error = 0;
+		break;
+	case PRU_RCVOOB:
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+		error = EAFNOSUPPORT;
+		break;
+	case PRU_SENDOOB:
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
+		error = EAFNOSUPPORT;
+		break;
+	case PRU_PEERADDR:
+		error = sctp_peeraddr(so, nam);
+		break;
+	case PRU_SOCKADDR:
+		error = sctp_ingetaddr(so, nam);
+		break;
+	case PRU_SLOWTIMO:
+		error = 0;
+		break;
+	default:
+		break;
+	}
+	return (error);
+}
+
+#endif
+#endif
+
+#if defined(__Userspace__)
+int
+register_recv_cb(struct socket *so,
+                 int (*receive_cb)(struct socket *sock, union sctp_sockstore addr, void *data,
+                 size_t datalen, struct sctp_rcvinfo, int flags, void *ulp_info))
+{
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *) so->so_pcb;
+	if (inp == NULL) {
+		return (0);
+	}
+	SCTP_INP_WLOCK(inp);
+	inp->recv_callback = receive_cb;
+	SCTP_INP_WUNLOCK(inp);
+	return (1);
+}
+
+int
+register_send_cb(struct socket *so, uint32_t sb_threshold, int (*send_cb)(struct socket *sock, uint32_t sb_free))
+{
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *) so->so_pcb;
+	if (inp == NULL) {
+		return (0);
+	}
+	SCTP_INP_WLOCK(inp);
+	inp->send_callback = send_cb;
+	inp->send_sb_threshold = sb_threshold;
+	SCTP_INP_WUNLOCK(inp);
+	/* FIXME change to current amount free. This will be the full buffer
+	 * the first time this is registered but it could be only a portion
+	 * of the send buffer if this is called a second time e.g. if the
+	 * threshold changes.
+	 */
+	return (1);
+}
+
+int
+register_ulp_info (struct socket *so, void *ulp_info)
+{
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *) so->so_pcb;
+	if (inp == NULL) {
+		return (0);
+	}
+	SCTP_INP_WLOCK(inp);
+	inp->ulp_info = ulp_info;
+	SCTP_INP_WUNLOCK(inp);
+	return (1);
+}
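+
+/*
+ * Illustrative usage sketch for the registration helpers above;
+ * "my_recv_cb", "my_send_cb", "threshold" and "my_state" are placeholder
+ * names, and the meaning of the callback return value and the ownership
+ * of "data" are not defined here:
+ *
+ *	static int
+ *	my_recv_cb(struct socket *sock, union sctp_sockstore addr,
+ *	           void *data, size_t datalen, struct sctp_rcvinfo rcv,
+ *	           int flags, void *ulp_info)
+ *	{
+ *		... consume datalen bytes at data ...
+ *		return (1);
+ *	}
+ *
+ *	register_recv_cb(so, my_recv_cb);
+ *	register_send_cb(so, threshold, my_send_cb);
+ *	register_ulp_info(so, my_state);
+ */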
+#endif
diff --git a/usrsctplib/netinet/sctp_var.h b/usrsctplib/netinet/sctp_var.h
new file mode 100755
index 0000000..5c2a529
--- /dev/null
+++ b/usrsctplib/netinet/sctp_var.h
@@ -0,0 +1,513 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctp_var.h 309682 2016-12-07 19:30:59Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_VAR_H_
+#define _NETINET_SCTP_VAR_H_
+
+#include <netinet/sctp_uio.h>
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+extern struct pr_usrreqs sctp_usrreqs;
+#endif
+
+
+#define sctp_feature_on(inp, feature)  (inp->sctp_features |= feature)
+#define sctp_feature_off(inp, feature) (inp->sctp_features &= ~feature)
+#define sctp_is_feature_on(inp, feature) ((inp->sctp_features & feature) == feature)
+#define sctp_is_feature_off(inp, feature) ((inp->sctp_features & feature) == 0)
+
+#define sctp_stcb_feature_on(inp, stcb, feature) {\
+	if (stcb) { \
+		stcb->asoc.sctp_features |= feature; \
+	} else if (inp) { \
+		inp->sctp_features |= feature; \
+	} \
+}
+#define sctp_stcb_feature_off(inp, stcb, feature) {\
+	if (stcb) { \
+		stcb->asoc.sctp_features &= ~feature; \
+	} else if (inp) { \
+		inp->sctp_features &= ~feature; \
+	} \
+}
+#define sctp_stcb_is_feature_on(inp, stcb, feature) \
+	(((stcb != NULL) && \
+	  ((stcb->asoc.sctp_features & feature) == feature)) || \
+	 ((stcb == NULL) && (inp != NULL) && \
+	  ((inp->sctp_features & feature) == feature)))
+#define sctp_stcb_is_feature_off(inp, stcb, feature) \
+	(((stcb != NULL) && \
+	  ((stcb->asoc.sctp_features & feature) == 0)) || \
+	 ((stcb == NULL) && (inp != NULL) && \
+	  ((inp->sctp_features & feature) == 0)) || \
+	 ((stcb == NULL) && (inp == NULL)))
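+
+/*
+ * The sctp_stcb_* variants above operate on the association-level feature
+ * flags when an stcb is given and fall back to the endpoint-level flags in
+ * inp otherwise; sctp_stcb_is_feature_off() additionally reports "off"
+ * when both pointers are NULL.
+ */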
+
+/* managing mobility_feature in inpcb (by micchie) */
+#define sctp_mobility_feature_on(inp, feature)  (inp->sctp_mobility_features |= feature)
+#define sctp_mobility_feature_off(inp, feature) (inp->sctp_mobility_features &= ~feature)
+#define sctp_is_mobility_feature_on(inp, feature) (inp->sctp_mobility_features & feature)
+#define sctp_is_mobility_feature_off(inp, feature) ((inp->sctp_mobility_features & feature) == 0)
+
+#define sctp_maxspace(sb) (max((sb)->sb_hiwat,SCTP_MINIMAL_RWND))
+
+#define	sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_cc) ? (sctp_maxspace(sb) - (asoc)->sb_cc) : 0))
+
+#define	sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > (sb)->sb_cc) ? (sctp_maxspace(sb) - (sb)->sb_cc) : 0))
+
+#define sctp_sbspace_sub(a,b) (((a) > (b)) ? ((a) - (b)) : 0)
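+
+/*
+ * Example: with sb_hiwat = 256000 (and SCTP_MINIMAL_RWND smaller than
+ * that) and an association currently holding sb_cc = 100000 bytes,
+ * sctp_sbspace() yields 256000 - 100000 = 156000 bytes of remaining
+ * window; once sb_cc reaches or exceeds sctp_maxspace(), the result is
+ * clamped to 0 instead of going negative.
+ */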
+
+/*
+ * I tried to cache the readq entries at one point. But the reality
+ * is that it did not add any performance since this meant we had to
+ * lock the STCB on read. And at that point once you have to do an
+ * extra lock, it really does not matter if the lock is in the ZONE
+ * stuff or in our code. Note that this same problem would occur with
+ * an mbuf cache as well so it is not really worth doing, at least
+ * right now :-D
+ */
+#ifdef INVARIANTS
+#define sctp_free_a_readq(_stcb, _readq) { \
+	if ((_readq)->on_strm_q) \
+		panic("On strm q stcb:%p readq:%p", (_stcb), (_readq)); \
+	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
+	SCTP_DECR_READQ_COUNT(); \
+}
+#else
+#define sctp_free_a_readq(_stcb, _readq) { \
+	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), (_readq)); \
+	SCTP_DECR_READQ_COUNT(); \
+}
+#endif
+
+#define sctp_alloc_a_readq(_stcb, _readq) { \
+	(_readq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_readq), struct sctp_queued_to_read); \
+	if ((_readq)) { \
+	     SCTP_INCR_READQ_COUNT(); \
+	} \
+}
+
+#define sctp_free_a_strmoq(_stcb, _strmoq, _so_locked) { \
+	if ((_strmoq)->holds_key_ref) { \
+		sctp_auth_key_release((_stcb), (_strmoq)->auth_keyid, _so_locked); \
+		(_strmoq)->holds_key_ref = 0; \
+	} \
+	SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_strmoq), (_strmoq)); \
+	SCTP_DECR_STRMOQ_COUNT(); \
+}
+
+#define sctp_alloc_a_strmoq(_stcb, _strmoq) { \
+	(_strmoq) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_strmoq), struct sctp_stream_queue_pending); \
+	if ((_strmoq)) { \
+		memset(_strmoq, 0, sizeof(struct sctp_stream_queue_pending)); \
+		SCTP_INCR_STRMOQ_COUNT(); \
+		(_strmoq)->holds_key_ref = 0; \
+	} \
+}
+
+#define sctp_free_a_chunk(_stcb, _chk, _so_locked) { \
+	if ((_chk)->holds_key_ref) {\
+		sctp_auth_key_release((_stcb), (_chk)->auth_keyid, _so_locked); \
+		(_chk)->holds_key_ref = 0; \
+	} \
+	if (_stcb) { \
+		SCTP_TCB_LOCK_ASSERT((_stcb)); \
+		if ((_chk)->whoTo) { \
+			sctp_free_remote_addr((_chk)->whoTo); \
+			(_chk)->whoTo = NULL; \
+		} \
+		if (((_stcb)->asoc.free_chunk_cnt > SCTP_BASE_SYSCTL(sctp_asoc_free_resc_limit)) || \
+		    (SCTP_BASE_INFO(ipi_free_chunks) > SCTP_BASE_SYSCTL(sctp_system_free_resc_limit))) { \
+			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
+			SCTP_DECR_CHK_COUNT(); \
+		} else { \
+			TAILQ_INSERT_TAIL(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+			(_stcb)->asoc.free_chunk_cnt++; \
+			atomic_add_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
+		} \
+	} else { \
+		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_chunk), (_chk)); \
+		SCTP_DECR_CHK_COUNT(); \
+	} \
+}
+
+#define sctp_alloc_a_chunk(_stcb, _chk) { \
+	if (TAILQ_EMPTY(&(_stcb)->asoc.free_chunks)) { \
+		(_chk) = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_chunk), struct sctp_tmit_chunk); \
+		if ((_chk)) { \
+			SCTP_INCR_CHK_COUNT(); \
+			(_chk)->whoTo = NULL; \
+			(_chk)->holds_key_ref = 0; \
+		} \
+	} else { \
+		(_chk) = TAILQ_FIRST(&(_stcb)->asoc.free_chunks); \
+		TAILQ_REMOVE(&(_stcb)->asoc.free_chunks, (_chk), sctp_next); \
+		atomic_subtract_int(&SCTP_BASE_INFO(ipi_free_chunks), 1); \
+		(_chk)->holds_key_ref = 0; \
+		SCTP_STAT_INCR(sctps_cached_chk); \
+		(_stcb)->asoc.free_chunk_cnt--; \
+	} \
+}
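+
+/*
+ * The two macros above maintain a small per-association cache of
+ * sctp_tmit_chunk structures: sctp_free_a_chunk() hands a chunk back to
+ * the zone allocator only when the per-association or global free-chunk
+ * limits are exceeded and otherwise parks it on asoc.free_chunks, while
+ * sctp_alloc_a_chunk() reuses entries from that list before falling back
+ * to SCTP_ZONE_GET().
+ */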
+
+#if defined(__FreeBSD__) && __FreeBSD_version > 500000
+
+#define sctp_free_remote_addr(__net) { \
+	if ((__net)) {  \
+		if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
+			(void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \
+			(void)SCTP_OS_TIMER_STOP(&(__net)->pmtu_timer.timer); \
+			(void)SCTP_OS_TIMER_STOP(&(__net)->hb_timer.timer); \
+			if ((__net)->ro.ro_rt) { \
+				RTFREE((__net)->ro.ro_rt); \
+				(__net)->ro.ro_rt = NULL; \
+			} \
+			if ((__net)->src_addr_selected) { \
+				sctp_free_ifa((__net)->ro._s_addr); \
+				(__net)->ro._s_addr = NULL; \
+			} \
+			(__net)->src_addr_selected = 0; \
+			(__net)->dest_state &= ~SCTP_ADDR_REACHABLE; \
+			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
+			SCTP_DECR_RADDR_COUNT(); \
+		} \
+	} \
+}
+
+#define sctp_sbfree(ctl, stcb, sb, m) { \
+	SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+	SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
+	if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
+		SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+		SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+	} \
+	if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
+	    SCTP_BUF_TYPE(m) != MT_OOBDATA) \
+		atomic_subtract_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
+}
+
+#define sctp_sballoc(stcb, sb, m) { \
+	atomic_add_int(&(sb)->sb_cc,SCTP_BUF_LEN((m))); \
+	atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
+	if (stcb) { \
+		atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+		atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+	} \
+	if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
+	    SCTP_BUF_TYPE(m) != MT_OOBDATA) \
+		atomic_add_int(&(sb)->sb_ctl,SCTP_BUF_LEN((m))); \
+}
+
+#else				/* FreeBSD Version <= 500000 or non-FreeBSD */
+
+#define sctp_free_remote_addr(__net) { \
+	if ((__net)) { \
+		if (SCTP_DECREMENT_AND_CHECK_REFCOUNT(&(__net)->ref_count)) { \
+			(void)SCTP_OS_TIMER_STOP(&(__net)->rxt_timer.timer); \
+			(void)SCTP_OS_TIMER_STOP(&(__net)->pmtu_timer.timer); \
+			(void)SCTP_OS_TIMER_STOP(&(__net)->hb_timer.timer); \
+			if ((__net)->ro.ro_rt) { \
+				RTFREE((__net)->ro.ro_rt); \
+				(__net)->ro.ro_rt = NULL; \
+			} \
+			if ((__net)->src_addr_selected) { \
+				sctp_free_ifa((__net)->ro._s_addr); \
+				(__net)->ro._s_addr = NULL; \
+			} \
+			(__net)->src_addr_selected = 0; \
+			(__net)->dest_state &=~SCTP_ADDR_REACHABLE; \
+			SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_net), (__net)); \
+			SCTP_DECR_RADDR_COUNT(); \
+		} \
+	} \
+}
+
+#if defined(__Panda__)
+#define sctp_sbfree(ctl, stcb, sb, m) { \
+	if ((sb)->sb_cc >= (uint32_t)SCTP_BUF_LEN((m))) { \
+		atomic_subtract_int(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+	} else { \
+		(sb)->sb_cc = 0; \
+	} \
+	if (((ctl)->do_not_ref_stcb == 0) && stcb) { \
+		if ((stcb)->asoc.sb_cc >= (uint32_t)SCTP_BUF_LEN((m))) { \
+			atomic_subtract_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+		} else { \
+			(stcb)->asoc.sb_cc = 0; \
+		} \
+	} \
+}
+
+#define sctp_sballoc(stcb, sb, m) { \
+	atomic_add_int(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+	if (stcb) { \
+		atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+	} \
+}
+
+#else
+
+#define sctp_sbfree(ctl, stcb, sb, m) { \
+	SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+	SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
+	if (((ctl)->do_not_ref_stcb == 0) && stcb) { \
+		SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+		SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+	} \
+}
+
+#define sctp_sballoc(stcb, sb, m) { \
+	atomic_add_int(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
+	atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
+	if (stcb) { \
+		atomic_add_int(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
+		atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
+	} \
+}
+#endif
+#endif
+
+#define sctp_ucount_incr(val) { \
+	val++; \
+}
+
+#define sctp_ucount_decr(val) { \
+	if (val > 0) { \
+		val--; \
+	} else { \
+		val = 0; \
+	} \
+}
+
+#define sctp_mbuf_crush(data) do { \
+	struct mbuf *_m; \
+	_m = (data); \
+	while (_m && (SCTP_BUF_LEN(_m) == 0)) { \
+		(data)  = SCTP_BUF_NEXT(_m); \
+		SCTP_BUF_NEXT(_m) = NULL; \
+		sctp_m_free(_m); \
+		_m = (data); \
+	} \
+} while (0)
+
+#define sctp_flight_size_decrease(tp1) do { \
+	if (tp1->whoTo->flight_size >= tp1->book_size) \
+		tp1->whoTo->flight_size -= tp1->book_size; \
+	else \
+		tp1->whoTo->flight_size = 0; \
+} while (0)
+
+#define sctp_flight_size_increase(tp1) do { \
+	(tp1)->whoTo->flight_size += (tp1)->book_size; \
+} while (0)
+
+#ifdef SCTP_FS_SPEC_LOG
+#define sctp_total_flight_decrease(stcb, tp1) do { \
+	if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
+		stcb->asoc.fs_index = 0;\
+	stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].decr = 1; \
+	stcb->asoc.fs_index++; \
+	tp1->window_probe = 0; \
+	if (stcb->asoc.total_flight >= tp1->book_size) { \
+		stcb->asoc.total_flight -= tp1->book_size; \
+		if (stcb->asoc.total_flight_count > 0) \
+			stcb->asoc.total_flight_count--; \
+	} else { \
+		stcb->asoc.total_flight = 0; \
+		stcb->asoc.total_flight_count = 0; \
+	} \
+} while (0)
+
+#define sctp_total_flight_increase(stcb, tp1) do { \
+	if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
+		stcb->asoc.fs_index = 0;\
+	stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.tsn; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].book = tp1->book_size; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].sent = tp1->sent; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].incr = 1; \
+	stcb->asoc.fslog[stcb->asoc.fs_index].decr = 0; \
+	stcb->asoc.fs_index++; \
+	(stcb)->asoc.total_flight_count++; \
+	(stcb)->asoc.total_flight += (tp1)->book_size; \
+} while (0)
+
+#else
+
+#define sctp_total_flight_decrease(stcb, tp1) do { \
+	tp1->window_probe = 0; \
+	if (stcb->asoc.total_flight >= tp1->book_size) { \
+		stcb->asoc.total_flight -= tp1->book_size; \
+		if (stcb->asoc.total_flight_count > 0) \
+			stcb->asoc.total_flight_count--; \
+	} else { \
+		stcb->asoc.total_flight = 0; \
+		stcb->asoc.total_flight_count = 0; \
+	} \
+} while (0)
+
+#define sctp_total_flight_increase(stcb, tp1) do { \
+	(stcb)->asoc.total_flight_count++; \
+	(stcb)->asoc.total_flight += (tp1)->book_size; \
+} while (0)
+
+#endif
+
+#define SCTP_PF_ENABLED(_net) (_net->pf_threshold < _net->failure_threshold)
+#define SCTP_NET_IS_PF(_net) (_net->pf_threshold < _net->error_count)
+
+struct sctp_nets;
+struct sctp_inpcb;
+struct sctp_tcb;
+struct sctphdr;
+
+
+#if (defined(__FreeBSD__) && __FreeBSD_version > 690000) || defined(__Windows__) || defined(__Userspace__)
+void sctp_close(struct socket *so);
+#else
+int sctp_detach(struct socket *so);
+#endif
+int sctp_disconnect(struct socket *so);
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+#if defined(__FreeBSD__) && __FreeBSD_version < 902000
+void sctp_ctlinput __P((int, struct sockaddr *, void *));
+int sctp_ctloutput __P((struct socket *, struct sockopt *));
+#ifdef INET
+void sctp_input_with_port __P((struct mbuf *, int, uint16_t));
+void sctp_input __P((struct mbuf *, int));
+#endif
+void sctp_pathmtu_adjustment __P((struct sctp_tcb *, uint16_t));
+#else
+void sctp_ctlinput(int, struct sockaddr *, void *);
+int sctp_ctloutput(struct socket *, struct sockopt *);
+#ifdef INET
+void sctp_input_with_port(struct mbuf *, int, uint16_t);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 1100020
+int sctp_input(struct mbuf **, int *, int);
+#else
+void sctp_input(struct mbuf *, int);
+#endif
+#endif
+void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t);
+#endif
+#else
+#if defined(__Panda__)
+void sctp_input(pakhandle_type i_pak);
+#elif defined(__Userspace__)
+void sctp_pathmtu_adjustment(struct sctp_tcb *, uint16_t);
+#else
+void sctp_input(struct mbuf *,...);
+#endif
+void *sctp_ctlinput(int, struct sockaddr *, void *);
+int sctp_ctloutput(int, struct socket *, int, int, struct mbuf **);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version < 902000
+void sctp_drain __P((void));
+#else
+void sctp_drain(void);
+#endif
+#if defined(__Userspace__)
+void sctp_init(uint16_t,
+               int (*)(void *addr, void *buffer, size_t length, uint8_t tos, uint8_t set_df),
+               void (*)(const char *, ...));
+#elif defined(__FreeBSD__) && __FreeBSD_version < 902000
+void sctp_init __P((void));
+#elif defined(__APPLE__) && (!defined(APPLE_LEOPARD) && !defined(APPLE_SNOWLEOPARD) &&!defined(APPLE_LION) && !defined(APPLE_MOUNTAINLION))
+void sctp_init(struct protosw *pp, struct domain *dp);
+#else
+void sctp_init(void);
+void sctp_notify(struct sctp_inpcb *, struct sctp_tcb *, struct sctp_nets *,
+    uint8_t, uint8_t, uint16_t, uint16_t);
+#endif
+#if !defined(__FreeBSD__)
+void sctp_finish(void);
+#endif
+#if defined(__FreeBSD__) || defined(__Windows__) || defined(__Userspace__)
+int sctp_flush(struct socket *, int);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version < 902000
+int sctp_shutdown __P((struct socket *));
+#else
+int sctp_shutdown(struct socket *);
+#endif
+int sctp_bindx(struct socket *, int, struct sockaddr_storage *,
+	int, int, struct proc *);
+/* can't use sctp_assoc_t here */
+int sctp_peeloff(struct socket *, struct socket *, int, caddr_t, int *);
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+int sctp_ingetaddr(struct socket *, struct sockaddr **);
+#elif defined(__Panda__)
+int sctp_ingetaddr(struct socket *, struct sockaddr *);
+#else
+int sctp_ingetaddr(struct socket *, struct mbuf *);
+#endif
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__)
+int sctp_peeraddr(struct socket *, struct sockaddr **);
+#elif defined(__Panda__)
+int sctp_peeraddr(struct socket *, struct sockaddr *);
+#else
+int sctp_peeraddr(struct socket *, struct mbuf *);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
+#if __FreeBSD_version >= 700000
+int sctp_listen(struct socket *, int, struct thread *);
+#else
+int sctp_listen(struct socket *, struct thread *);
+#endif
+#elif defined(__Windows__)
+int sctp_listen(struct socket *, int, PKTHREAD);
+#elif defined(__Userspace__)
+int sctp_listen(struct socket *, int, struct proc *);
+#else
+int sctp_listen(struct socket *, struct proc *);
+#endif
+#if defined(__FreeBSD__) || defined(__APPLE__) || defined(__Windows__) || defined(__Userspace__)
+int sctp_accept(struct socket *, struct sockaddr **);
+#elif defined(__Panda__)
+int sctp_accept(struct socket *, struct sockaddr *, int *, void *, int *);
+#else
+int sctp_accept(struct socket *, struct mbuf *);
+#endif
+
+#endif /* _KERNEL */
+
+#endif /* !_NETINET_SCTP_VAR_H_ */
diff --git a/usrsctplib/netinet/sctputil.c b/usrsctplib/netinet/sctputil.c
new file mode 100755
index 0000000..87e7774
--- /dev/null
+++ b/usrsctplib/netinet/sctputil.c
@@ -0,0 +1,8174 @@
+/*-
+ * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#include <netinet/sctp_os.h>
+#include <netinet/sctp_pcb.h>
+#include <netinet/sctputil.h>
+#include <netinet/sctp_var.h>
+#include <netinet/sctp_sysctl.h>
+#ifdef INET6
+#if defined(__Userspace__) || defined(__FreeBSD__)
+#include <netinet6/sctp6_var.h>
+#endif
+#endif
+#include <netinet/sctp_header.h>
+#include <netinet/sctp_output.h>
+#include <netinet/sctp_uio.h>
+#include <netinet/sctp_timer.h>
+#include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
+#include <netinet/sctp_auth.h>
+#include <netinet/sctp_asconf.h>
+#include <netinet/sctp_bsd_addr.h>
+#if defined(__Userspace__)
+#include <netinet/sctp_constants.h>
+#endif
+#if defined(__FreeBSD__)
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
+#include <sys/proc.h>
+#ifdef INET6
+#include <netinet/icmp6.h>
+#endif
+#endif
+
+#if defined(__APPLE__)
+#define APPLE_FILE_NO 8
+#endif
+
+#if defined(__Windows__)
+#if !defined(SCTP_LOCAL_TRACE_BUF)
+#include "eventrace_netinet.h"
+#include "sctputil.tmh" /* this is the file that will be auto generated */
+#endif
+#else
+#ifndef KTR_SCTP
+#define KTR_SCTP KTR_SUBSYS
+#endif
+#endif
+
+extern const struct sctp_cc_functions sctp_cc_functions[];
+extern const struct sctp_ss_functions sctp_ss_functions[];
+
+void
+sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.sb.stcb = stcb;
+	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
+	if (stcb)
+		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
+	else
+		sctp_clog.x.sb.stcb_sbcc = 0;
+	sctp_clog.x.sb.incr = incr;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_SB,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.close.inp = (void *)inp;
+	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
+	if (stcb) {
+		sctp_clog.x.close.stcb = (void *)stcb;
+		sctp_clog.x.close.state = (uint16_t)stcb->asoc.state;
+	} else {
+		sctp_clog.x.close.stcb = 0;
+		sctp_clog.x.close.state = 0;
+	}
+	sctp_clog.x.close.loc = loc;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_CLOSE,
+	     0,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+rto_logging(struct sctp_nets *net, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	memset(&sctp_clog, 0, sizeof(sctp_clog));
+	sctp_clog.x.rto.net = (void *) net;
+	sctp_clog.x.rto.rtt = net->rtt / 1000;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_RTT,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.strlog.stcb = stcb;
+	sctp_clog.x.strlog.n_tsn = tsn;
+	sctp_clog.x.strlog.n_sseq = sseq;
+	sctp_clog.x.strlog.e_tsn = 0;
+	sctp_clog.x.strlog.e_sseq = 0;
+	sctp_clog.x.strlog.strm = stream;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_STRM,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.nagle.stcb = (void *)stcb;
+	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
+	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
+	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
+	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_NAGLE,
+	     action,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.sack.cumack = cumack;
+	sctp_clog.x.sack.oldcumack = old_cumack;
+	sctp_clog.x.sack.tsn = tsn;
+	sctp_clog.x.sack.numGaps = gaps;
+	sctp_clog.x.sack.numDups = dups;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_SACK,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	memset(&sctp_clog, 0, sizeof(sctp_clog));
+	sctp_clog.x.map.base = map;
+	sctp_clog.x.map.cum = cum;
+	sctp_clog.x.map.high = high;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_MAP,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	memset(&sctp_clog, 0, sizeof(sctp_clog));
+	sctp_clog.x.fr.largest_tsn = biggest_tsn;
+	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
+	sctp_clog.x.fr.tsn = tsn;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_FR,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+#ifdef SCTP_MBUF_LOGGING
+void
+sctp_log_mb(struct mbuf *m, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.mb.mp = m;
+	sctp_clog.x.mb.mbuf_flags = (uint8_t)(SCTP_BUF_GET_FLAGS(m));
+	sctp_clog.x.mb.size = (uint16_t)(SCTP_BUF_LEN(m));
+	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
+	if (SCTP_BUF_IS_EXTENDED(m)) {
+		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
+#if defined(__APPLE__)
+		/* APPLE does not use a ref_cnt, but a forward/backward ref queue */
+#else
+		sctp_clog.x.mb.refcnt = (uint8_t)(SCTP_BUF_EXTEND_REFCNT(m));
+#endif
+	} else {
+		sctp_clog.x.mb.ext = 0;
+		sctp_clog.x.mb.refcnt = 0;
+	}
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_MBUF,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_mbc(struct mbuf *m, int from)
+{
+	struct mbuf *mat;
+
+	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
+		sctp_log_mb(mat, from);
+	}
+}
+#endif
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	if (control == NULL) {
+		SCTP_PRINTF("Gak log of NULL?\n");
+		return;
+	}
+	sctp_clog.x.strlog.stcb = control->stcb;
+	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
+	sctp_clog.x.strlog.n_sseq = (uint16_t)control->mid;
+	sctp_clog.x.strlog.strm = control->sinfo_stream;
+	if (poschk != NULL) {
+		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
+		sctp_clog.x.strlog.e_sseq = (uint16_t)poschk->mid;
+	} else {
+		sctp_clog.x.strlog.e_tsn = 0;
+		sctp_clog.x.strlog.e_sseq = 0;
+	}
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_STRM,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.cwnd.net = net;
+	if (stcb->asoc.send_queue_cnt > 255)
+		sctp_clog.x.cwnd.cnt_in_send = 255;
+	else
+		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+	if (stcb->asoc.stream_queue_cnt > 255)
+		sctp_clog.x.cwnd.cnt_in_str = 255;
+	else
+		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+
+	if (net) {
+		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
+		sctp_clog.x.cwnd.inflight = net->flight_size;
+		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
+		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
+		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
+	}
+	if (SCTP_CWNDLOG_PRESEND == from) {
+		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
+	}
+	sctp_clog.x.cwnd.cwnd_augment = augment;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_CWND,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+#ifndef __APPLE__
+void
+sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	memset(&sctp_clog, 0, sizeof(sctp_clog));
+	if (inp) {
+		sctp_clog.x.lock.sock = (void *) inp->sctp_socket;
+
+	} else {
+		sctp_clog.x.lock.sock = (void *) NULL;
+	}
+	sctp_clog.x.lock.inp = (void *) inp;
+#if (defined(__FreeBSD__) && __FreeBSD_version >= 503000) || (defined(__APPLE__))
+	if (stcb) {
+		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
+	} else {
+		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
+	}
+	if (inp) {
+		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
+		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
+	} else {
+		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
+		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
+	}
+#if (defined(__FreeBSD__) && __FreeBSD_version <= 602000)
+	sctp_clog.x.lock.info_lock = mtx_owned(&SCTP_BASE_INFO(ipi_ep_mtx));
+#else
+	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
+#endif
+	if (inp && (inp->sctp_socket)) {
+		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
+		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
+	} else {
+		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
+		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
+		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
+	}
+#endif
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_LOCK_EVENT,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+#endif
+
+void
+sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	memset(&sctp_clog, 0, sizeof(sctp_clog));
+	sctp_clog.x.cwnd.net = net;
+	sctp_clog.x.cwnd.cwnd_new_value = error;
+	sctp_clog.x.cwnd.inflight = net->flight_size;
+	sctp_clog.x.cwnd.cwnd_augment = burst;
+	if (stcb->asoc.send_queue_cnt > 255)
+		sctp_clog.x.cwnd.cnt_in_send = 255;
+	else
+		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
+	if (stcb->asoc.stream_queue_cnt > 255)
+		sctp_clog.x.cwnd.cnt_in_str = 255;
+	else
+		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_MAXBURST,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.rwnd.rwnd = peers_rwnd;
+	sctp_clog.x.rwnd.send_size = snd_size;
+	sctp_clog.x.rwnd.overhead = overhead;
+	sctp_clog.x.rwnd.new_rwnd = 0;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_RWND,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.rwnd.rwnd = peers_rwnd;
+	sctp_clog.x.rwnd.send_size = flight_size;
+	sctp_clog.x.rwnd.overhead = overhead;
+	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_RWND,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+#ifdef SCTP_MBCNT_LOGGING
+static void
+sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.mbcnt.total_queue_size = total_oq;
+	sctp_clog.x.mbcnt.size_change = book;
+	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
+	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_MBCNT,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+#endif
+
+void
+sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_MISC_EVENT,
+	     from,
+	     a, b, c, d);
+#endif
+}
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.wake.stcb = (void *)stcb;
+	sctp_clog.x.wake.wake_cnt = wake_cnt;
+	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
+	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
+	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
+
+	if (stcb->asoc.stream_queue_cnt < 0xff)
+		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
+	else
+		sctp_clog.x.wake.stream_qcnt = 0xff;
+
+	if (stcb->asoc.chunks_on_out_queue < 0xff)
+		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
+	else
+		sctp_clog.x.wake.chunks_on_oque = 0xff;
+
+	sctp_clog.x.wake.sctpflags = 0;
+	/* set in the deferred mode stuff */
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
+		sctp_clog.x.wake.sctpflags |= 1;
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
+		sctp_clog.x.wake.sctpflags |= 2;
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
+		sctp_clog.x.wake.sctpflags |= 4;
+	/* what about the sb */
+	if (stcb->sctp_socket) {
+		struct socket *so = stcb->sctp_socket;
+
+		sctp_clog.x.wake.sbflags = (uint8_t)((so->so_snd.sb_flags & 0x00ff));
+	} else {
+		sctp_clog.x.wake.sbflags = 0xff;
+	}
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_WAKE,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+void
+sctp_log_block(uint8_t from, struct sctp_association *asoc, size_t sendlen)
+{
+#if defined(__FreeBSD__) || defined(SCTP_LOCAL_TRACE_BUF)
+	struct sctp_cwnd_log sctp_clog;
+
+	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
+	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
+	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
+	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
+	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
+	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight/1024);
+	sctp_clog.x.blk.sndlen = (uint32_t)sendlen;
+	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
+	     SCTP_LOG_EVENT_BLOCK,
+	     from,
+	     sctp_clog.x.misc.log1,
+	     sctp_clog.x.misc.log2,
+	     sctp_clog.x.misc.log3,
+	     sctp_clog.x.misc.log4);
+#endif
+}
+
+int
+sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
+{
+	/* May need to fix this if ktrdump does not work */
+	return (0);
+}
+
+#ifdef SCTP_AUDITING_ENABLED
+uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
+static int sctp_audit_indx = 0;
+
+static
+void
+sctp_print_audit_report(void)
+{
+	int i;
+	int cnt;
+
+	cnt = 0;
+	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
+		if ((sctp_audit_data[i][0] == 0xe0) &&
+		    (sctp_audit_data[i][1] == 0x01)) {
+			cnt = 0;
+			SCTP_PRINTF("\n");
+		} else if (sctp_audit_data[i][0] == 0xf0) {
+			cnt = 0;
+			SCTP_PRINTF("\n");
+		} else if ((sctp_audit_data[i][0] == 0xc0) &&
+		    (sctp_audit_data[i][1] == 0x01)) {
+			SCTP_PRINTF("\n");
+			cnt = 0;
+		}
+		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+			    (uint32_t) sctp_audit_data[i][1]);
+		cnt++;
+		if ((cnt % 14) == 0)
+			SCTP_PRINTF("\n");
+	}
+	for (i = 0; i < sctp_audit_indx; i++) {
+		if ((sctp_audit_data[i][0] == 0xe0) &&
+		    (sctp_audit_data[i][1] == 0x01)) {
+			cnt = 0;
+			SCTP_PRINTF("\n");
+		} else if (sctp_audit_data[i][0] == 0xf0) {
+			cnt = 0;
+			SCTP_PRINTF("\n");
+		} else if ((sctp_audit_data[i][0] == 0xc0) &&
+		    (sctp_audit_data[i][1] == 0x01)) {
+			SCTP_PRINTF("\n");
+			cnt = 0;
+		}
+		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
+			    (uint32_t) sctp_audit_data[i][1]);
+		cnt++;
+		if ((cnt % 14) == 0)
+			SCTP_PRINTF("\n");
+	}
+	SCTP_PRINTF("\n");
+}
+
+void
+sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	int resend_cnt, tot_out, rep, tot_book_cnt;
+	struct sctp_nets *lnet;
+	struct sctp_tmit_chunk *chk;
+
+	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
+	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
+	sctp_audit_indx++;
+	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+		sctp_audit_indx = 0;
+	}
+	if (inp == NULL) {
+		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+		sctp_audit_data[sctp_audit_indx][1] = 0x01;
+		sctp_audit_indx++;
+		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+			sctp_audit_indx = 0;
+		}
+		return;
+	}
+	if (stcb == NULL) {
+		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+		sctp_audit_data[sctp_audit_indx][1] = 0x02;
+		sctp_audit_indx++;
+		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+			sctp_audit_indx = 0;
+		}
+		return;
+	}
+	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
+	sctp_audit_data[sctp_audit_indx][1] =
+	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+	sctp_audit_indx++;
+	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+		sctp_audit_indx = 0;
+	}
+	rep = 0;
+	tot_book_cnt = 0;
+	resend_cnt = tot_out = 0;
+	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+		if (chk->sent == SCTP_DATAGRAM_RESEND) {
+			resend_cnt++;
+		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
+			tot_out += chk->book_size;
+			tot_book_cnt++;
+		}
+	}
+	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
+		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
+		sctp_audit_indx++;
+		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+			sctp_audit_indx = 0;
+		}
+		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
+			    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
+		rep = 1;
+		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
+		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
+		sctp_audit_data[sctp_audit_indx][1] =
+		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
+		sctp_audit_indx++;
+		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+			sctp_audit_indx = 0;
+		}
+	}
+	if (tot_out != stcb->asoc.total_flight) {
+		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
+		sctp_audit_indx++;
+		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+			sctp_audit_indx = 0;
+		}
+		rep = 1;
+		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
+			    (int)stcb->asoc.total_flight);
+		stcb->asoc.total_flight = tot_out;
+	}
+	if (tot_book_cnt != stcb->asoc.total_flight_count) {
+		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
+		sctp_audit_indx++;
+		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+			sctp_audit_indx = 0;
+		}
+		rep = 1;
+		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
+
+		stcb->asoc.total_flight_count = tot_book_cnt;
+	}
+	tot_out = 0;
+	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+		tot_out += lnet->flight_size;
+	}
+	if (tot_out != stcb->asoc.total_flight) {
+		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
+		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
+		sctp_audit_indx++;
+		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+			sctp_audit_indx = 0;
+		}
+		rep = 1;
+		SCTP_PRINTF("real flight:%d net total was %d\n",
+			    stcb->asoc.total_flight, tot_out);
+		/* now corrective action */
+		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
+
+			tot_out = 0;
+			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
+				if ((chk->whoTo == lnet) &&
+				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
+					tot_out += chk->book_size;
+				}
+			}
+			if (lnet->flight_size != tot_out) {
+				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
+					    (void *)lnet, lnet->flight_size,
+					    tot_out);
+				lnet->flight_size = tot_out;
+			}
+		}
+	}
+	if (rep) {
+		sctp_print_audit_report();
+	}
+}
+
+void
+sctp_audit_log(uint8_t ev, uint8_t fd)
+{
+
+	sctp_audit_data[sctp_audit_indx][0] = ev;
+	sctp_audit_data[sctp_audit_indx][1] = fd;
+	sctp_audit_indx++;
+	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
+		sctp_audit_indx = 0;
+	}
+}
+
+#endif
+
+/*
+ * sctp_stop_timers_for_shutdown() should be called
+ * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
+ * state to make sure that all timers are stopped.
+ */
+void
+sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
+{
+	struct sctp_association *asoc;
+	struct sctp_nets *net;
+
+	asoc = &stcb->asoc;
+
+	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
+	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
+	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
+		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
+		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
+	}
+}
+
+/*
+ * A list of sizes based on typical MTUs, used only if the next hop size is
+ * not returned.
+ */
+static uint32_t sctp_mtu_sizes[] = {
+	68,
+	296,
+	508,
+	512,
+	544,
+	576,
+	1006,
+	1492,
+	1500,
+	1536,
+	2002,
+	2048,
+	4352,
+	4464,
+	8166,
+	17914,
+	32000,
+	65535
+};
+
+/*
+ * Return the largest MTU smaller than val. If there is no
+ * entry, just return val.
+ */
+uint32_t
+sctp_get_prev_mtu(uint32_t val)
+{
+	uint32_t i;
+
+	if (val <= sctp_mtu_sizes[0]) {
+		return (val);
+	}
+	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
+		if (val <= sctp_mtu_sizes[i]) {
+			break;
+		}
+	}
+	return (sctp_mtu_sizes[i - 1]);
+}
+
+/*
+ * Return the smallest MTU larger than val. If there is no
+ * entry, just return val.
+ */
+uint32_t
+sctp_get_next_mtu(uint32_t val)
+{
+	/* select another MTU that is just bigger than this one */
+	uint32_t i;
+
+	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
+		if (val < sctp_mtu_sizes[i]) {
+			return (sctp_mtu_sizes[i]);
+		}
+	}
+	return (val);
+}
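+
+/*
+ * Editorial illustration (not part of upstream usrsctp, compiled out): how
+ * the two table lookups above behave for a value that is not an entry of
+ * sctp_mtu_sizes[].
+ */
+#if 0
+static void
+sctp_mtu_lookup_example(void)
+{
+	uint32_t prev, next;
+
+	prev = sctp_get_prev_mtu(1400);	/* 1006, the largest entry below 1400 */
+	next = sctp_get_next_mtu(1400);	/* 1492, the smallest entry above 1400 */
+	(void)prev;
+	(void)next;
+}
+#endif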
+
+void
+sctp_fill_random_store(struct sctp_pcb *m)
+{
+	/*
+	 * Here we use MD5/SHA-1 to hash our good random numbers together with
+	 * our counter. The result becomes our new good random numbers and we
+	 * then set up to hand these out. Note that we do no locking to
+	 * protect this. That is ok: if competing callers race here, we just
+	 * get more gobbledygook in the random store, which is what we want.
+	 * There is a danger that two callers will use the same random
+	 * numbers, but that's ok too since that is random as well :->
+	 */
+	m->store_at = 0;
+	(void)sctp_hmac(SCTP_HMAC, (uint8_t *)m->random_numbers,
+	    sizeof(m->random_numbers), (uint8_t *)&m->random_counter,
+	    sizeof(m->random_counter), (uint8_t *)m->random_store);
+	m->random_counter++;
+}
+
+uint32_t
+sctp_select_initial_TSN(struct sctp_pcb *inp)
+{
+	/*
+	 * A true implementation should use a random selection process to get
+	 * the initial stream sequence number, using RFC 1750 as a good
+	 * guideline.
+	 */
+	uint32_t x, *xp;
+	uint8_t *p;
+	int store_at, new_store;
+
+	if (inp->initial_sequence_debug != 0) {
+		uint32_t ret;
+
+		ret = inp->initial_sequence_debug;
+		inp->initial_sequence_debug++;
+		return (ret);
+	}
+ retry:
+	store_at = inp->store_at;
+	new_store = store_at + sizeof(uint32_t);
+	if (new_store >= (SCTP_SIGNATURE_SIZE-3)) {
+		new_store = 0;
+	}
+	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
+		goto retry;
+	}
+	if (new_store == 0) {
+		/* Refill the random store */
+		sctp_fill_random_store(inp);
+	}
+	p = &inp->random_store[store_at];
+	xp = (uint32_t *)p;
+	x = *xp;
+	return (x);
+}
+
+uint32_t
+sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
+{
+	uint32_t x;
+	struct timeval now;
+
+	if (check) {
+		(void)SCTP_GETTIME_TIMEVAL(&now);
+	}
+	for (;;) {
+		x = sctp_select_initial_TSN(&inp->sctp_ep);
+		if (x == 0) {
+			/* we never use 0 */
+			continue;
+		}
+		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
+			break;
+		}
+	}
+	return (x);
+}
+
+int32_t
+sctp_map_assoc_state(int kernel_state)
+{
+	int32_t user_state;
+
+	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
+		user_state = SCTP_CLOSED;
+	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
+		user_state = SCTP_SHUTDOWN_PENDING;
+	} else {
+		switch (kernel_state & SCTP_STATE_MASK) {
+		case SCTP_STATE_EMPTY:
+			user_state = SCTP_CLOSED;
+			break;
+		case SCTP_STATE_INUSE:
+			user_state = SCTP_CLOSED;
+			break;
+		case SCTP_STATE_COOKIE_WAIT:
+			user_state = SCTP_COOKIE_WAIT;
+			break;
+		case SCTP_STATE_COOKIE_ECHOED:
+			user_state = SCTP_COOKIE_ECHOED;
+			break;
+		case SCTP_STATE_OPEN:
+			user_state = SCTP_ESTABLISHED;
+			break;
+		case SCTP_STATE_SHUTDOWN_SENT:
+			user_state = SCTP_SHUTDOWN_SENT;
+			break;
+		case SCTP_STATE_SHUTDOWN_RECEIVED:
+			user_state = SCTP_SHUTDOWN_RECEIVED;
+			break;
+		case SCTP_STATE_SHUTDOWN_ACK_SENT:
+			user_state = SCTP_SHUTDOWN_ACK_SENT;
+			break;
+		default:
+			user_state = SCTP_CLOSED;
+			break;
+		}
+	}
+	return (user_state);
+}
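+
+/*
+ * Editorial illustration (not part of upstream usrsctp, compiled out):
+ * sctp_map_assoc_state() checks the aborted/shutdown-pending bits before
+ * decoding the masked state, so an open association that is shutdown
+ * pending is reported as SCTP_SHUTDOWN_PENDING, not SCTP_ESTABLISHED.
+ */
+#if 0
+static void
+sctp_map_assoc_state_example(void)
+{
+	int32_t user_state;
+
+	user_state = sctp_map_assoc_state(SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING);
+	/* user_state is SCTP_SHUTDOWN_PENDING here */
+	(void)user_state;
+}
+#endif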
+
+int
+sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+               uint32_t override_tag, uint32_t vrf_id, uint16_t o_strms)
+{
+	struct sctp_association *asoc;
+	/*
+	 * Anything set to zero is taken care of by the allocation routine's
+	 * bzero
+	 */
+
+	/*
+	 * Up front, select what scoping to apply to the addresses I tell my
+	 * peer. Not sure what to do with these right now; we will need to
+	 * come up with a way to set them. We may need to pass them through
+	 * from the caller in the sctp_aloc_assoc() function.
+	 */
+	int i;
+#if defined(SCTP_DETAILED_STR_STATS)
+	int j;
+#endif
+
+	asoc = &stcb->asoc;
+	/* init all variables to a known value. */
+	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
+	asoc->max_burst = inp->sctp_ep.max_burst;
+	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
+	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
+	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
+	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
+	asoc->ecn_supported = inp->ecn_supported;
+	asoc->prsctp_supported = inp->prsctp_supported;
+	asoc->idata_supported = inp->idata_supported;
+	asoc->auth_supported = inp->auth_supported;
+	asoc->asconf_supported = inp->asconf_supported;
+	asoc->reconfig_supported = inp->reconfig_supported;
+	asoc->nrsack_supported = inp->nrsack_supported;
+	asoc->pktdrop_supported = inp->pktdrop_supported;
+	asoc->idata_supported = inp->idata_supported;
+	asoc->sctp_cmt_pf = (uint8_t)0;
+	asoc->sctp_frag_point = inp->sctp_frag_point;
+	asoc->sctp_features = inp->sctp_features;
+	asoc->default_dscp = inp->sctp_ep.default_dscp;
+	asoc->max_cwnd = inp->max_cwnd;
+#ifdef INET6
+	if (inp->sctp_ep.default_flowlabel) {
+		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
+	} else {
+		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
+			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
+			asoc->default_flowlabel &= 0x000fffff;
+			asoc->default_flowlabel |= 0x80000000;
+		} else {
+			asoc->default_flowlabel = 0;
+		}
+	}
+#endif
+	asoc->sb_send_resv = 0;
+	if (override_tag) {
+		asoc->my_vtag = override_tag;
+	} else {
+		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport,  1);
+	}
+	/* Get the nonce tags */
+	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
+	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
+	asoc->vrf_id = vrf_id;
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	asoc->tsn_in_at = 0;
+	asoc->tsn_out_at = 0;
+	asoc->tsn_in_wrapped = 0;
+	asoc->tsn_out_wrapped = 0;
+	asoc->cumack_log_at = 0;
+	asoc->cumack_log_atsnt = 0;
+#endif
+#ifdef SCTP_FS_SPEC_LOG
+	asoc->fs_index = 0;
+#endif
+	asoc->refcnt = 0;
+	asoc->assoc_up_sent = 0;
+	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
+	    sctp_select_initial_TSN(&inp->sctp_ep);
+	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
+	/* we are optimistic here */
+	asoc->peer_supports_nat = 0;
+	asoc->sent_queue_retran_cnt = 0;
+
+	/* for CMT */
+        asoc->last_net_cmt_send_started = NULL;
+
+	/* This will need to be adjusted */
+	asoc->last_acked_seq = asoc->init_seq_number - 1;
+	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
+	asoc->asconf_seq_in = asoc->last_acked_seq;
+
+	/* here we are different, we hold the next one we expect */
+	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
+
+	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
+	asoc->initial_rto = inp->sctp_ep.initial_rto;
+
+	asoc->max_init_times = inp->sctp_ep.max_init_times;
+	asoc->max_send_times = inp->sctp_ep.max_send_times;
+	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
+	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
+	asoc->free_chunk_cnt = 0;
+
+	asoc->iam_blocking = 0;
+	asoc->context = inp->sctp_context;
+	asoc->local_strreset_support = inp->local_strreset_support;
+	asoc->def_send = inp->def_send;
+	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
+	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
+	asoc->pr_sctp_cnt = 0;
+	asoc->total_output_queue_size = 0;
+
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+		asoc->scope.ipv6_addr_legal = 1;
+		if (SCTP_IPV6_V6ONLY(inp) == 0) {
+			asoc->scope.ipv4_addr_legal = 1;
+		} else {
+			asoc->scope.ipv4_addr_legal = 0;
+		}
+#if defined(__Userspace__)
+			asoc->scope.conn_addr_legal = 0;
+#endif
+	} else {
+		asoc->scope.ipv6_addr_legal = 0;
+#if defined(__Userspace__)
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_CONN) {
+			asoc->scope.conn_addr_legal = 1;
+			asoc->scope.ipv4_addr_legal = 0;
+		} else {
+			asoc->scope.conn_addr_legal = 0;
+			asoc->scope.ipv4_addr_legal = 1;
+		}
+#else
+		asoc->scope.ipv4_addr_legal = 1;
+#endif
+	}
+
+	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
+	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
+
+	asoc->smallest_mtu = inp->sctp_frag_point;
+	asoc->minrto = inp->sctp_ep.sctp_minrto;
+	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
+
+	asoc->stream_locked_on = 0;
+	asoc->ecn_echo_cnt_onq = 0;
+	asoc->stream_locked = 0;
+
+	asoc->send_sack = 1;
+
+	LIST_INIT(&asoc->sctp_restricted_addrs);
+
+	TAILQ_INIT(&asoc->nets);
+	TAILQ_INIT(&asoc->pending_reply_queue);
+	TAILQ_INIT(&asoc->asconf_ack_sent);
+	/* Set up to fill the hb random cache at the first HB */
+	asoc->hb_random_idx = 4;
+
+	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
+
+	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
+	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
+
+	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
+	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
+
+	/*
+	 * Now the stream parameters, here we allocate space for all streams
+	 * that we request by default.
+	 */
+	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
+	    o_strms;
+	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
+		    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
+		    SCTP_M_STRMO);
+	if (asoc->strmout == NULL) {
+		/* big trouble no memory */
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+		return (ENOMEM);
+	}
+	for (i = 0; i < asoc->streamoutcnt; i++) {
+		/*
+		 * The inbound side must be set to 0xffff. Also NOTE: when the
+		 * INIT sender gets the INIT-ACK back, we MUST reduce the
+		 * count (streamoutcnt), but first check whether we sent on
+		 * any of the upper streams that were dropped (if some were).
+		 * Anything sent on a dropped stream must be reported to the
+		 * upper layer as failed to send.
+		 */
+		asoc->strmout[i].next_mid_ordered = 0;
+		asoc->strmout[i].next_mid_unordered = 0;
+		TAILQ_INIT(&asoc->strmout[i].outqueue);
+		asoc->strmout[i].chunks_on_queues = 0;
+#if defined(SCTP_DETAILED_STR_STATS)
+		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
+			asoc->strmout[i].abandoned_sent[j] = 0;
+			asoc->strmout[i].abandoned_unsent[j] = 0;
+		}
+#else
+		asoc->strmout[i].abandoned_sent[0] = 0;
+		asoc->strmout[i].abandoned_unsent[0] = 0;
+#endif
+		asoc->strmout[i].sid = i;
+		asoc->strmout[i].last_msg_incomplete = 0;
+		asoc->strmout[i].state = SCTP_STREAM_OPENING;
+		asoc->ss_functions.sctp_ss_init_stream(stcb, &asoc->strmout[i], NULL);
+	}
+	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
+
+	/* Now the mapping array */
+	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
+	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
+		    SCTP_M_MAP);
+	if (asoc->mapping_array == NULL) {
+		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+		return (ENOMEM);
+	}
+	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
+	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
+	    SCTP_M_MAP);
+	if (asoc->nr_mapping_array == NULL) {
+		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
+		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+		return (ENOMEM);
+	}
+	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
+
+	/* Now the init of the other outqueues */
+	TAILQ_INIT(&asoc->free_chunks);
+	TAILQ_INIT(&asoc->control_send_queue);
+	TAILQ_INIT(&asoc->asconf_send_queue);
+	TAILQ_INIT(&asoc->send_queue);
+	TAILQ_INIT(&asoc->sent_queue);
+	TAILQ_INIT(&asoc->resetHead);
+	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
+	TAILQ_INIT(&asoc->asconf_queue);
+	/* authentication fields */
+	asoc->authinfo.random = NULL;
+	asoc->authinfo.active_keyid = 0;
+	asoc->authinfo.assoc_key = NULL;
+	asoc->authinfo.assoc_keyid = 0;
+	asoc->authinfo.recv_key = NULL;
+	asoc->authinfo.recv_keyid = 0;
+	LIST_INIT(&asoc->shared_keys);
+	asoc->marked_retrans = 0;
+	asoc->port = inp->sctp_ep.port;
+	asoc->timoinit = 0;
+	asoc->timodata = 0;
+	asoc->timosack = 0;
+	asoc->timoshutdown = 0;
+	asoc->timoheartbeat = 0;
+	asoc->timocookie = 0;
+	asoc->timoshutdownack = 0;
+	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
+	asoc->discontinuity_time = asoc->start_time;
+	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
+		asoc->abandoned_unsent[i] = 0;
+		asoc->abandoned_sent[i] = 0;
+	}
+	/* sa_ignore MEMLEAK {memory is put in the assoc mapping array and freed later when
+	 * the association is freed.
+	 */
+	return (0);
+}
+
+void
+sctp_print_mapping_array(struct sctp_association *asoc)
+{
+	unsigned int i, limit;
+
+	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
+	            asoc->mapping_array_size,
+	            asoc->mapping_array_base_tsn,
+	            asoc->cumulative_tsn,
+	            asoc->highest_tsn_inside_map,
+	            asoc->highest_tsn_inside_nr_map);
+	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
+		if (asoc->mapping_array[limit - 1] != 0) {
+			break;
+		}
+	}
+	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
+	for (i = 0; i < limit; i++) {
+		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
+	}
+	if (limit % 16)
+		SCTP_PRINTF("\n");
+	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
+		if (asoc->nr_mapping_array[limit - 1]) {
+			break;
+		}
+	}
+	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
+	for (i = 0; i < limit; i++) {
+		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ': '\n');
+	}
+	if (limit % 16)
+		SCTP_PRINTF("\n");
+}
+
+int
+sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
+{
+	/* mapping array needs to grow */
+	uint8_t *new_array1, *new_array2;
+	uint32_t new_size;
+
+	new_size = asoc->mapping_array_size + ((needed+7)/8 + SCTP_MAPPING_ARRAY_INCR);
+	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
+	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
+	if ((new_array1 == NULL) || (new_array2 == NULL)) {
+		/* can't get more, forget it */
+		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
+		if (new_array1) {
+			SCTP_FREE(new_array1, SCTP_M_MAP);
+		}
+		if (new_array2) {
+			SCTP_FREE(new_array2, SCTP_M_MAP);
+		}
+		return (-1);
+	}
+	memset(new_array1, 0, new_size);
+	memset(new_array2, 0, new_size);
+	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
+	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
+	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
+	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+	asoc->mapping_array = new_array1;
+	asoc->nr_mapping_array = new_array2;
+	asoc->mapping_array_size = new_size;
+	return (0);
+}
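+
+/*
+ * Editorial note (not part of upstream usrsctp): the expansion above grows
+ * both mapping arrays by enough bytes to cover "needed" additional TSN bits,
+ * rounded up to whole bytes, plus SCTP_MAPPING_ARRAY_INCR bytes of slack.
+ * For example, needed = 100 adds (100 + 7) / 8 = 13 bytes on top of the
+ * slack; the renegable and non-renegable arrays are always reallocated and
+ * copied together so they stay the same size.
+ */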
+
+
+static void
+sctp_iterator_work(struct sctp_iterator *it)
+{
+	int iteration_count = 0;
+	int inp_skip = 0;
+	int first_in = 1;
+	struct sctp_inpcb *tinp;
+
+	SCTP_INP_INFO_RLOCK();
+	SCTP_ITERATOR_LOCK();
+	sctp_it_ctl.cur_it = it;
+	if (it->inp) {
+		SCTP_INP_RLOCK(it->inp);
+		SCTP_INP_DECR_REF(it->inp);
+	}
+	if (it->inp == NULL) {
+		/* iterator is complete */
+done_with_iterator:
+		sctp_it_ctl.cur_it = NULL;
+		SCTP_ITERATOR_UNLOCK();
+		SCTP_INP_INFO_RUNLOCK();
+		if (it->function_atend != NULL) {
+			(*it->function_atend) (it->pointer, it->val);
+		}
+		SCTP_FREE(it, SCTP_M_ITER);
+		return;
+	}
+select_a_new_ep:
+	if (first_in) {
+		first_in = 0;
+	} else {
+		SCTP_INP_RLOCK(it->inp);
+	}
+	while (((it->pcb_flags) &&
+	        ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
+	       ((it->pcb_features) &&
+		((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
+		/* endpoint flags or features don't match, so keep looking */
+		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+			SCTP_INP_RUNLOCK(it->inp);
+			goto done_with_iterator;
+		}
+		tinp = it->inp;
+		it->inp = LIST_NEXT(it->inp, sctp_list);
+		SCTP_INP_RUNLOCK(tinp);
+		if (it->inp == NULL) {
+			goto done_with_iterator;
+		}
+		SCTP_INP_RLOCK(it->inp);
+	}
+	/* now go through each assoc which is in the desired state */
+	if (it->done_current_ep == 0) {
+		if (it->function_inp != NULL)
+			inp_skip = (*it->function_inp)(it->inp, it->pointer, it->val);
+		it->done_current_ep = 1;
+	}
+	if (it->stcb == NULL) {
+		/* run the per instance function */
+		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
+	}
+	if ((inp_skip) || it->stcb == NULL) {
+		if (it->function_inp_end != NULL) {
+			inp_skip = (*it->function_inp_end)(it->inp,
+							   it->pointer,
+							   it->val);
+		}
+		SCTP_INP_RUNLOCK(it->inp);
+		goto no_stcb;
+	}
+	while (it->stcb) {
+		SCTP_TCB_LOCK(it->stcb);
+		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
+			/* not in the right state... keep looking */
+			SCTP_TCB_UNLOCK(it->stcb);
+			goto next_assoc;
+		}
+		/* see if we have limited out the iterator loop */
+		iteration_count++;
+		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
+			/* Pause to let others grab the lock */
+			atomic_add_int(&it->stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(it->stcb);
+			SCTP_INP_INCR_REF(it->inp);
+			SCTP_INP_RUNLOCK(it->inp);
+			SCTP_ITERATOR_UNLOCK();
+			SCTP_INP_INFO_RUNLOCK();
+			SCTP_INP_INFO_RLOCK();
+			SCTP_ITERATOR_LOCK();
+			if (sctp_it_ctl.iterator_flags) {
+				/* We won't be staying here */
+				SCTP_INP_DECR_REF(it->inp);
+				atomic_add_int(&it->stcb->asoc.refcnt, -1);
+#if !defined(__FreeBSD__)
+				if (sctp_it_ctl.iterator_flags &
+				   SCTP_ITERATOR_MUST_EXIT) {
+					goto done_with_iterator;
+				}
+#endif
+				if (sctp_it_ctl.iterator_flags &
+				   SCTP_ITERATOR_STOP_CUR_IT) {
+					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
+					goto done_with_iterator;
+				}
+				if (sctp_it_ctl.iterator_flags &
+				   SCTP_ITERATOR_STOP_CUR_INP) {
+					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
+					goto no_stcb;
+				}
+				/* If we reach here huh? */
+				SCTP_PRINTF("Unknown it ctl flag %x\n",
+					    sctp_it_ctl.iterator_flags);
+				sctp_it_ctl.iterator_flags = 0;
+			}
+			SCTP_INP_RLOCK(it->inp);
+			SCTP_INP_DECR_REF(it->inp);
+			SCTP_TCB_LOCK(it->stcb);
+			atomic_add_int(&it->stcb->asoc.refcnt, -1);
+			iteration_count = 0;
+		}
+
+		/* run function on this one */
+		(*it->function_assoc)(it->inp, it->stcb, it->pointer, it->val);
+
+		/*
+		 * we lie here, it really needs to have its own type but
+		 * first I must verify that this won't affect things :-0
+		 */
+		if (it->no_chunk_output == 0)
+			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+
+		SCTP_TCB_UNLOCK(it->stcb);
+	next_assoc:
+		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
+		if (it->stcb == NULL) {
+			/* Run last function */
+			if (it->function_inp_end != NULL) {
+				inp_skip = (*it->function_inp_end)(it->inp,
+								   it->pointer,
+								   it->val);
+			}
+		}
+	}
+	SCTP_INP_RUNLOCK(it->inp);
+ no_stcb:
+	/* done with all assocs on this endpoint, move on to next endpoint */
+	it->done_current_ep = 0;
+	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
+		it->inp = NULL;
+	} else {
+		it->inp = LIST_NEXT(it->inp, sctp_list);
+	}
+	if (it->inp == NULL) {
+		goto done_with_iterator;
+	}
+	goto select_a_new_ep;
+}
+
+void
+sctp_iterator_worker(void)
+{
+	struct sctp_iterator *it, *nit;
+
+	/* This function is called with the WQ lock in place */
+
+	sctp_it_ctl.iterator_running = 1;
+	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
+		/* now let's work on this one */
+		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
+		SCTP_IPI_ITERATOR_WQ_UNLOCK();
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		CURVNET_SET(it->vn);
+#endif
+		sctp_iterator_work(it);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		CURVNET_RESTORE();
+#endif
+		SCTP_IPI_ITERATOR_WQ_LOCK();
+#if !defined(__FreeBSD__)
+		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
+			break;
+		}
+#endif
+		/*sa_ignore FREED_MEMORY*/
+	}
+	sctp_it_ctl.iterator_running = 0;
+	return;
+}
+
+
+static void
+sctp_handle_addr_wq(void)
+{
+	/* deal with the ADDR wq from the rtsock calls */
+	struct sctp_laddr *wi, *nwi;
+	struct sctp_asconf_iterator *asc;
+
+	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
+		    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
+	if (asc == NULL) {
+		/* Try later, no memory */
+		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+				 (struct sctp_inpcb *)NULL,
+				 (struct sctp_tcb *)NULL,
+				 (struct sctp_nets *)NULL);
+		return;
+	}
+	LIST_INIT(&asc->list_of_work);
+	asc->cnt = 0;
+
+	SCTP_WQ_ADDR_LOCK();
+	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
+		LIST_REMOVE(wi, sctp_nxt_addr);
+		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
+		asc->cnt++;
+	}
+	SCTP_WQ_ADDR_UNLOCK();
+
+	if (asc->cnt == 0) {
+		SCTP_FREE(asc, SCTP_M_ASC_IT);
+	} else {
+		int ret;
+
+		ret = sctp_initiate_iterator(sctp_asconf_iterator_ep,
+		                             sctp_asconf_iterator_stcb,
+		                             NULL, /* No ep end for boundall */
+		                             SCTP_PCB_FLAGS_BOUNDALL,
+		                             SCTP_PCB_ANY_FEATURES,
+		                             SCTP_ASOC_ANY_STATE,
+		                             (void *)asc, 0,
+		                             sctp_asconf_iterator_end, NULL, 0);
+		if (ret) {
+			SCTP_PRINTF("Failed to initiate iterator for handle_addr_wq\n");
+			/* Free it if we are stopping, otherwise put it back on the addr_wq. */
+			if (SCTP_BASE_VAR(sctp_pcb_initialized) == 0) {
+				sctp_asconf_iterator_end(asc, 0);
+			} else {
+				SCTP_WQ_ADDR_LOCK();
+				LIST_FOREACH(wi, &asc->list_of_work, sctp_nxt_addr) {
+					LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+				}
+				SCTP_WQ_ADDR_UNLOCK();
+				SCTP_FREE(asc, SCTP_M_ASC_IT);
+			}
+		}
+	}
+}
+
+void
+sctp_timeout_handler(void *t)
+{
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+	struct sctp_timer *tmr;
+	struct mbuf *op_err;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+	int did_output;
+	int type;
+
+	tmr = (struct sctp_timer *)t;
+	inp = (struct sctp_inpcb *)tmr->ep;
+	stcb = (struct sctp_tcb *)tmr->tcb;
+	net = (struct sctp_nets *)tmr->net;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	CURVNET_SET((struct vnet *)tmr->vnet);
+#endif
+	did_output = 1;
+
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_audit_log(0xF0, (uint8_t) tmr->type);
+	sctp_auditing(3, inp, stcb, net);
+#endif
+
+	/* sanity checks... */
+	if (tmr->self != (void *)tmr) {
+		/*
+		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
+		 *             (void *)tmr);
+		 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		CURVNET_RESTORE();
+#endif
+		return;
+	}
+	tmr->stopped_from = 0xa001;
+	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
+		/*
+		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
+		 * tmr->type);
+		 */
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		CURVNET_RESTORE();
+#endif
+		return;
+	}
+	tmr->stopped_from = 0xa002;
+	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		CURVNET_RESTORE();
+#endif
+		return;
+	}
+	/* if this is an iterator timeout, get the struct and clear inp */
+	tmr->stopped_from = 0xa003;
+	if (inp) {
+		SCTP_INP_INCR_REF(inp);
+		if ((inp->sctp_socket == NULL) &&
+		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
+		     (tmr->type != SCTP_TIMER_TYPE_INIT) &&
+		     (tmr->type != SCTP_TIMER_TYPE_SEND) &&
+		     (tmr->type != SCTP_TIMER_TYPE_RECV) &&
+		     (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
+		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
+		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
+		     (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
+		     (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
+			) {
+			SCTP_INP_DECR_REF(inp);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+			CURVNET_RESTORE();
+#endif
+			return;
+		}
+	}
+	tmr->stopped_from = 0xa004;
+	if (stcb) {
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state == 0) {
+			atomic_add_int(&stcb->asoc.refcnt, -1);
+			if (inp) {
+				SCTP_INP_DECR_REF(inp);
+			}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+			CURVNET_RESTORE();
+#endif
+			return;
+		}
+	}
+	type = tmr->type;
+	tmr->stopped_from = 0xa005;
+	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", type);
+	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
+		if (inp) {
+			SCTP_INP_DECR_REF(inp);
+		}
+		if (stcb) {
+			atomic_add_int(&stcb->asoc.refcnt, -1);
+		}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+		CURVNET_RESTORE();
+#endif
+		return;
+	}
+	tmr->stopped_from = 0xa006;
+
+	if (stcb) {
+		SCTP_TCB_LOCK(stcb);
+		atomic_add_int(&stcb->asoc.refcnt, -1);
+		if ((type != SCTP_TIMER_TYPE_ASOCKILL) &&
+		    ((stcb->asoc.state == 0) ||
+		     (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
+			SCTP_TCB_UNLOCK(stcb);
+			if (inp) {
+				SCTP_INP_DECR_REF(inp);
+			}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+			CURVNET_RESTORE();
+#endif
+			return;
+		}
+	}
+	/* record in stopped_from which timeout occurred */
+	tmr->stopped_from = type;
+
+	/* mark as being serviced now */
+	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
+		/*
+		 * Callout has been rescheduled.
+		 */
+		goto get_out;
+	}
+	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
+		/*
+		 * Not active, so no action.
+		 */
+		goto get_out;
+	}
+	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
+
+	/* call the handler for the appropriate timer type */
+	switch (type) {
+	case SCTP_TIMER_TYPE_ZERO_COPY:
+		if (inp == NULL) {
+			break;
+		}
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
+			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
+		}
+		break;
+	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
+		if (inp == NULL) {
+			break;
+		}
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
+			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
+		}
+		break;
+	case SCTP_TIMER_TYPE_ADDR_WQ:
+		sctp_handle_addr_wq();
+		break;
+	case SCTP_TIMER_TYPE_SEND:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timodata);
+		stcb->asoc.timodata++;
+		stcb->asoc.num_send_timers_up--;
+		if (stcb->asoc.num_send_timers_up < 0) {
+			stcb->asoc.num_send_timers_up = 0;
+		}
+		SCTP_TCB_LOCK_ASSERT(stcb);
+		if (sctp_t3rxt_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+
+			goto out_decr;
+		}
+		SCTP_TCB_LOCK_ASSERT(stcb);
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, inp, stcb, net);
+#endif
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+		if ((stcb->asoc.num_send_timers_up == 0) &&
+		    (stcb->asoc.sent_queue_cnt > 0)) {
+			struct sctp_tmit_chunk *chk;
+
+			/*
+			 * Safeguard: if there is something on the sent queue
+			 * but no send timer is running, something is wrong,
+			 * so start a timer on the first chunk on the sent
+			 * queue, on whatever net it is sent to.
+			 */
+			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
+			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
+			    chk->whoTo);
+		}
+		break;
+	case SCTP_TIMER_TYPE_INIT:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timoinit);
+		stcb->asoc.timoinit++;
+		if (sctp_t1init_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+			goto out_decr;
+		}
+		/* We do output but not here */
+		did_output = 0;
+		break;
+	case SCTP_TIMER_TYPE_RECV:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timosack);
+		stcb->asoc.timosack++;
+		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, inp, stcb, net);
+#endif
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWN:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		if (sctp_shutdown_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+			goto out_decr;
+		}
+		SCTP_STAT_INCR(sctps_timoshutdown);
+		stcb->asoc.timoshutdown++;
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, inp, stcb, net);
+#endif
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
+		break;
+	case SCTP_TIMER_TYPE_HEARTBEAT:
+		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timoheartbeat);
+		stcb->asoc.timoheartbeat++;
+		if (sctp_heartbeat_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+			goto out_decr;
+		}
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, inp, stcb, net);
+#endif
+		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
+			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
+			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
+		}
+		break;
+	case SCTP_TIMER_TYPE_COOKIE:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+
+		if (sctp_cookie_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+			goto out_decr;
+		}
+		SCTP_STAT_INCR(sctps_timocookie);
+		stcb->asoc.timocookie++;
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, inp, stcb, net);
+#endif
+		/*
+		 * We consider the T3 and Cookie timers pretty much the same
+		 * with respect to the "from" value passed to chunk_output.
+		 */
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
+		break;
+	case SCTP_TIMER_TYPE_NEWCOOKIE:
+		{
+			struct timeval tv;
+			int i, secret;
+			if (inp == NULL) {
+				break;
+			}
+			SCTP_STAT_INCR(sctps_timosecret);
+			(void)SCTP_GETTIME_TIMEVAL(&tv);
+			SCTP_INP_WLOCK(inp);
+			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
+			inp->sctp_ep.last_secret_number =
+			    inp->sctp_ep.current_secret_number;
+			inp->sctp_ep.current_secret_number++;
+			if (inp->sctp_ep.current_secret_number >=
+			    SCTP_HOW_MANY_SECRETS) {
+				inp->sctp_ep.current_secret_number = 0;
+			}
+			secret = (int)inp->sctp_ep.current_secret_number;
+			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
+				inp->sctp_ep.secret_key[secret][i] =
+				    sctp_select_initial_TSN(&inp->sctp_ep);
+			}
+			SCTP_INP_WUNLOCK(inp);
+			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
+		}
+		did_output = 0;
+		break;
+	case SCTP_TIMER_TYPE_PATHMTURAISE:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timopathmtu);
+		sctp_pathmtu_timer(inp, stcb, net);
+		did_output = 0;
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWNACK:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		if (sctp_shutdownack_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+			goto out_decr;
+		}
+		SCTP_STAT_INCR(sctps_timoshutdownack);
+		stcb->asoc.timoshutdownack++;
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, inp, stcb, net);
+#endif
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timoshutdownguard);
+		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
+		                             "Shutdown guard timer expired");
+		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
+		/* no need to unlock the tcb, it's gone */
+		goto out_decr;
+
+	case SCTP_TIMER_TYPE_STRRESET:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		if (sctp_strreset_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+			goto out_decr;
+		}
+		SCTP_STAT_INCR(sctps_timostrmrst);
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
+		break;
+	case SCTP_TIMER_TYPE_ASCONF:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		if (sctp_asconf_timer(inp, stcb, net)) {
+			/* no need to unlock the tcb, it's gone */
+			goto out_decr;
+		}
+		SCTP_STAT_INCR(sctps_timoasconf);
+#ifdef SCTP_AUDITING_ENABLED
+		sctp_auditing(4, inp, stcb, net);
+#endif
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
+		break;
+	case SCTP_TIMER_TYPE_PRIM_DELETED:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		sctp_delete_prim_timer(inp, stcb, net);
+		SCTP_STAT_INCR(sctps_timodelprim);
+		break;
+
+	case SCTP_TIMER_TYPE_AUTOCLOSE:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timoautoclose);
+		sctp_autoclose_timer(inp, stcb, net);
+		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
+		did_output = 0;
+		break;
+	case SCTP_TIMER_TYPE_ASOCKILL:
+		if ((stcb == NULL) || (inp == NULL)) {
+			break;
+		}
+		SCTP_STAT_INCR(sctps_timoassockill);
+		/* Can we free it yet? */
+		SCTP_INP_DECR_REF(inp);
+		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
+		                SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		so = SCTP_INP_SO(inp);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+		                      SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+		/*
+		 * sctp_free_assoc() always unlocks (or destroys) the tcb
+		 * lock, so prevent a duplicate unlock or an unlock of a
+		 * freed mutex :-0
+		 */
+		stcb = NULL;
+		goto out_no_decr;
+	case SCTP_TIMER_TYPE_INPKILL:
+		SCTP_STAT_INCR(sctps_timoinpkill);
+		if (inp == NULL) {
+			break;
+		}
+		/*
+		 * special case, take away our increment since WE are the
+		 * killer
+		 */
+		SCTP_INP_DECR_REF(inp);
+		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
+		                SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
+#if defined(__APPLE__)
+		SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
+#endif
+		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+				SCTP_CALLED_FROM_INPKILL_TIMER);
+#if defined(__APPLE__)
+		SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
+#endif
+		inp = NULL;
+		goto out_no_decr;
+	default:
+		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
+			type);
+		break;
+	}
+#ifdef SCTP_AUDITING_ENABLED
+	sctp_audit_log(0xF1, (uint8_t) type);
+	if (inp)
+		sctp_auditing(5, inp, stcb, net);
+#endif
+	if ((did_output) && stcb) {
+		/*
+		 * Now we need to clean up the control chunk chain if an
+		 * ECNE is on it. It must be marked as UNSENT again so the
+		 * next call will continue to send it until we get a CWR to
+		 * remove it. It is unlikely, however, that we will find an
+		 * ECN echo on the chain.
+		 */
+		sctp_fix_ecn_echo(&stcb->asoc);
+	}
+get_out:
+	if (stcb) {
+		SCTP_TCB_UNLOCK(stcb);
+	}
+
+out_decr:
+	if (inp) {
+		SCTP_INP_DECR_REF(inp);
+	}
+
+out_no_decr:
+	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type = %d)\n", type);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 801000
+	CURVNET_RESTORE();
+#endif
+}
+
+void
+sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net)
+{
+	uint32_t to_ticks;
+	struct sctp_timer *tmr;
+
+	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
+		return;
+
+	tmr = NULL;
+	if (stcb) {
+		SCTP_TCB_LOCK_ASSERT(stcb);
+	}
+	switch (t_type) {
+	case SCTP_TIMER_TYPE_ZERO_COPY:
+		tmr = &inp->sctp_ep.zero_copy_timer;
+		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
+		break;
+	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
+		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
+		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
+		break;
+	case SCTP_TIMER_TYPE_ADDR_WQ:
+		/* Only 1 tick away :-) */
+		tmr = &SCTP_BASE_INFO(addr_wq_timer);
+		to_ticks = SCTP_ADDRESS_TICK_DELAY;
+		break;
+	case SCTP_TIMER_TYPE_SEND:
+		/* Here we use the RTO timer */
+		{
+			int rto_val;
+
+			if ((stcb == NULL) || (net == NULL)) {
+				return;
+			}
+			tmr = &net->rxt_timer;
+			if (net->RTO == 0) {
+				rto_val = stcb->asoc.initial_rto;
+			} else {
+				rto_val = net->RTO;
+			}
+			to_ticks = MSEC_TO_TICKS(rto_val);
+		}
+		break;
+	case SCTP_TIMER_TYPE_INIT:
+		/*
+		 * Here we use the INIT timer default, usually about 1
+		 * minute.
+		 */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->rxt_timer;
+		if (net->RTO == 0) {
+			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		} else {
+			to_ticks = MSEC_TO_TICKS(net->RTO);
+		}
+		break;
+	case SCTP_TIMER_TYPE_RECV:
+		/*
+		 * Here we use the Delayed-Ack timer value from the inp,
+		 * usually about 200 ms.
+		 */
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.dack_timer;
+		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWN:
+		/* Here we use the RTO of the destination. */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		if (net->RTO == 0) {
+			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		} else {
+			to_ticks = MSEC_TO_TICKS(net->RTO);
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_HEARTBEAT:
+		/*
+		 * The net is used here so that we can add in the RTO, even
+		 * though we use a different timer (the HB timer). We also
+		 * add the HB delay PLUS a random jitter.
+		 */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		} else {
+			uint32_t rndval;
+			uint32_t jitter;
+
+			if ((net->dest_state & SCTP_ADDR_NOHB) &&
+			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
+				return;
+			}
+			if (net->RTO == 0) {
+				to_ticks = stcb->asoc.initial_rto;
+			} else {
+				to_ticks = net->RTO;
+			}
+			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
+			jitter = rndval % to_ticks;
+			if (jitter >= (to_ticks >> 1)) {
+				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
+			} else {
+				to_ticks = to_ticks - jitter;
+			}
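+			/*
+			 * Rough effect of the jitter above (a sketch, not a
+			 * guarantee): to_ticks now lies roughly in
+			 * [RTO/2, 3*RTO/2), i.e. the base RTO shifted by up
+			 * to half an RTO in either direction.
+			 */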
+			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
+			    !(net->dest_state & SCTP_ADDR_PF)) {
+				to_ticks += net->heart_beat_delay;
+			}
+			/*
+			 * Now convert to_ticks, which is currently in ms,
+			 * to ticks.
+			 */
+			to_ticks = MSEC_TO_TICKS(to_ticks);
+			tmr = &net->hb_timer;
+		}
+		break;
+	case SCTP_TIMER_TYPE_COOKIE:
+		/*
+		 * Here we can use the RTO of the network, since one RTT was
+		 * completed. If a retransmission happened, then we will be
+		 * using the initial RTO value.
+		 */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		if (net->RTO == 0) {
+			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		} else {
+			to_ticks = MSEC_TO_TICKS(net->RTO);
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_NEWCOOKIE:
+		/*
+		 * Nothing needed but the endpoint here; usually about 60
+		 * minutes.
+		 */
+		tmr = &inp->sctp_ep.signature_change;
+		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
+		break;
+	case SCTP_TIMER_TYPE_ASOCKILL:
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.strreset_timer;
+		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
+		break;
+	case SCTP_TIMER_TYPE_INPKILL:
+		/*
+		 * The inp is set up to die. We re-use the signature_change
+		 * timer since that has stopped and we are in the GONE
+		 * state.
+		 */
+		tmr = &inp->sctp_ep.signature_change;
+		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
+		break;
+	case SCTP_TIMER_TYPE_PATHMTURAISE:
+		/*
+		 * Here we use the value found in the EP for PMTU, usually
+		 * about 10 minutes.
+		 */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
+			return;
+		}
+		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
+		tmr = &net->pmtu_timer;
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWNACK:
+		/* Here we use the RTO of the destination */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		if (net->RTO == 0) {
+			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		} else {
+			to_ticks = MSEC_TO_TICKS(net->RTO);
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+		/*
+		 * Here we use the endpoint's shutdown guard timer, usually
+		 * about 3 minutes.
+		 */
+		if (stcb == NULL) {
+			return;
+		}
+		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
+			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
+		} else {
+			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
+		}
+		tmr = &stcb->asoc.shut_guard_timer;
+		break;
+	case SCTP_TIMER_TYPE_STRRESET:
+		/*
+		 * Here the timer comes from the stcb but its value is from
+		 * the net's RTO.
+		 */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		if (net->RTO == 0) {
+			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		} else {
+			to_ticks = MSEC_TO_TICKS(net->RTO);
+		}
+		tmr = &stcb->asoc.strreset_timer;
+		break;
+	case SCTP_TIMER_TYPE_ASCONF:
+		/*
+		 * Here the timer comes from the stcb but its value is from
+		 * the net's RTO.
+		 */
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		if (net->RTO == 0) {
+			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		} else {
+			to_ticks = MSEC_TO_TICKS(net->RTO);
+		}
+		tmr = &stcb->asoc.asconf_timer;
+		break;
+	case SCTP_TIMER_TYPE_PRIM_DELETED:
+		if ((stcb == NULL) || (net != NULL)) {
+			return;
+		}
+		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
+		tmr = &stcb->asoc.delete_prim_timer;
+		break;
+	case SCTP_TIMER_TYPE_AUTOCLOSE:
+		if (stcb == NULL) {
+			return;
+		}
+		if (stcb->asoc.sctp_autoclose_ticks == 0) {
+			/*
+			 * Really an error since stcb is NOT set to
+			 * autoclose
+			 */
+			return;
+		}
+		to_ticks = stcb->asoc.sctp_autoclose_ticks;
+		tmr = &stcb->asoc.autoclose_timer;
+		break;
+	default:
+		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
+			__func__, t_type);
+		return;
+		break;
+	}
+	if ((to_ticks <= 0) || (tmr == NULL)) {
+		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
+			__func__, t_type, to_ticks, (void *)tmr);
+		return;
+	}
+	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
+		/*
+		 * We do NOT allow a timer to be started while it is already
+		 * running; if it is, we leave the current one up unchanged.
+		 */
+		return;
+	}
+	/* At this point we can proceed */
+	if (t_type == SCTP_TIMER_TYPE_SEND) {
+		stcb->asoc.num_send_timers_up++;
+	}
+	tmr->stopped_from = 0;
+	tmr->type = t_type;
+	tmr->ep = (void *)inp;
+	tmr->tcb = (void *)stcb;
+	tmr->net = (void *)net;
+	tmr->self = (void *)tmr;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 800000
+	tmr->vnet = (void *)curvnet;
+#endif
+#ifndef __Panda__
+	tmr->ticks = sctp_get_tick_count();
+#endif
+	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
+	return;
+}
+
+void
+sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    struct sctp_nets *net, uint32_t from)
+{
+	struct sctp_timer *tmr;
+
+	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
+	    (inp == NULL))
+		return;
+
+	tmr = NULL;
+	if (stcb) {
+		SCTP_TCB_LOCK_ASSERT(stcb);
+	}
+	switch (t_type) {
+	case SCTP_TIMER_TYPE_ZERO_COPY:
+		tmr = &inp->sctp_ep.zero_copy_timer;
+		break;
+	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
+		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
+		break;
+	case SCTP_TIMER_TYPE_ADDR_WQ:
+		tmr = &SCTP_BASE_INFO(addr_wq_timer);
+		break;
+	case SCTP_TIMER_TYPE_SEND:
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_INIT:
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_RECV:
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.dack_timer;
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWN:
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_HEARTBEAT:
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->hb_timer;
+		break;
+	case SCTP_TIMER_TYPE_COOKIE:
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_NEWCOOKIE:
+		/* nothing needed but the endpoint here */
+		tmr = &inp->sctp_ep.signature_change;
+		/*
+		 * We re-use the newcookie timer for the INP kill timer. We
+		 * must ensure that we do not kill it by accident.
+		 */
+		break;
+	case SCTP_TIMER_TYPE_ASOCKILL:
+		/*
+		 * Stop the asoc kill timer.
+		 */
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.strreset_timer;
+		break;
+
+	case SCTP_TIMER_TYPE_INPKILL:
+		/*
+		 * The inp is set up to die. We re-use the signature_change
+		 * timer since that has stopped and we are in the GONE
+		 * state.
+		 */
+		tmr = &inp->sctp_ep.signature_change;
+		break;
+	case SCTP_TIMER_TYPE_PATHMTURAISE:
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->pmtu_timer;
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWNACK:
+		if ((stcb == NULL) || (net == NULL)) {
+			return;
+		}
+		tmr = &net->rxt_timer;
+		break;
+	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.shut_guard_timer;
+		break;
+	case SCTP_TIMER_TYPE_STRRESET:
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.strreset_timer;
+		break;
+	case SCTP_TIMER_TYPE_ASCONF:
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.asconf_timer;
+		break;
+	case SCTP_TIMER_TYPE_PRIM_DELETED:
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.delete_prim_timer;
+		break;
+	case SCTP_TIMER_TYPE_AUTOCLOSE:
+		if (stcb == NULL) {
+			return;
+		}
+		tmr = &stcb->asoc.autoclose_timer;
+		break;
+	default:
+		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
+			__func__, t_type);
+		break;
+	}
+	if (tmr == NULL) {
+		return;
+	}
+	if ((tmr->type != t_type) && tmr->type) {
+		/*
+		 * OK, we have a timer that is under joint use, perhaps the
+		 * Cookie timer sharing with the SEND timer. We therefore are
+		 * NOT running the timer that the caller wants stopped, so
+		 * just return.
+		 */
+		return;
+	}
+	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
+		stcb->asoc.num_send_timers_up--;
+		if (stcb->asoc.num_send_timers_up < 0) {
+			stcb->asoc.num_send_timers_up = 0;
+		}
+	}
+	tmr->self = NULL;
+	tmr->stopped_from = from;
+	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
+	return;
+}
+
+uint32_t
+sctp_calculate_len(struct mbuf *m)
+{
+	uint32_t tlen = 0;
+	struct mbuf *at;
+
+	at = m;
+	while (at) {
+		tlen += SCTP_BUF_LEN(at);
+		at = SCTP_BUF_NEXT(at);
+	}
+	return (tlen);
+}
+
+void
+sctp_mtu_size_reset(struct sctp_inpcb *inp,
+    struct sctp_association *asoc, uint32_t mtu)
+{
+	/*
+	 * Reset the P-MTU size on this association. This involves changing
+	 * the asoc MTU and going through ANY chunk+overhead larger than mtu
+	 * to allow the DF flag to be cleared.
+	 */
+	struct sctp_tmit_chunk *chk;
+	unsigned int eff_mtu, ovh;
+
+	asoc->smallest_mtu = mtu;
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
+		ovh = SCTP_MIN_OVERHEAD;
+	} else {
+		ovh = SCTP_MIN_V4_OVERHEAD;
+	}
+	eff_mtu = mtu - ovh;
+	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
+		if (chk->send_size > eff_mtu) {
+			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+		}
+	}
+	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
+		if (chk->send_size > eff_mtu) {
+			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+		}
+	}
+}
+
+
+/*
+ * Given an association and the starting time of the current RTT period,
+ * return the RTO in msecs. 'net' should point to the current network.
+ */
+
+uint32_t
+sctp_calculate_rto(struct sctp_tcb *stcb,
+		   struct sctp_association *asoc,
+		   struct sctp_nets *net,
+		   struct timeval *told,
+		   int safe, int rtt_from_sack)
+{
+	/*-
+	 * Given an association and the starting time of the current RTT
+	 * period (in *told), return the RTO in number of msecs.
+	 */
+	int32_t rtt; /* RTT in ms */
+	uint32_t new_rto;
+	int first_measure = 0;
+	struct timeval now, then, *old;
+
+	/* Copy it out for sparc64 */
+	if (safe == sctp_align_unsafe_makecopy) {
+		old = &then;
+		memcpy(&then, told, sizeof(struct timeval));
+	} else if (safe == sctp_align_safe_nocopy) {
+		old = told;
+	} else {
+		/* error */
+		SCTP_PRINTF("Huh, bad rto calc call\n");
+		return (0);
+	}
+	/************************/
+	/* 1. calculate new RTT */
+	/************************/
+	/* get the current time */
+	if (stcb->asoc.use_precise_time) {
+		(void)SCTP_GETPTIME_TIMEVAL(&now);
+	} else {
+		(void)SCTP_GETTIME_TIMEVAL(&now);
+	}
+	timevalsub(&now, old);
+	/* store the current RTT in us */
+	net->rtt = (uint64_t)1000000 * (uint64_t)now.tv_sec +
+	           (uint64_t)now.tv_usec;
+	/* compute rtt in ms */
+	rtt = (int32_t)(net->rtt / 1000);
+	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
+		/* Tell the CC module that a new update has just occurred from a sack */
+		(*asoc->cc_functions.sctp_rtt_calculated)(stcb, net, &now);
+	}
+	/* Do we need to determine the LAN type? We do this only
+	 * on SACKs, i.e. when the RTT is determined from data and
+	 * not from non-data (HB/INIT->INIT-ACK).
+	 */
+	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
+	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
+		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
+			net->lan_type = SCTP_LAN_INTERNET;
+		} else {
+			net->lan_type = SCTP_LAN_LOCAL;
+		}
+	}
+
+	/***************************/
+	/* 2. update RTTVAR & SRTT */
+	/***************************/
+	/*-
+	 * Compute the scaled average lastsa and the
+	 * scaled variance lastsv as described in van Jacobson
+	 * Paper "Congestion Avoidance and Control", Annex A.
+	 *
+	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
+	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
+	 */
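+	/*
+	 * For reference (assuming the usual SCTP_RTT_SHIFT == 3 and
+	 * SCTP_RTT_VAR_SHIFT == 2), the fixed-point update below implements
+	 * the RFC 4960 Section 6.3.1 smoothing:
+	 *
+	 *   RTTVAR = (1 - 1/4) * RTTVAR + (1/4) * |SRTT - RTT|
+	 *   SRTT   = (1 - 1/8) * SRTT   + (1/8) * RTT
+	 *   RTO    = SRTT + 4 * RTTVAR
+	 *
+	 * Adding the scaled lastsv directly when new_rto is computed below
+	 * is what supplies the factor of 4.
+	 */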
+	if (net->RTO_measured) {
+		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
+		net->lastsa += rtt;
+		if (rtt < 0) {
+			rtt = -rtt;
+		}
+		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
+		net->lastsv += rtt;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
+			rto_logging(net, SCTP_LOG_RTTVAR);
+		}
+	} else {
+		/* First RTO measurement */
+		net->RTO_measured = 1;
+		first_measure = 1;
+		net->lastsa = rtt << SCTP_RTT_SHIFT;
+		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
+			rto_logging(net, SCTP_LOG_INITIAL_RTT);
+		}
+	}
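+	/*
+	 * Worked example (a sketch, under the same shift assumptions): on
+	 * the first measurement SRTT = RTT and RTTVAR = RTT/2, so the RTO
+	 * computed below starts out at roughly 3 * RTT before being bounded
+	 * by minrto/maxrto.
+	 */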
+	if (net->lastsv == 0) {
+		net->lastsv = SCTP_CLOCK_GRANULARITY;
+	}
+	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
+	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
+	    (stcb->asoc.sat_network_lockout == 0)) {
+		stcb->asoc.sat_network = 1;
+	} else if ((!first_measure) && stcb->asoc.sat_network) {
+		stcb->asoc.sat_network = 0;
+		stcb->asoc.sat_network_lockout = 1;
+	}
+	/* bound it, per C6/C7 in Section 5.3.1 */
+	if (new_rto < stcb->asoc.minrto) {
+		new_rto = stcb->asoc.minrto;
+	}
+	if (new_rto > stcb->asoc.maxrto) {
+		new_rto = stcb->asoc.maxrto;
+	}
+	/* we are now returning the RTO */
+	return (new_rto);
+}
+
+/*
+ * Return a pointer to a contiguous piece of data from the given mbuf chain,
+ * starting at 'off' for 'len' bytes.  If the desired piece spans more than
+ * one mbuf, a copy is made into 'in_ptr'. The caller must ensure that the
+ * buffer size is >= 'len'. Returns NULL if there aren't 'len' bytes in the
+ * chain.
+ */
+caddr_t
+sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
+{
+	uint32_t count;
+	uint8_t *ptr;
+
+	ptr = in_ptr;
+	if ((off < 0) || (len <= 0))
+		return (NULL);
+
+	/* find the desired start location */
+	while ((m != NULL) && (off > 0)) {
+		if (off < SCTP_BUF_LEN(m))
+			break;
+		off -= SCTP_BUF_LEN(m);
+		m = SCTP_BUF_NEXT(m);
+	}
+	if (m == NULL)
+		return (NULL);
+
+	/* is the current mbuf large enough (eg. contiguous)? */
+	if ((SCTP_BUF_LEN(m) - off) >= len) {
+		return (mtod(m, caddr_t) + off);
+	} else {
+		/* else, it spans more than one mbuf, so save a temp copy... */
+		while ((m != NULL) && (len > 0)) {
+			count = min(SCTP_BUF_LEN(m) - off, len);
+			bcopy(mtod(m, caddr_t) + off, ptr, count);
+			len -= count;
+			ptr += count;
+			off = 0;
+			m = SCTP_BUF_NEXT(m);
+		}
+		if ((m == NULL) && (len > 0))
+			return (NULL);
+		else
+			return ((caddr_t)in_ptr);
+	}
+}
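+
+/*
+ * Typical usage (an illustrative sketch only; 'phdr_buf' is a hypothetical
+ * local, not part of this file): pull a parameter header that may span
+ * mbufs into a caller-supplied buffer via the typed wrapper
+ * sctp_get_next_param() below.
+ *
+ *	struct sctp_paramhdr phdr_buf, *phdr;
+ *
+ *	phdr = sctp_get_next_param(m, offset, &phdr_buf, sizeof(phdr_buf));
+ *	if (phdr == NULL)
+ *		return;
+ */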
+
+
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *m,
+    int offset,
+    struct sctp_paramhdr *pull,
+    int pull_limit)
+{
+	/* This just provides a typed signature to Peter's Pull routine */
+	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
+	    (uint8_t *) pull));
+}
+
+
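+/*
+ * SCTP chunks are padded out to a 4-byte boundary (RFC 4960, Section 3.2),
+ * so at most 3 pad bytes are ever needed; that is why padlen values larger
+ * than 3 are rejected below.
+ */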
+struct mbuf *
+sctp_add_pad_tombuf(struct mbuf *m, int padlen)
+{
+	struct mbuf *m_last;
+	caddr_t dp;
+
+	if (padlen > 3) {
+		return (NULL);
+	}
+	if (padlen <= M_TRAILINGSPACE(m)) {
+		/*
+		 * The easy way. We hope the majority of the time we hit
+		 * here :)
+		 */
+		m_last = m;
+	} else {
+		/* Hard way we must grow the mbuf chain */
+		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
+		if (m_last == NULL) {
+			return (NULL);
+		}
+		SCTP_BUF_LEN(m_last) = 0;
+		SCTP_BUF_NEXT(m_last) = NULL;
+		SCTP_BUF_NEXT(m) = m_last;
+	}
+	dp = mtod(m_last, caddr_t) + SCTP_BUF_LEN(m_last);
+	SCTP_BUF_LEN(m_last) += padlen;
+	memset(dp, 0, padlen);
+	return (m_last);
+}
+
+struct mbuf *
+sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
+{
+	/* find the last mbuf in chain and pad it */
+	struct mbuf *m_at;
+
+	if (last_mbuf != NULL) {
+		return (sctp_add_pad_tombuf(last_mbuf, padval));
+	} else {
+		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
+			if (SCTP_BUF_NEXT(m_at) == NULL) {
+				return (sctp_add_pad_tombuf(m_at, padval));
+			}
+		}
+	}
+	return (NULL);
+}
+
+static void
+sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
+    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	struct mbuf *m_notify;
+	struct sctp_assoc_change *sac;
+	struct sctp_queued_to_read *control;
+	unsigned int notif_len;
+	uint16_t abort_len;
+	unsigned int i;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+
+	if (stcb == NULL) {
+		return;
+	}
+	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
+		notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
+		if (abort != NULL) {
+			abort_len = ntohs(abort->ch.chunk_length);
+		} else {
+			abort_len = 0;
+		}
+		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
+			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
+		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
+			notif_len += abort_len;
+		}
+		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+		if (m_notify == NULL) {
+			/* Retry with smaller value. */
+			notif_len = (unsigned int)sizeof(struct sctp_assoc_change);
+			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+			if (m_notify == NULL) {
+				goto set_error;
+			}
+		}
+		SCTP_BUF_NEXT(m_notify) = NULL;
+		sac = mtod(m_notify, struct sctp_assoc_change *);
+		memset(sac, 0, notif_len);
+		sac->sac_type = SCTP_ASSOC_CHANGE;
+		sac->sac_flags = 0;
+		sac->sac_length = sizeof(struct sctp_assoc_change);
+		sac->sac_state = state;
+		sac->sac_error = error;
+		/* XXX verify these stream counts */
+		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
+		sac->sac_inbound_streams = stcb->asoc.streamincnt;
+		sac->sac_assoc_id = sctp_get_associd(stcb);
+		if (notif_len > sizeof(struct sctp_assoc_change)) {
+			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
+				i = 0;
+				if (stcb->asoc.prsctp_supported == 1) {
+					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
+				}
+				if (stcb->asoc.auth_supported == 1) {
+					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
+				}
+				if (stcb->asoc.asconf_supported == 1) {
+					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
+				}
+				if (stcb->asoc.idata_supported == 1) {
+					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_INTERLEAVING;
+				}
+				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
+				if (stcb->asoc.reconfig_supported == 1) {
+					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
+				}
+				sac->sac_length += i;
+			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
+				memcpy(sac->sac_info, abort, abort_len);
+				sac->sac_length += abort_len;
+			}
+		}
+		SCTP_BUF_LEN(m_notify) = sac->sac_length;
+		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+		                                 0, 0, stcb->asoc.context, 0, 0, 0,
+		                                 m_notify);
+		if (control != NULL) {
+			control->length = SCTP_BUF_LEN(m_notify);
+			/* not that we need this */
+			control->tail_mbuf = m_notify;
+			control->spec_flags = M_NOTIFICATION;
+			sctp_add_to_readq(stcb->sctp_ep, stcb,
+			                  control,
+			                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
+			                  so_locked);
+		} else {
+			sctp_m_freem(m_notify);
+		}
+	}
+	/*
+	 * For 1-to-1 style sockets, we send up an error when an ABORT
+	 * comes in.
+	 */
+set_error:
+	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
+		SOCK_LOCK(stcb->sctp_socket);
+		if (from_peer) {
+			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
+				stcb->sctp_socket->so_error = ECONNREFUSED;
+			} else {
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
+				stcb->sctp_socket->so_error = ECONNRESET;
+			}
+		} else {
+			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
+			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
+				stcb->sctp_socket->so_error = ETIMEDOUT;
+			} else {
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
+				stcb->sctp_socket->so_error = ECONNABORTED;
+			}
+		}
+	}
+	/* Wake ANY sleepers */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	so = SCTP_INP_SO(stcb->sctp_ep);
+	if (!so_locked) {
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+	}
+#endif
+	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
+	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
+#if defined(__APPLE__)
+		socantrcvmore(stcb->sctp_socket);
+#else
+		socantrcvmore_locked(stcb->sctp_socket);
+#endif
+	}
+	sorwakeup(stcb->sctp_socket);
+	sowwakeup(stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	if (!so_locked) {
+		SCTP_SOCKET_UNLOCK(so, 1);
+	}
+#endif
+}
+
+static void
+sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
+    struct sockaddr *sa, uint32_t error, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+)
+{
+	struct mbuf *m_notify;
+	struct sctp_paddr_change *spc;
+	struct sctp_queued_to_read *control;
+
+	if ((stcb == NULL) ||
+	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
+		/* event not enabled */
+		return;
+	}
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		return;
+	SCTP_BUF_LEN(m_notify) = 0;
+	spc = mtod(m_notify, struct sctp_paddr_change *);
+	memset(spc, 0, sizeof(struct sctp_paddr_change));
+	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
+	spc->spc_flags = 0;
+	spc->spc_length = sizeof(struct sctp_paddr_change);
+	switch (sa->sa_family) {
+#ifdef INET
+	case AF_INET:
+#ifdef INET6
+		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
+			                    (struct sockaddr_in6 *)&spc->spc_aaddr);
+		} else {
+			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
+		}
+#else
+		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
+#endif
+		break;
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+		struct sockaddr_in6 *sin6;
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
+
+#ifdef SCTP_EMBEDDED_V6_SCOPE
+		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
+		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
+			if (sin6->sin6_scope_id == 0) {
+				/* recover scope_id for user */
+#ifdef SCTP_KAME
+				(void)sa6_recoverscope(sin6);
+#else
+				(void)in6_recoverscope(sin6, &sin6->sin6_addr,
+						       NULL);
+#endif
+			} else {
+				/* clear embedded scope_id for user */
+				in6_clearscope(&sin6->sin6_addr);
+			}
+		}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+		break;
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_conn));
+		break;
+#endif
+	default:
+		/* TSNH */
+		break;
+	}
+	spc->spc_state = state;
+	spc->spc_error = error;
+	spc->spc_assoc_id = sctp_get_associd(stcb);
+
+	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->length = SCTP_BUF_LEN(m_notify);
+	control->spec_flags = M_NOTIFICATION;
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	                  control,
+	                  &stcb->sctp_socket->so_rcv, 1,
+	                  SCTP_READ_LOCK_NOT_HELD,
+	                  so_locked);
+}
+
+
+static void
+sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
+    struct sctp_tmit_chunk *chk, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	struct mbuf *m_notify;
+	struct sctp_send_failed *ssf;
+	struct sctp_send_failed_event *ssfe;
+	struct sctp_queued_to_read *control;
+	struct sctp_chunkhdr *chkhdr;
+	int notifhdr_len, chk_len, chkhdr_len, padding_len, payload_len;
+
+	if ((stcb == NULL) ||
+	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
+	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
+		/* event not enabled */
+		return;
+	}
+
+	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+		notifhdr_len = sizeof(struct sctp_send_failed_event);
+	} else {
+		notifhdr_len = sizeof(struct sctp_send_failed);
+	}
+	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+	SCTP_BUF_LEN(m_notify) = notifhdr_len;
+	if (stcb->asoc.idata_supported) {
+		chkhdr_len = sizeof(struct sctp_idata_chunk);
+	} else {
+		chkhdr_len = sizeof(struct sctp_data_chunk);
+	}
+	/* Use some defaults in case we can't access the chunk header */
+	if (chk->send_size >= chkhdr_len) {
+		payload_len = chk->send_size - chkhdr_len;
+	} else {
+		payload_len = 0;
+	}
+	padding_len = 0;
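+	/*
+	 * If the chunk header is readable, refine those defaults: the wire
+	 * chunk_length excludes padding while chk->send_size includes it,
+	 * so a difference of up to 3 bytes is the trailing pad (a sketch of
+	 * the intent behind the checks below).
+	 */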
+	if (chk->data != NULL) {
+		chkhdr = mtod(chk->data, struct sctp_chunkhdr *);
+		if (chkhdr != NULL) {
+			chk_len = ntohs(chkhdr->chunk_length);
+			if ((chk_len >= chkhdr_len) &&
+			    (chk->send_size >= chk_len) &&
+			    (chk->send_size - chk_len < 4)) {
+				padding_len = chk->send_size - chk_len;
+				payload_len = chk->send_size - chkhdr_len - padding_len;
+			}
+		}
+	}
+	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
+		memset(ssfe, 0, notifhdr_len);
+		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
+		if (sent) {
+			ssfe->ssfe_flags = SCTP_DATA_SENT;
+		} else {
+			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
+		}
+		ssfe->ssfe_length = (uint32_t)(notifhdr_len + payload_len);
+		ssfe->ssfe_error = error;
+		/* not exactly what the user sent in, but should be close :) */
+		ssfe->ssfe_info.snd_sid = chk->rec.data.sid;
+		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
+		ssfe->ssfe_info.snd_ppid = chk->rec.data.ppid;
+		ssfe->ssfe_info.snd_context = chk->rec.data.context;
+		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
+		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
+	} else {
+		ssf = mtod(m_notify, struct sctp_send_failed *);
+		memset(ssf, 0, notifhdr_len);
+		ssf->ssf_type = SCTP_SEND_FAILED;
+		if (sent) {
+			ssf->ssf_flags = SCTP_DATA_SENT;
+		} else {
+			ssf->ssf_flags = SCTP_DATA_UNSENT;
+		}
+		ssf->ssf_length = (uint32_t)(notifhdr_len + payload_len);
+		ssf->ssf_error = error;
+		/* not exactly what the user sent in, but should be close :) */
+		ssf->ssf_info.sinfo_stream = chk->rec.data.sid;
+		ssf->ssf_info.sinfo_ssn = (uint16_t)chk->rec.data.mid;
+		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
+		ssf->ssf_info.sinfo_ppid = chk->rec.data.ppid;
+		ssf->ssf_info.sinfo_context = chk->rec.data.context;
+		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+		ssf->ssf_assoc_id = sctp_get_associd(stcb);
+	}
+	if (chk->data != NULL) {
+		/* Trim off the sctp chunk header (it should be there) */
+		if (chk->send_size == chkhdr_len + payload_len + padding_len) {
+			m_adj(chk->data, chkhdr_len);
+			m_adj(chk->data, -padding_len);
+			sctp_mbuf_crush(chk->data);
+			chk->send_size -= (chkhdr_len + padding_len);
+		}
+	}
+	SCTP_BUF_NEXT(m_notify) = chk->data;
+	/* Steal off the mbuf */
+	chk->data = NULL;
+	/*
+	 * For this case, we check the actual socket buffer: since the assoc
+	 * is going away, we don't want to overfill the socket buffer for a
+	 * non-reader.
+	 */
+	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+		sctp_m_freem(m_notify);
+		return;
+	}
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	                  control,
+	                  &stcb->sctp_socket->so_rcv, 1,
+	                  SCTP_READ_LOCK_NOT_HELD,
+	                  so_locked);
+}
+
+
+static void
+sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
+			 struct sctp_stream_queue_pending *sp, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+                         SCTP_UNUSED
+#endif
+                         )
+{
+	struct mbuf *m_notify;
+	struct sctp_send_failed *ssf;
+	struct sctp_send_failed_event *ssfe;
+	struct sctp_queued_to_read *control;
+	int notifhdr_len;
+
+	if ((stcb == NULL) ||
+	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
+	     sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
+		/* event not enabled */
+		return;
+	}
+	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+		notifhdr_len = sizeof(struct sctp_send_failed_event);
+	} else {
+		notifhdr_len = sizeof(struct sctp_send_failed);
+	}
+	m_notify = sctp_get_mbuf_for_msg(notifhdr_len, 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL) {
+		/* no space left */
+		return;
+	}
+	SCTP_BUF_LEN(m_notify) = notifhdr_len;
+	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
+		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
+		memset(ssfe, 0, notifhdr_len);
+		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
+		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
+		ssfe->ssfe_length = (uint32_t)(notifhdr_len + sp->length);
+		ssfe->ssfe_error = error;
+		/* not exactly what the user sent in, but should be close :) */
+		ssfe->ssfe_info.snd_sid = sp->sid;
+		if (sp->some_taken) {
+			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
+		} else {
+			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
+		}
+		ssfe->ssfe_info.snd_ppid = sp->ppid;
+		ssfe->ssfe_info.snd_context = sp->context;
+		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
+		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
+	} else {
+		ssf = mtod(m_notify, struct sctp_send_failed *);
+		memset(ssf, 0, notifhdr_len);
+		ssf->ssf_type = SCTP_SEND_FAILED;
+		ssf->ssf_flags = SCTP_DATA_UNSENT;
+		ssf->ssf_length = (uint32_t)(notifhdr_len + sp->length);
+		ssf->ssf_error = error;
+		/* not exactly what the user sent in, but should be close :) */
+		ssf->ssf_info.sinfo_stream = sp->sid;
+		ssf->ssf_info.sinfo_ssn = 0;
+		if (sp->some_taken) {
+			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
+		} else {
+			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
+		}
+		ssf->ssf_info.sinfo_ppid = sp->ppid;
+		ssf->ssf_info.sinfo_context = sp->context;
+		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
+		ssf->ssf_assoc_id = sctp_get_associd(stcb);
+	}
+	SCTP_BUF_NEXT(m_notify) = sp->data;
+
+	/* Steal off the mbuf */
+	sp->data = NULL;
+	/*
+	 * For this case, we check the actual socket buffer: since the assoc
+	 * is going away, we don't want to overfill the socket buffer for a
+	 * non-reader.
+	 */
+	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+		sctp_m_freem(m_notify);
+		return;
+	}
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	    control,
+	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+
+static void
+sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
+{
+	struct mbuf *m_notify;
+	struct sctp_adaptation_event *sai;
+	struct sctp_queued_to_read *control;
+
+	if ((stcb == NULL) ||
+	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
+		/* event not enabled */
+		return;
+	}
+
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+	SCTP_BUF_LEN(m_notify) = 0;
+	sai = mtod(m_notify, struct sctp_adaptation_event *);
+	memset(sai, 0, sizeof(struct sctp_adaptation_event));
+	sai->sai_type = SCTP_ADAPTATION_INDICATION;
+	sai->sai_flags = 0;
+	sai->sai_length = sizeof(struct sctp_adaptation_event);
+	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
+	sai->sai_assoc_id = sctp_get_associd(stcb);
+
+	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->length = SCTP_BUF_LEN(m_notify);
+	control->spec_flags = M_NOTIFICATION;
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	    control,
+	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+/* This always must be called with the read-queue LOCKED in the INP */
+static void
+sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
+					uint32_t val, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+                             SCTP_UNUSED
+#endif
+                                        )
+{
+	struct mbuf *m_notify;
+	struct sctp_pdapi_event *pdapi;
+	struct sctp_queued_to_read *control;
+	struct sockbuf *sb;
+
+	if ((stcb == NULL) ||
+	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
+		/* event not enabled */
+		return;
+	}
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
+		return;
+	}
+
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+	SCTP_BUF_LEN(m_notify) = 0;
+	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
+	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
+	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
+	pdapi->pdapi_flags = 0;
+	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
+	pdapi->pdapi_indication = error;
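+	/*
+	 * The unpacking below assumes 'val' packs the stream id in its
+	 * upper 16 bits and the stream sequence number in its lower 16
+	 * bits.
+	 */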
+	pdapi->pdapi_stream = (val >> 16);
+	pdapi->pdapi_seq = (val & 0x0000ffff);
+	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
+
+	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+					 0, 0, stcb->asoc.context, 0, 0, 0,
+					 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	control->length = SCTP_BUF_LEN(m_notify);
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	control->held_length = 0;
+	control->length = 0;
+	sb = &stcb->sctp_socket->so_rcv;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
+	}
+	sctp_sballoc(stcb, sb, m_notify);
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+		sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+	}
+	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
+	control->end_added = 1;
+	if (stcb->asoc.control_pdapi)
+		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi,  control, next);
+	else {
+		/* we really should not see this case */
+		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
+	}
+	if (stcb->sctp_ep && stcb->sctp_socket) {
+		/* This should always be the case */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		if (!so_locked) {
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 1);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+				SCTP_SOCKET_UNLOCK(so, 1);
+				return;
+			}
+		}
+#endif
+		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		if (!so_locked) {
+			SCTP_SOCKET_UNLOCK(so, 1);
+		}
+#endif
+	}
+}
+
+static void
+sctp_notify_shutdown_event(struct sctp_tcb *stcb)
+{
+	struct mbuf *m_notify;
+	struct sctp_shutdown_event *sse;
+	struct sctp_queued_to_read *control;
+
+	/*
+	 * For TCP model AND UDP connected sockets, we will send an error up
+	 * when a SHUTDOWN completes.
+	 */
+	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
+		/* mark socket closed for read/write and wakeup! */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+			SCTP_SOCKET_UNLOCK(so, 1);
+			return;
+		}
+#endif
+		socantsendmore(stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	}
+	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
+		/* event not enabled */
+		return;
+	}
+
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+	sse = mtod(m_notify, struct sctp_shutdown_event *);
+	memset(sse, 0, sizeof(struct sctp_shutdown_event));
+	sse->sse_type = SCTP_SHUTDOWN_EVENT;
+	sse->sse_flags = 0;
+	sse->sse_length = sizeof(struct sctp_shutdown_event);
+	sse->sse_assoc_id = sctp_get_associd(stcb);
+
+	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	control->length = SCTP_BUF_LEN(m_notify);
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	    control,
+	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+static void
+sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
+                             int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+                             SCTP_UNUSED
+#endif
+                             )
+{
+	struct mbuf *m_notify;
+	struct sctp_sender_dry_event *event;
+	struct sctp_queued_to_read *control;
+
+	if ((stcb == NULL) ||
+	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
+		/* event not enabled */
+		return;
+	}
+
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL) {
+		/* no space left */
+		return;
+	}
+	SCTP_BUF_LEN(m_notify) = 0;
+	event = mtod(m_notify, struct sctp_sender_dry_event *);
+	memset(event, 0, sizeof(struct sctp_sender_dry_event));
+	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
+	event->sender_dry_flags = 0;
+	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
+	event->sender_dry_assoc_id = sctp_get_associd(stcb);
+
+	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->length = SCTP_BUF_LEN(m_notify);
+	control->spec_flags = M_NOTIFICATION;
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
+	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
+}
+
+
+void
+sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
+{
+	struct mbuf *m_notify;
+	struct sctp_queued_to_read *control;
+	struct sctp_stream_change_event *stradd;
+
+	if ((stcb == NULL) ||
+	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
+		/* event not enabled */
+		return;
+	}
+	if ((stcb->asoc.peer_req_out) && flag) {
+		/* Peer made the request, don't tell the local user */
+		stcb->asoc.peer_req_out = 0;
+		return;
+	}
+	stcb->asoc.peer_req_out = 0;
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+	SCTP_BUF_LEN(m_notify) = 0;
+	stradd = mtod(m_notify, struct sctp_stream_change_event *);
+	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
+	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
+	stradd->strchange_flags = flag;
+	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
+	stradd->strchange_assoc_id = sctp_get_associd(stcb);
+	stradd->strchange_instrms = numberin;
+	stradd->strchange_outstrms = numberout;
+	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+		/* no space */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	control->length = SCTP_BUF_LEN(m_notify);
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	    control,
+	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+void
+sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
+{
+	struct mbuf *m_notify;
+	struct sctp_queued_to_read *control;
+	struct sctp_assoc_reset_event *strasoc;
+
+	if ((stcb == NULL) ||
+	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
+		/* event not enabled */
+		return;
+	}
+	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+	SCTP_BUF_LEN(m_notify) = 0;
+	strasoc = mtod(m_notify, struct sctp_assoc_reset_event  *);
+	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
+	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
+	strasoc->assocreset_flags = flag;
+	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
+	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
+	strasoc->assocreset_local_tsn = sending_tsn;
+	strasoc->assocreset_remote_tsn = recv_tsn;
+	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
+	SCTP_BUF_NEXT(m_notify) = NULL;
+	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+		/* no space */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	control->length = SCTP_BUF_LEN(m_notify);
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	    control,
+	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+
+
+static void
+sctp_notify_stream_reset(struct sctp_tcb *stcb,
+    int number_entries, uint16_t * list, int flag)
+{
+	struct mbuf *m_notify;
+	struct sctp_queued_to_read *control;
+	struct sctp_stream_reset_event *strreset;
+	int len;
+
+	if ((stcb == NULL) ||
+	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
+		/* event not enabled */
+		return;
+	}
+
+	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL)
+		/* no space left */
+		return;
+	SCTP_BUF_LEN(m_notify) = 0;
+	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
+	if (len > M_TRAILINGSPACE(m_notify)) {
+		/* never enough room */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
+	memset(strreset, 0, len);
+	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
+	strreset->strreset_flags = flag;
+	strreset->strreset_length = len;
+	strreset->strreset_assoc_id = sctp_get_associd(stcb);
+	if (number_entries) {
+		int i;
+
+		for (i = 0; i < number_entries; i++) {
+			strreset->strreset_stream_list[i] = ntohs(list[i]);
+		}
+	}
+	SCTP_BUF_LEN(m_notify) = len;
+	SCTP_BUF_NEXT(m_notify) = NULL;
+	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
+		/* no space */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	/* append to socket */
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control == NULL) {
+		/* no memory */
+		sctp_m_freem(m_notify);
+		return;
+	}
+	control->spec_flags = M_NOTIFICATION;
+	control->length = SCTP_BUF_LEN(m_notify);
+	/* not that we need this */
+	control->tail_mbuf = m_notify;
+	sctp_add_to_readq(stcb->sctp_ep, stcb,
+	                  control,
+	                  &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+}
+
+
+static void
+sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
+{
+	struct mbuf *m_notify;
+	struct sctp_remote_error *sre;
+	struct sctp_queued_to_read *control;
+	unsigned int notif_len;
+	uint16_t chunk_len;
+
+	if ((stcb == NULL) ||
+	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
+		return;
+	}
+	if (chunk != NULL) {
+		chunk_len = ntohs(chunk->ch.chunk_length);
+	} else {
+		chunk_len = 0;
+	}
+	notif_len = (unsigned int)(sizeof(struct sctp_remote_error) + chunk_len);
+	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+	if (m_notify == NULL) {
+		/* Retry with smaller value. */
+		notif_len = (unsigned int)sizeof(struct sctp_remote_error);
+		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
+		if (m_notify == NULL) {
+			return;
+		}
+	}
+	SCTP_BUF_NEXT(m_notify) = NULL;
+	sre = mtod(m_notify, struct sctp_remote_error *);
+	memset(sre, 0, notif_len);
+	sre->sre_type = SCTP_REMOTE_ERROR;
+	sre->sre_flags = 0;
+	sre->sre_length = sizeof(struct sctp_remote_error);
+	sre->sre_error = error;
+	sre->sre_assoc_id = sctp_get_associd(stcb);
+	if (notif_len > sizeof(struct sctp_remote_error)) {
+		memcpy(sre->sre_data, chunk, chunk_len);
+		sre->sre_length += chunk_len;
+	}
+	SCTP_BUF_LEN(m_notify) = sre->sre_length;
+	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
+	                                 0, 0, stcb->asoc.context, 0, 0, 0,
+	                                 m_notify);
+	if (control != NULL) {
+		control->length = SCTP_BUF_LEN(m_notify);
+		/* not that we need this */
+		control->tail_mbuf = m_notify;
+		control->spec_flags = M_NOTIFICATION;
+		sctp_add_to_readq(stcb->sctp_ep, stcb,
+		                  control,
+		                  &stcb->sctp_socket->so_rcv, 1,
+				  SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
+	} else {
+		sctp_m_freem(m_notify);
+	}
+}
+
+
+void
+sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
+    uint32_t error, void *data, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	if ((stcb == NULL) ||
+	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+		/* If the socket is gone we are out of here */
+		return;
+	}
+#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
+	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
+#else
+	if (stcb->sctp_socket->so_state & SS_CANTRCVMORE) {
+#endif
+		return;
+	}
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	}
+#endif
+	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
+	    (stcb->asoc.state &  SCTP_STATE_COOKIE_ECHOED)) {
+		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
+		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
+		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
+			/* Don't report these in front states */
+			return;
+		}
+	}
+	switch (notification) {
+	case SCTP_NOTIFY_ASSOC_UP:
+		if (stcb->asoc.assoc_up_sent == 0) {
+			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
+			stcb->asoc.assoc_up_sent = 1;
+		}
+		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
+			sctp_notify_adaptation_layer(stcb);
+		}
+		if (stcb->asoc.auth_supported == 0) {
+			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+			                NULL, so_locked);
+		}
+		break;
+	case SCTP_NOTIFY_ASSOC_DOWN:
+		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
+#if defined(__Userspace__)
+		if (stcb->sctp_ep->recv_callback) {
+			if (stcb->sctp_socket) {
+				union sctp_sockstore addr;
+				struct sctp_rcvinfo rcv;
+
+				memset(&addr, 0, sizeof(union sctp_sockstore));
+				memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				SCTP_TCB_UNLOCK(stcb);
+				stcb->sctp_ep->recv_callback(stcb->sctp_socket, addr, NULL, 0, rcv, 0, stcb->sctp_ep->ulp_info);
+				SCTP_TCB_LOCK(stcb);
+				atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			}
+		}
+#endif
+		break;
+	case SCTP_NOTIFY_INTERFACE_DOWN:
+		{
+			struct sctp_nets *net;
+
+			net = (struct sctp_nets *)data;
+			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
+			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
+			break;
+		}
+	case SCTP_NOTIFY_INTERFACE_UP:
+		{
+			struct sctp_nets *net;
+
+			net = (struct sctp_nets *)data;
+			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
+			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
+			break;
+		}
+	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
+		{
+			struct sctp_nets *net;
+
+			net = (struct sctp_nets *)data;
+			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
+			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
+			break;
+		}
+	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
+		sctp_notify_send_failed2(stcb, error,
+		                         (struct sctp_stream_queue_pending *)data, so_locked);
+		break;
+	case SCTP_NOTIFY_SENT_DG_FAIL:
+		sctp_notify_send_failed(stcb, 1, error,
+		    (struct sctp_tmit_chunk *)data, so_locked);
+		break;
+	case SCTP_NOTIFY_UNSENT_DG_FAIL:
+		sctp_notify_send_failed(stcb, 0, error,
+		                        (struct sctp_tmit_chunk *)data, so_locked);
+		break;
+	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
+		{
+			uint32_t val;
+			val = *((uint32_t *)data);
+
+			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
+			break;
+		}
+	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
+		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
+		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
+			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
+		} else {
+			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
+		}
+		break;
+	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
+		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
+		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
+			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
+		} else {
+			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
+		}
+		break;
+	case SCTP_NOTIFY_ASSOC_RESTART:
+		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
+		if (stcb->asoc.auth_supported == 0) {
+			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
+			                NULL, so_locked);
+		}
+		break;
+	case SCTP_NOTIFY_STR_RESET_SEND:
+		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
+		break;
+	case SCTP_NOTIFY_STR_RESET_RECV:
+		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
+		break;
+	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
+		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_FAILED));
+		break;
+	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
+		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+		                         (SCTP_STREAM_RESET_OUTGOING_SSN|SCTP_STREAM_RESET_DENIED));
+		break;
+	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
+		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_FAILED));
+		break;
+	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
+		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
+		                         (SCTP_STREAM_RESET_INCOMING|SCTP_STREAM_RESET_DENIED));
+		break;
+	case SCTP_NOTIFY_ASCONF_ADD_IP:
+		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
+		    error, so_locked);
+		break;
+	case SCTP_NOTIFY_ASCONF_DELETE_IP:
+		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
+		                             error, so_locked);
+		break;
+	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
+		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
+		                             error, so_locked);
+		break;
+	case SCTP_NOTIFY_PEER_SHUTDOWN:
+		sctp_notify_shutdown_event(stcb);
+		break;
+	case SCTP_NOTIFY_AUTH_NEW_KEY:
+		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
+		                           (uint16_t)(uintptr_t)data,
+		                           so_locked);
+		break;
+	case SCTP_NOTIFY_AUTH_FREE_KEY:
+		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
+		                           (uint16_t)(uintptr_t)data,
+		                           so_locked);
+		break;
+	case SCTP_NOTIFY_NO_PEER_AUTH:
+		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
+		                           (uint16_t)(uintptr_t)data,
+		                           so_locked);
+		break;
+	case SCTP_NOTIFY_SENDER_DRY:
+		sctp_notify_sender_dry_event(stcb, so_locked);
+		break;
+	case SCTP_NOTIFY_REMOTE_ERROR:
+		sctp_notify_remote_error(stcb, error, data);
+		break;
+	default:
+		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
+			__func__, notification, notification);
+		break;
+	}			/* end switch */
+}
+
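+/*
+ * report every chunk still queued for transmission (sent queue, send queue
+ * and the per-stream output queues) to the ULP as a failed send and free it.
+ */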
+void
+sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	struct sctp_association *asoc;
+	struct sctp_stream_out *outs;
+	struct sctp_tmit_chunk *chk, *nchk;
+	struct sctp_stream_queue_pending *sp, *nsp;
+	int i;
+
+	if (stcb == NULL) {
+		return;
+	}
+	asoc = &stcb->asoc;
+	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+		/* already being freed */
+		return;
+	}
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	}
+#endif
+	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
+		return;
+	}
+	/* now go through all the gunk, freeing chunks */
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_LOCK(stcb);
+	}
+	/* sent queue SHOULD be empty */
+	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
+		asoc->sent_queue_cnt--;
+		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
+			if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+				asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+			} else {
+				panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+			}
+		}
+		if (chk->data != NULL) {
+			sctp_free_bufspace(stcb, asoc, chk, 1);
+			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
+			                error, chk, so_locked);
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+		}
+		sctp_free_a_chunk(stcb, chk, so_locked);
+		/*sa_ignore FREED_MEMORY*/
+	}
+	/* pending send queue SHOULD be empty */
+	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
+		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
+		asoc->send_queue_cnt--;
+		if (asoc->strmout[chk->rec.data.sid].chunks_on_queues > 0) {
+			asoc->strmout[chk->rec.data.sid].chunks_on_queues--;
+#ifdef INVARIANTS
+		} else {
+			panic("No chunks on the queues for sid %u.", chk->rec.data.sid);
+#endif
+		}
+		if (chk->data != NULL) {
+			sctp_free_bufspace(stcb, asoc, chk, 1);
+			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
+			                error, chk, so_locked);
+			if (chk->data) {
+				sctp_m_freem(chk->data);
+				chk->data = NULL;
+			}
+		}
+		sctp_free_a_chunk(stcb, chk, so_locked);
+		/*sa_ignore FREED_MEMORY*/
+	}
+	for (i = 0; i < asoc->streamoutcnt; i++) {
+		/* For each stream */
+		outs = &asoc->strmout[i];
+		/* clean up any sends there */
+		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
+			atomic_subtract_int(&asoc->stream_queue_cnt, 1);
+			TAILQ_REMOVE(&outs->outqueue, sp, next);
+			stcb->asoc.ss_functions.sctp_ss_remove_from_stream(stcb, asoc, outs, sp, holds_lock);
+			sctp_free_spbufspace(stcb, asoc, sp);
+			if (sp->data) {
+				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
+						error, (void *)sp, so_locked);
+				if (sp->data) {
+					sctp_m_freem(sp->data);
+					sp->data = NULL;
+					sp->tail_mbuf = NULL;
+					sp->length = 0;
+				}
+			}
+			if (sp->net) {
+				sctp_free_remote_addr(sp->net);
+				sp->net = NULL;
+			}
+			/* Free the chunk */
+			sctp_free_a_strmoq(stcb, sp, so_locked);
+			/*sa_ignore FREED_MEMORY*/
+		}
+	}
+
+	if (holds_lock == 0) {
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+}
+
+void
+sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
+			struct sctp_abort_chunk *abort, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	if (stcb == NULL) {
+		return;
+	}
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	}
+#endif
+	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
+	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
+	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
+		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
+	}
+	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
+	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
+		return;
+	}
+	/* Tell them we lost the asoc */
+	sctp_report_all_outbound(stcb, error, 1, so_locked);
+	if (from_peer) {
+		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
+	} else {
+		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
+	}
+}
+
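+/*
+ * send an ABORT in response to the given packet; if a TCB exists, also
+ * notify the ULP and free the association.
+ */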
+void
+sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                       struct mbuf *m, int iphlen,
+                       struct sockaddr *src, struct sockaddr *dst,
+                       struct sctphdr *sh, struct mbuf *op_err,
+#if defined(__FreeBSD__)
+                       uint8_t mflowtype, uint32_t mflowid,
+#endif
+                       uint32_t vrf_id, uint16_t port)
+{
+	uint32_t vtag;
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+
+	vtag = 0;
+	if (stcb != NULL) {
+		vtag = stcb->asoc.peer_vtag;
+		vrf_id = stcb->asoc.vrf_id;
+	}
+	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
+#if defined(__FreeBSD__)
+	                mflowtype, mflowid, inp->fibnum,
+#endif
+	                vrf_id, port);
+	if (stcb != NULL) {
+		/* We have a TCB to abort, send notification too */
+		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
+		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
+		/* Ok, now lets free it */
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		so = SCTP_INP_SO(inp);
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+#endif
+		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+		}
+		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+		                      SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	}
+}
+#ifdef SCTP_ASOCLOG_OF_TSNS
+void
+sctp_print_out_track_log(struct sctp_tcb *stcb)
+{
+#ifdef NOSIY_PRINTS
+	int i;
+	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
+	SCTP_PRINTF("IN bound TSN log-aaa\n");
+	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
+		SCTP_PRINTF("None rcvd\n");
+		goto none_in;
+	}
+	if (stcb->asoc.tsn_in_wrapped) {
+		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
+			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+				    stcb->asoc.in_tsnlog[i].tsn,
+				    stcb->asoc.in_tsnlog[i].strm,
+				    stcb->asoc.in_tsnlog[i].seq,
+				    stcb->asoc.in_tsnlog[i].flgs,
+				    stcb->asoc.in_tsnlog[i].sz);
+		}
+	}
+	if (stcb->asoc.tsn_in_at) {
+		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
+			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+				    stcb->asoc.in_tsnlog[i].tsn,
+				    stcb->asoc.in_tsnlog[i].strm,
+				    stcb->asoc.in_tsnlog[i].seq,
+				    stcb->asoc.in_tsnlog[i].flgs,
+				    stcb->asoc.in_tsnlog[i].sz);
+		}
+	}
+ none_in:
+	SCTP_PRINTF("OUT bound TSN log-aaa\n");
+	if ((stcb->asoc.tsn_out_at == 0) &&
+	    (stcb->asoc.tsn_out_wrapped == 0)) {
+		SCTP_PRINTF("None sent\n");
+	}
+	if (stcb->asoc.tsn_out_wrapped) {
+		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
+			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+				    stcb->asoc.out_tsnlog[i].tsn,
+				    stcb->asoc.out_tsnlog[i].strm,
+				    stcb->asoc.out_tsnlog[i].seq,
+				    stcb->asoc.out_tsnlog[i].flgs,
+				    stcb->asoc.out_tsnlog[i].sz);
+		}
+	}
+	if (stcb->asoc.tsn_out_at) {
+		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
+			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
+				    stcb->asoc.out_tsnlog[i].tsn,
+				    stcb->asoc.out_tsnlog[i].strm,
+				    stcb->asoc.out_tsnlog[i].seq,
+				    stcb->asoc.out_tsnlog[i].flgs,
+				    stcb->asoc.out_tsnlog[i].sz);
+		}
+	}
+#endif
+}
+#endif
+
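+/*
+ * abort an existing association: send an ABORT to the peer, notify the ULP
+ * (unless the socket is gone) and free the association. Without a TCB, only
+ * tear down the endpoint if its socket is already gone.
+ */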
+void
+sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+                          struct mbuf *op_err,
+                          int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+                          SCTP_UNUSED
+#endif
+)
+{
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	struct socket *so;
+#endif
+
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	so = SCTP_INP_SO(inp);
+#endif
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(inp));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(inp));
+	}
+#endif
+	if (stcb == NULL) {
+		/* Got to have a TCB */
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+#if defined(__APPLE__)
+				if (!so_locked) {
+					SCTP_SOCKET_LOCK(so, 1);
+				}
+#endif
+				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+						SCTP_CALLED_DIRECTLY_NOCMPSET);
+#if defined(__APPLE__)
+				if (!so_locked) {
+					SCTP_SOCKET_UNLOCK(so, 1);
+				}
+#endif
+			}
+		}
+		return;
+	} else {
+		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
+	}
+	/* notify the peer */
+	sctp_send_abort_tcb(stcb, op_err, so_locked);
+	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
+	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
+	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
+		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
+	}
+	/* notify the ulp */
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
+		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
+	}
+	/* now free the asoc */
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	sctp_print_out_track_log(stcb);
+#endif
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	if (!so_locked) {
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		SCTP_SOCKET_LOCK(so, 1);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+	}
+#endif
+	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+	                      SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+	if (!so_locked) {
+		SCTP_SOCKET_UNLOCK(so, 1);
+	}
+#endif
+}
+
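+/*
+ * handle an out-of-the-blue packet (one that matches no association): walk
+ * its chunks and either stay silent (ABORT, SHUTDOWN COMPLETE, packet
+ * dropped), answer a SHUTDOWN ACK with a SHUTDOWN COMPLETE, or send an
+ * ABORT, subject to the sctp_blackhole sysctl.
+ */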
+void
+sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
+                 struct sockaddr *src, struct sockaddr *dst,
+                 struct sctphdr *sh, struct sctp_inpcb *inp,
+                 struct mbuf *cause,
+#if defined(__FreeBSD__)
+                 uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
+#endif
+                 uint32_t vrf_id, uint16_t port)
+{
+	struct sctp_chunkhdr *ch, chunk_buf;
+	unsigned int chk_length;
+	int contains_init_chunk;
+
+	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
+	/* Generate a TO address for future reference */
+	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
+		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
+#if defined(__APPLE__)
+			SCTP_SOCKET_LOCK(SCTP_INP_SO(inp), 1);
+#endif
+			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
+					SCTP_CALLED_DIRECTLY_NOCMPSET);
+#if defined(__APPLE__)
+			SCTP_SOCKET_UNLOCK(SCTP_INP_SO(inp), 1);
+#endif
+		}
+	}
+	contains_init_chunk = 0;
+	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+	    sizeof(*ch), (uint8_t *) & chunk_buf);
+	while (ch != NULL) {
+		chk_length = ntohs(ch->chunk_length);
+		if (chk_length < sizeof(*ch)) {
+			/* break to abort land */
+			break;
+		}
+		switch (ch->chunk_type) {
+		case SCTP_INIT:
+			contains_init_chunk = 1;
+			break;
+		case SCTP_PACKET_DROPPED:
+			/* we don't respond to pkt-dropped */
+			return;
+		case SCTP_ABORT_ASSOCIATION:
+			/* we don't respond with an ABORT to an ABORT */
+			return;
+		case SCTP_SHUTDOWN_COMPLETE:
+			/*
+			 * we ignore it since we are not waiting for it and
+			 * peer is gone
+			 */
+			return;
+		case SCTP_SHUTDOWN_ACK:
+			sctp_send_shutdown_complete2(src, dst, sh,
+#if defined(__FreeBSD__)
+			                             mflowtype, mflowid, fibnum,
+#endif
+			                             vrf_id, port);
+			return;
+		default:
+			break;
+		}
+		offset += SCTP_SIZE32(chk_length);
+		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+		    sizeof(*ch), (uint8_t *) & chunk_buf);
+	}
+	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
+	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
+	     (contains_init_chunk == 0))) {
+		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
+#if defined(__FreeBSD__)
+		                mflowtype, mflowid, fibnum,
+#endif
+		                vrf_id, port);
+	}
+}
+
+/*
+ * check the inbound datagram to make sure there is not an abort inside it,
+ * if there is return 1, else return 0.
+ */
+int
+sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
+{
+	struct sctp_chunkhdr *ch;
+	struct sctp_init_chunk *init_chk, chunk_buf;
+	int offset;
+	unsigned int chk_length;
+
+	offset = iphlen + sizeof(struct sctphdr);
+	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
+	    (uint8_t *) & chunk_buf);
+	while (ch != NULL) {
+		chk_length = ntohs(ch->chunk_length);
+		if (chk_length < sizeof(*ch)) {
+			/* packet is probably corrupt */
+			break;
+		}
+		/* we seem to be ok, is it an abort? */
+		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
+			/* yep, tell them */
+			return (1);
+		}
+		if (ch->chunk_type == SCTP_INITIATION) {
+			/* need to update the Vtag */
+			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
+			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
+			if (init_chk != NULL) {
+				*vtagfill = ntohl(init_chk->init.initiate_tag);
+			}
+		}
+		/* Nope, move to the next chunk */
+		offset += SCTP_SIZE32(chk_length);
+		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
+		    sizeof(*ch), (uint8_t *) & chunk_buf);
+	}
+	return (0);
+}
+
+/*
+ * currently (2/02), ifa_addr embeds the scope_id and doesn't have
+ * sin6_scope_id set (i.e. it's 0), so create this function to compare
+ * link-local scopes
+ */
+#ifdef INET6
+uint32_t
+sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
+{
+#if defined(__Userspace__)
+    /*__Userspace__ Returning 1 here always */
+#endif
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+	struct sockaddr_in6 a, b;
+
+	/* save copies */
+	a = *addr1;
+	b = *addr2;
+
+	if (a.sin6_scope_id == 0)
+#ifdef SCTP_KAME
+		if (sa6_recoverscope(&a)) {
+#else
+		if (in6_recoverscope(&a, &a.sin6_addr, NULL)) {
+#endif				/* SCTP_KAME */
+			/* can't get scope, so can't match */
+			return (0);
+		}
+	if (b.sin6_scope_id == 0)
+#ifdef SCTP_KAME
+		if (sa6_recoverscope(&b)) {
+#else
+		if (in6_recoverscope(&b, &b.sin6_addr, NULL)) {
+#endif				/* SCTP_KAME */
+			/* can't get scope, so can't match */
+			return (0);
+		}
+	if (a.sin6_scope_id != b.sin6_scope_id)
+		return (0);
+#else
+	if (addr1->sin6_scope_id != addr2->sin6_scope_id)
+		return (0);
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+
+	return (1);
+}
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+/*
+ * returns a sockaddr_in6 with embedded scope recovered and removed
+ */
+struct sockaddr_in6 *
+sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
+{
+	/* check and strip embedded scope junk */
+	if (addr->sin6_family == AF_INET6) {
+		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
+			if (addr->sin6_scope_id == 0) {
+				*store = *addr;
+#ifdef SCTP_KAME
+				if (!sa6_recoverscope(store)) {
+#else
+				if (!in6_recoverscope(store, &store->sin6_addr,
+				    NULL)) {
+#endif /* SCTP_KAME */
+					/* use the recovered scope */
+					addr = store;
+				}
+			} else {
+				/* else, return the original "to" addr */
+				in6_clearscope(&addr->sin6_addr);
+			}
+		}
+	}
+	return (addr);
+}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+#endif
+
+/*
+ * are the two addresses the same?  currently a "scopeless" check returns: 1
+ * if same, 0 if not
+ */
+int
+sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
+{
+
+	/* must be valid */
+	if (sa1 == NULL || sa2 == NULL)
+		return (0);
+
+	/* must be the same family */
+	if (sa1->sa_family != sa2->sa_family)
+		return (0);
+
+	switch (sa1->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		/* IPv6 addresses */
+		struct sockaddr_in6 *sin6_1, *sin6_2;
+
+		sin6_1 = (struct sockaddr_in6 *)sa1;
+		sin6_2 = (struct sockaddr_in6 *)sa2;
+		return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
+		    sin6_2));
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		/* IPv4 addresses */
+		struct sockaddr_in *sin_1, *sin_2;
+
+		sin_1 = (struct sockaddr_in *)sa1;
+		sin_2 = (struct sockaddr_in *)sa2;
+		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
+		struct sockaddr_conn *sconn_1, *sconn_2;
+
+		sconn_1 = (struct sockaddr_conn *)sa1;
+		sconn_2 = (struct sockaddr_conn *)sa2;
+		return (sconn_1->sconn_addr == sconn_2->sconn_addr);
+	}
+#endif
+	default:
+		/* we don't do these... */
+		return (0);
+	}
+}
+
+void
+sctp_print_address(struct sockaddr *sa)
+{
+#ifdef INET6
+#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
+	char ip6buf[INET6_ADDRSTRLEN];
+#endif
+#endif
+
+	switch (sa->sa_family) {
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+
+		sin6 = (struct sockaddr_in6 *)sa;
+#if defined(__Userspace__)
+		SCTP_PRINTF("IPv6 address: %x:%x:%x:%x:%x:%x:%x:%x:port:%d scope:%u\n",
+			    ntohs(sin6->sin6_addr.s6_addr16[0]),
+			    ntohs(sin6->sin6_addr.s6_addr16[1]),
+			    ntohs(sin6->sin6_addr.s6_addr16[2]),
+			    ntohs(sin6->sin6_addr.s6_addr16[3]),
+			    ntohs(sin6->sin6_addr.s6_addr16[4]),
+			    ntohs(sin6->sin6_addr.s6_addr16[5]),
+			    ntohs(sin6->sin6_addr.s6_addr16[6]),
+			    ntohs(sin6->sin6_addr.s6_addr16[7]),
+			    ntohs(sin6->sin6_port),
+			    sin6->sin6_scope_id);
+#else
+#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
+		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
+			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
+			    ntohs(sin6->sin6_port),
+			    sin6->sin6_scope_id);
+#else
+		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
+			    ip6_sprintf(&sin6->sin6_addr),
+			    ntohs(sin6->sin6_port),
+			    sin6->sin6_scope_id);
+#endif
+#endif
+		break;
+	}
+#endif
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+		unsigned char *p;
+
+		sin = (struct sockaddr_in *)sa;
+		p = (unsigned char *)&sin->sin_addr;
+		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
+			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
+		break;
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
+		struct sockaddr_conn *sconn;
+
+		sconn = (struct sockaddr_conn *)sa;
+		SCTP_PRINTF("AF_CONN address: %p\n", sconn->sconn_addr);
+		break;
+	}
+#endif
+	default:
+		SCTP_PRINTF("?\n");
+		break;
+	}
+}
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+    struct sctp_inpcb *new_inp,
+    struct sctp_tcb *stcb,
+    int waitflags)
+{
+	/*
+	 * go through our old INP and pull off any control structures that
+	 * belong to stcb and move them to the new inp.
+	 */
+	struct socket *old_so, *new_so;
+	struct sctp_queued_to_read *control, *nctl;
+	struct sctp_readhead tmp_queue;
+	struct mbuf *m;
+	int error = 0;
+
+	old_so = old_inp->sctp_socket;
+	new_so = new_inp->sctp_socket;
+	TAILQ_INIT(&tmp_queue);
+#if defined(__FreeBSD__) && __FreeBSD_version < 700000
+	SOCKBUF_LOCK(&(old_so->so_rcv));
+#endif
+#if defined(__FreeBSD__) || defined(__APPLE__)
+	error = sblock(&old_so->so_rcv, waitflags);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version < 700000
+	SOCKBUF_UNLOCK(&(old_so->so_rcv));
+#endif
+	if (error) {
+		/* Gak, can't get sblock, we have a problem.
+		 * data will be left stranded.. and we
+		 * don't dare look at it since the
+		 * other thread may be reading something.
+		 * Oh well, it's a screwed up app that does
+		 * a peeloff OR an accept while reading
+		 * from the main socket... actually it's
+		 * only the peeloff() case, since I think
+		 * read will fail on a listening socket..
+		 */
+		return;
+	}
+	/* lock the socket buffers */
+	SCTP_INP_READ_LOCK(old_inp);
+	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
+		/* Pull off all for our target stcb */
+		if (control->stcb == stcb) {
+			/* remove it we want it */
+			TAILQ_REMOVE(&old_inp->read_queue, control, next);
+			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
+			m = control->data;
+			while (m) {
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE,SCTP_BUF_LEN(m));
+				}
+				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
+				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+				}
+				m = SCTP_BUF_NEXT(m);
+			}
+		}
+	}
+	SCTP_INP_READ_UNLOCK(old_inp);
+	/* Remove the sb-lock on the old socket */
+#if defined(__FreeBSD__) && __FreeBSD_version < 700000
+	SOCKBUF_LOCK(&(old_so->so_rcv));
+#endif
+#if defined(__APPLE__)
+	sbunlock(&old_so->so_rcv, 1);
+#endif
+
+#if defined(__FreeBSD__)
+	sbunlock(&old_so->so_rcv);
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version < 700000
+	SOCKBUF_UNLOCK(&(old_so->so_rcv));
+#endif
+	/* Now we move them over to the new socket buffer */
+	SCTP_INP_READ_LOCK(new_inp);
+	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
+		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
+		m = control->data;
+		while (m) {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
+			}
+			sctp_sballoc(stcb, &new_so->so_rcv, m);
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+			}
+			m = SCTP_BUF_NEXT(m);
+		}
+	}
+	SCTP_INP_READ_UNLOCK(new_inp);
+}
+
+void
+sctp_wakeup_the_read_socket(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+)
+{
+	if ((inp != NULL) && (inp->sctp_socket != NULL)) {
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
+			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
+		} else {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			struct socket *so;
+
+			so = SCTP_INP_SO(inp);
+			if (!so_locked) {
+				if (stcb) {
+					atomic_add_int(&stcb->asoc.refcnt, 1);
+					SCTP_TCB_UNLOCK(stcb);
+				}
+				SCTP_SOCKET_LOCK(so, 1);
+				if (stcb) {
+					SCTP_TCB_LOCK(stcb);
+					atomic_subtract_int(&stcb->asoc.refcnt, 1);
+				}
+				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+					SCTP_SOCKET_UNLOCK(so, 1);
+					return;
+				}
+			}
+#endif
+			sctp_sorwakeup(inp, inp->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+			if (!so_locked) {
+				SCTP_SOCKET_UNLOCK(so, 1);
+			}
+#endif
+		}
+	}
+}
+#if defined(__Userspace__)
+
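+/*
+ * userspace only: if a receive callback is registered and the message is
+ * either complete or has reached the partial delivery point, copy the data
+ * out of the read queue entry and hand it to the application together with
+ * the peer address and rcvinfo.
+ */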
+void
+sctp_invoke_recv_callback(struct sctp_inpcb *inp,
+                          struct sctp_tcb *stcb,
+                          struct sctp_queued_to_read *control,
+                          int inp_read_lock_held)
+{
+	uint32_t pd_point, length;
+
+	if ((inp->recv_callback == NULL) ||
+	    (stcb == NULL) ||
+	    (stcb->sctp_socket == NULL)) {
+		return;
+	}
+
+	length = control->length;
+	if (stcb != NULL && stcb->sctp_socket != NULL) {
+		pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
+			       stcb->sctp_ep->partial_delivery_point);
+	} else {
+		pd_point = inp->partial_delivery_point;
+	}
+	if ((control->end_added == 1) || (length >= pd_point)) {
+		struct socket *so;
+		struct mbuf *m;
+		char *buffer;
+		struct sctp_rcvinfo rcv;
+		union sctp_sockstore addr;
+		int flags;
+
+		if ((buffer = malloc(length)) == NULL) {
+			return;
+		}
+		if (inp_read_lock_held == 0) {
+			SCTP_INP_READ_LOCK(inp);
+		}
+		so = stcb->sctp_socket;
+		for (m = control->data; m; m = SCTP_BUF_NEXT(m)) {
+			sctp_sbfree(control, control->stcb, &so->so_rcv, m);
+		}
+		m_copydata(control->data, 0, length, buffer);
+		memset(&rcv, 0, sizeof(struct sctp_rcvinfo));
+		rcv.rcv_sid = control->sinfo_stream;
+		rcv.rcv_ssn = (uint16_t)control->mid;
+		rcv.rcv_flags = control->sinfo_flags;
+		rcv.rcv_ppid = control->sinfo_ppid;
+		rcv.rcv_tsn = control->sinfo_tsn;
+		rcv.rcv_cumtsn = control->sinfo_cumtsn;
+		rcv.rcv_context = control->sinfo_context;
+		rcv.rcv_assoc_id = control->sinfo_assoc_id;
+		memset(&addr, 0, sizeof(union sctp_sockstore));
+		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
+#ifdef INET
+		case AF_INET:
+			addr.sin = control->whoFrom->ro._l_addr.sin;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			addr.sin6 = control->whoFrom->ro._l_addr.sin6;
+			break;
+#endif
+		case AF_CONN:
+			addr.sconn = control->whoFrom->ro._l_addr.sconn;
+			break;
+		default:
+			addr.sa = control->whoFrom->ro._l_addr.sa;
+			break;
+		}
+		flags = 0;
+		if (control->end_added == 1) {
+			flags |= MSG_EOR;
+		}
+		if (control->spec_flags & M_NOTIFICATION) {
+			flags |= MSG_NOTIFICATION;
+		}
+		sctp_m_freem(control->data);
+		control->data = NULL;
+		control->tail_mbuf = NULL;
+		control->length = 0;
+		if (control->end_added) {
+			TAILQ_REMOVE(&stcb->sctp_ep->read_queue, control, next);
+			control->on_read_q = 0;
+			sctp_free_remote_addr(control->whoFrom);
+			control->whoFrom = NULL;
+			sctp_free_a_readq(stcb, control);
+		}
+		atomic_add_int(&stcb->asoc.refcnt, 1);
+		SCTP_TCB_UNLOCK(stcb);
+		if (inp_read_lock_held == 0) {
+			SCTP_INP_READ_UNLOCK(inp);
+		}
+		inp->recv_callback(so, addr, buffer, length, rcv, flags, inp->ulp_info);
+		SCTP_TCB_LOCK(stcb);
+		atomic_subtract_int(&stcb->asoc.refcnt, 1);
+	}
+}
+#endif
+
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_queued_to_read *control,
+    struct sockbuf *sb,
+    int end,
+    int inp_read_lock_held,
+    int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    )
+{
+	/*
+	 * Here we must place the control on the end of the socket read
+	 * queue AND increment sb_cc so that select will work properly on
+	 * read.
+	 */
+	struct mbuf *m, *prev = NULL;
+
+	if (inp == NULL) {
+		/* Gak, TSNH!! */
+#ifdef INVARIANTS
+		panic("Gak, inp NULL on add_to_readq");
+#endif
+		return;
+	}
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(inp));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(inp));
+	}
+#endif
+	if (inp_read_lock_held == 0)
+		SCTP_INP_READ_LOCK(inp);
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
+		sctp_free_remote_addr(control->whoFrom);
+		if (control->data) {
+			sctp_m_freem(control->data);
+			control->data = NULL;
+		}
+		sctp_free_a_readq(stcb, control);
+		if (inp_read_lock_held == 0)
+			SCTP_INP_READ_UNLOCK(inp);
+		return;
+	}
+	if (!(control->spec_flags & M_NOTIFICATION)) {
+		atomic_add_int(&inp->total_recvs, 1);
+		if (!control->do_not_ref_stcb) {
+			atomic_add_int(&stcb->total_recvs, 1);
+		}
+	}
+	m = control->data;
+	control->held_length = 0;
+	control->length = 0;
+	while (m) {
+		if (SCTP_BUF_LEN(m) == 0) {
+			/* Skip mbufs with NO length */
+			if (prev == NULL) {
+				/* First one */
+				control->data = sctp_m_free(m);
+				m = control->data;
+			} else {
+				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
+				m = SCTP_BUF_NEXT(prev);
+			}
+			if (m == NULL) {
+				control->tail_mbuf = prev;
+			}
+			continue;
+		}
+		prev = m;
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
+		}
+		sctp_sballoc(stcb, sb, m);
+		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+			sctp_sblog(sb, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+		}
+		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
+		m = SCTP_BUF_NEXT(m);
+	}
+	if (prev != NULL) {
+		control->tail_mbuf = prev;
+	} else {
+		/* Everything got collapsed out?? */
+		sctp_free_remote_addr(control->whoFrom);
+		sctp_free_a_readq(stcb, control);
+		if (inp_read_lock_held == 0)
+			SCTP_INP_READ_UNLOCK(inp);
+		return;
+	}
+	if (end) {
+		control->end_added = 1;
+	}
+	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
+	control->on_read_q = 1;
+	if (inp_read_lock_held == 0)
+		SCTP_INP_READ_UNLOCK(inp);
+#if defined(__Userspace__)
+	sctp_invoke_recv_callback(inp, stcb, control, inp_read_lock_held);
+#endif
+	if (inp && inp->sctp_socket) {
+		sctp_wakeup_the_read_socket(inp, stcb, so_locked);
+	}
+}
+
+/*************HOLD THIS COMMENT FOR PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
+/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
+ *************ALTERNATE ROUTING CODE
+ */
+
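+/*
+ * build an SCTP error cause (cause code, length and info string) in a newly
+ * allocated mbuf; returns NULL if code is 0, info is NULL, the info string
+ * is too long or no mbuf is available.
+ */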
+struct mbuf *
+sctp_generate_cause(uint16_t code, char *info)
+{
+	struct mbuf *m;
+	struct sctp_gen_error_cause *cause;
+	size_t info_len;
+	uint16_t len;
+
+	if ((code == 0) || (info == NULL)) {
+		return (NULL);
+	}
+	info_len = strlen(info);
+	if (info_len > (SCTP_MAX_CAUSE_LENGTH - sizeof(struct sctp_paramhdr))) {
+		return (NULL);
+	}
+	len = (uint16_t)(sizeof(struct sctp_paramhdr) + info_len);
+	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+	if (m != NULL) {
+		SCTP_BUF_LEN(m) = len;
+		cause = mtod(m, struct sctp_gen_error_cause *);
+		cause->code = htons(code);
+		cause->length = htons(len);
+		memcpy(cause->info, info, info_len);
+	}
+	return (m);
+}
+
+struct mbuf *
+sctp_generate_no_user_data_cause(uint32_t tsn)
+{
+	struct mbuf *m;
+	struct sctp_error_no_user_data *no_user_data_cause;
+	uint16_t len;
+
+	len = (uint16_t)sizeof(struct sctp_error_no_user_data);
+	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
+	if (m != NULL) {
+		SCTP_BUF_LEN(m) = len;
+		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
+		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
+		no_user_data_cause->cause.length = htons(len);
+		no_user_data_cause->tsn = htonl(tsn);
+	}
+	return (m);
+}
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
+    struct sctp_tmit_chunk *tp1, int chk_cnt)
+{
+	if (tp1->data == NULL) {
+		return;
+	}
+	asoc->chunks_on_out_queue -= chk_cnt;
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
+		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
+			       asoc->total_output_queue_size,
+			       tp1->book_size,
+			       0,
+			       tp1->mbcnt);
+	}
+	if (asoc->total_output_queue_size >= tp1->book_size) {
+		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
+	} else {
+		asoc->total_output_queue_size = 0;
+	}
+
+	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
+				  ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
+		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
+			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
+		} else {
+			stcb->sctp_socket->so_snd.sb_cc = 0;
+
+		}
+	}
+}
+
+#endif
+
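+/*
+ * abandon a PR-SCTP message: walk all fragments of the message that tp1
+ * belongs to, notify the ULP of the failed send, free the data and mark the
+ * chunks so a FORWARD-TSN can skip them. Returns the number of bytes
+ * released.
+ */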
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
+			   uint8_t sent, int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+			   SCTP_UNUSED
+#endif
+	)
+{
+	struct sctp_stream_out *strq;
+	struct sctp_tmit_chunk *chk = NULL, *tp2;
+	struct sctp_stream_queue_pending *sp;
+	uint32_t mid;
+	uint16_t sid;
+	uint8_t foundeom = 0;
+	int ret_sz = 0;
+	int notdone;
+	int do_wakeup_routine = 0;
+
+#if defined(__APPLE__)
+	if (so_locked) {
+		sctp_lock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	} else {
+		sctp_unlock_assert(SCTP_INP_SO(stcb->sctp_ep));
+	}
+#endif
+	sid = tp1->rec.data.sid;
+	mid = tp1->rec.data.mid;
+	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
+		stcb->asoc.abandoned_sent[0]++;
+		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
+		stcb->asoc.strmout[sid].abandoned_sent[0]++;
+#if defined(SCTP_DETAILED_STR_STATS)
+		stcb->asoc.strmout[sid].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
+#endif
+	} else {
+		stcb->asoc.abandoned_unsent[0]++;
+		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
+		stcb->asoc.strmout[sid].abandoned_unsent[0]++;
+#if defined(SCTP_DETAILED_STR_STATS)
+		stcb->asoc.strmout[sid].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
+#endif
+	}
+	do {
+		ret_sz += tp1->book_size;
+		if (tp1->data != NULL) {
+			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+				sctp_flight_size_decrease(tp1);
+				sctp_total_flight_decrease(stcb, tp1);
+			}
+			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+			stcb->asoc.peers_rwnd += tp1->send_size;
+			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
+			if (sent) {
+				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
+			} else {
+				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
+			}
+			if (tp1->data) {
+				sctp_m_freem(tp1->data);
+				tp1->data = NULL;
+			}
+			do_wakeup_routine = 1;
+			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
+				stcb->asoc.sent_queue_cnt_removeable--;
+			}
+		}
+		tp1->sent = SCTP_FORWARD_TSN_SKIP;
+		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
+		    SCTP_DATA_NOT_FRAG) {
+			/* not frag'ed, we are done */
+			notdone = 0;
+			foundeom = 1;
+		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+			/* end of frag, we are done */
+			notdone = 0;
+			foundeom = 1;
+		} else {
+			/*
+			 * It's a begin or middle piece; we must mark all
+			 * of it
+			 */
+			notdone = 1;
+			tp1 = TAILQ_NEXT(tp1, sctp_next);
+		}
+	} while (tp1 && notdone);
+	if (foundeom == 0) {
+		/*
+		 * The multi-part message was scattered across the send and
+		 * sent queue.
+		 */
+		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
+			if ((tp1->rec.data.sid != sid) ||
+			    (!SCTP_MID_EQ(stcb->asoc.idata_supported, tp1->rec.data.mid, mid))) {
+				break;
+			}
+			/* save to chk in case we have some on stream out
+			 * queue. If so and we have an un-transmitted one
+			 * we don't have to fudge the TSN.
+			 */
+			chk = tp1;
+			ret_sz += tp1->book_size;
+			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
+			if (sent) {
+				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
+			} else {
+				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
+			}
+			if (tp1->data) {
+				sctp_m_freem(tp1->data);
+				tp1->data = NULL;
+			}
+			/* No flight involved here book the size to 0 */
+			tp1->book_size = 0;
+			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
+				foundeom = 1;
+			}
+			do_wakeup_routine = 1;
+			tp1->sent = SCTP_FORWARD_TSN_SKIP;
+			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
+			/* on to the sent queue so we can wait for it to be passed by. */
+			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
+					  sctp_next);
+			stcb->asoc.send_queue_cnt--;
+			stcb->asoc.sent_queue_cnt++;
+		}
+	}
+	if (foundeom == 0) {
+		/*
+		 * Still no eom found. That means there
+		 * is stuff left on the stream out queue.. yuck.
+		 */
+		SCTP_TCB_SEND_LOCK(stcb);
+		strq = &stcb->asoc.strmout[sid];
+		sp = TAILQ_FIRST(&strq->outqueue);
+		if (sp != NULL) {
+			sp->discard_rest = 1;
+			/*
+			 * We may need to put a chunk on the
+			 * queue that holds the TSN that
+			 * would have been sent with the LAST
+			 * bit.
+			 */
+			if (chk == NULL) {
+				/* Yep, we have to */
+				sctp_alloc_a_chunk(stcb, chk);
+				if (chk == NULL) {
+					/* we are hosed. All we can
+					 * do is nothing.. which will
+					 * cause an abort if the peer is
+					 * paying attention.
+					 */
+					goto oh_well;
+				}
+				memset(chk, 0, sizeof(*chk));
+				chk->rec.data.rcv_flags = 0;
+				chk->sent = SCTP_FORWARD_TSN_SKIP;
+				chk->asoc = &stcb->asoc;
+				if (stcb->asoc.idata_supported == 0) {
+					if (sp->sinfo_flags & SCTP_UNORDERED) {
+						chk->rec.data.mid = 0;
+					} else {
+						chk->rec.data.mid = strq->next_mid_ordered;
+					}
+				} else {
+					if (sp->sinfo_flags & SCTP_UNORDERED) {
+						chk->rec.data.mid = strq->next_mid_unordered;
+					} else {
+						chk->rec.data.mid = strq->next_mid_ordered;
+					}
+				}
+				chk->rec.data.sid = sp->sid;
+				chk->rec.data.ppid = sp->ppid;
+				chk->rec.data.context = sp->context;
+				chk->flags = sp->act_flags;
+				chk->whoTo = NULL;
+#if defined(__FreeBSD__) || defined(__Panda__)
+				chk->rec.data.tsn = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
+#else
+				chk->rec.data.tsn = stcb->asoc.sending_seq++;
+#endif
+				strq->chunks_on_queues++;
+				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
+				stcb->asoc.sent_queue_cnt++;
+				stcb->asoc.pr_sctp_cnt++;
+			}
+			chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
+			if (sp->sinfo_flags & SCTP_UNORDERED) {
+				chk->rec.data.rcv_flags |= SCTP_DATA_UNORDERED;
+			}
+			if (stcb->asoc.idata_supported == 0) {
+				if ((sp->sinfo_flags & SCTP_UNORDERED) == 0) {
+					strq->next_mid_ordered++;
+				}
+			} else {
+				if (sp->sinfo_flags & SCTP_UNORDERED) {
+					strq->next_mid_unordered++;
+				} else {
+					strq->next_mid_ordered++;
+				}
+			}
+		oh_well:
+			if (sp->data) {
+				/* Pull any data to free up the SB and
+				 * allow the sender to "add more" while we
+				 * throw this away :-)
+				 */
+				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
+				ret_sz += sp->length;
+				do_wakeup_routine = 1;
+				sp->some_taken = 1;
+				sctp_m_freem(sp->data);
+				sp->data = NULL;
+				sp->tail_mbuf = NULL;
+				sp->length = 0;
+			}
+		}
+		SCTP_TCB_SEND_UNLOCK(stcb);
+	}
+	if (do_wakeup_routine) {
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		struct socket *so;
+
+		so = SCTP_INP_SO(stcb->sctp_ep);
+		if (!so_locked) {
+			atomic_add_int(&stcb->asoc.refcnt, 1);
+			SCTP_TCB_UNLOCK(stcb);
+			SCTP_SOCKET_LOCK(so, 1);
+			SCTP_TCB_LOCK(stcb);
+			atomic_subtract_int(&stcb->asoc.refcnt, 1);
+			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
+				/* assoc was freed while we were unlocked */
+				SCTP_SOCKET_UNLOCK(so, 1);
+				return (ret_sz);
+			}
+		}
+#endif
+		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
+#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+		if (!so_locked) {
+			SCTP_SOCKET_UNLOCK(so, 1);
+		}
+#endif
+	}
+	return (ret_sz);
+}
+
+/*
+ * checks to see if the given address, sa, is one that is currently known
+ * by the kernel.
+ * note: can't distinguish the same address on multiple interfaces and
+ * doesn't handle multiple addresses with different zone/scope id's.
+ * note: ifa_ifwithaddr() compares the entire sockaddr struct
+ */
+struct sctp_ifa *
+sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
+		    int holds_lock)
+{
+	struct sctp_laddr *laddr;
+
+	if (holds_lock == 0) {
+		SCTP_INP_RLOCK(inp);
+	}
+
+	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
+		if (laddr->ifa == NULL)
+			continue;
+		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
+			continue;
+#ifdef INET
+		if (addr->sa_family == AF_INET) {
+			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
+			    laddr->ifa->address.sin.sin_addr.s_addr) {
+				/* found him. */
+				if (holds_lock == 0) {
+					SCTP_INP_RUNLOCK(inp);
+				}
+				return (laddr->ifa);
+				break;
+			}
+		}
+#endif
+#ifdef INET6
+		if (addr->sa_family == AF_INET6) {
+			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+						 &laddr->ifa->address.sin6)) {
+				/* found him. */
+				if (holds_lock == 0) {
+					SCTP_INP_RUNLOCK(inp);
+				}
+				return (laddr->ifa);
+				break;
+			}
+		}
+#endif
+#if defined(__Userspace__)
+		if (addr->sa_family == AF_CONN) {
+			if (((struct sockaddr_conn *)addr)->sconn_addr == laddr->ifa->address.sconn.sconn_addr) {
+				/* found him. */
+				if (holds_lock == 0) {
+					SCTP_INP_RUNLOCK(inp);
+				}
+				return (laddr->ifa);
+				break;
+			}
+		}
+#endif
+	}
+	if (holds_lock == 0) {
+		SCTP_INP_RUNLOCK(inp);
+	}
+	return (NULL);
+}
+
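+/*
+ * derive a 32-bit hash for an address by folding its words together and
+ * mixing the upper half into the lower half (AF_CONN hashes the pointer
+ * value); used to pick a bucket in the VRF address hash table.
+ */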
+uint32_t
+sctp_get_ifa_hash_val(struct sockaddr *addr)
+{
+	switch (addr->sa_family) {
+#ifdef INET
+	case AF_INET:
+	{
+		struct sockaddr_in *sin;
+
+		sin = (struct sockaddr_in *)addr;
+		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
+	}
+#endif
+#ifdef INET6
+	case AF_INET6:
+	{
+		struct sockaddr_in6 *sin6;
+		uint32_t hash_of_addr;
+
+		sin6 = (struct sockaddr_in6 *)addr;
+#if !defined(__Windows__) && !defined(__Userspace_os_FreeBSD) && !defined(__Userspace_os_Darwin) && !defined(__Userspace_os_Windows)
+		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
+				sin6->sin6_addr.s6_addr32[1] +
+				sin6->sin6_addr.s6_addr32[2] +
+				sin6->sin6_addr.s6_addr32[3]);
+#else
+		hash_of_addr = (((uint32_t *)&sin6->sin6_addr)[0] +
+				((uint32_t *)&sin6->sin6_addr)[1] +
+				((uint32_t *)&sin6->sin6_addr)[2] +
+				((uint32_t *)&sin6->sin6_addr)[3]);
+#endif
+		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
+		return (hash_of_addr);
+	}
+#endif
+#if defined(__Userspace__)
+	case AF_CONN:
+	{
+		struct sockaddr_conn *sconn;
+		uintptr_t temp;
+
+		sconn = (struct sockaddr_conn *)addr;
+		temp = (uintptr_t)sconn->sconn_addr;
+		return ((uint32_t)(temp ^ (temp >> 16)));
+	}
+#endif
+	default:
+		break;
+	}
+	return (0);
+}
+
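+/*
+ * look up an address in the given VRF's address hash table; returns the
+ * matching sctp_ifa or NULL if the VRF or the address is unknown.
+ */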
+struct sctp_ifa *
+sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
+{
+	struct sctp_ifa *sctp_ifap;
+	struct sctp_vrf *vrf;
+	struct sctp_ifalist *hash_head;
+	uint32_t hash_of_addr;
+
+	if (holds_lock == 0)
+		SCTP_IPI_ADDR_RLOCK();
+
+	vrf = sctp_find_vrf(vrf_id);
+	if (vrf == NULL) {
+		if (holds_lock == 0)
+			SCTP_IPI_ADDR_RUNLOCK();
+		return (NULL);
+	}
+
+	hash_of_addr = sctp_get_ifa_hash_val(addr);
+
+	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
+	if (hash_head == NULL) {
+		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
+			    hash_of_addr, (uint32_t)vrf->vrf_addr_hashmark,
+			    (uint32_t)(hash_of_addr & vrf->vrf_addr_hashmark));
+		sctp_print_address(addr);
+		SCTP_PRINTF("No such bucket for address\n");
+		if (holds_lock == 0)
+			SCTP_IPI_ADDR_RUNLOCK();
+
+		return (NULL);
+	}
+	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
+		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
+			continue;
+#ifdef INET
+		if (addr->sa_family == AF_INET) {
+			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
+			    sctp_ifap->address.sin.sin_addr.s_addr) {
+				/* found him. */
+				if (holds_lock == 0)
+					SCTP_IPI_ADDR_RUNLOCK();
+				return (sctp_ifap);
+				break;
+			}
+		}
+#endif
+#ifdef INET6
+		if (addr->sa_family == AF_INET6) {
+			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
+						 &sctp_ifap->address.sin6)) {
+				/* found him. */
+				if (holds_lock == 0)
+					SCTP_IPI_ADDR_RUNLOCK();
+				return (sctp_ifap);
+				break;
+			}
+		}
+#endif
+#if defined(__Userspace__)
+		if (addr->sa_family == AF_CONN) {
+			if (((struct sockaddr_conn *)addr)->sconn_addr == sctp_ifap->address.sconn.sconn_addr) {
+				/* found him. */
+				if (holds_lock == 0)
+					SCTP_IPI_ADDR_RUNLOCK();
+				return (sctp_ifap);
+				break;
+			}
+		}
+#endif
+	}
+	if (holds_lock == 0)
+		SCTP_IPI_ADDR_RUNLOCK();
+	return (NULL);
+}
+
+static void
+sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t *freed_so_far, int hold_rlock,
+	       uint32_t rwnd_req)
+{
+	/* User pulled some data, do we need a rwnd update? */
+	int r_unlocked = 0;
+	uint32_t dif, rwnd;
+	struct socket *so = NULL;
+
+	if (stcb == NULL)
+		return;
+
+	atomic_add_int(&stcb->asoc.refcnt, 1);
+
+	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
+				SCTP_STATE_SHUTDOWN_RECEIVED |
+				SCTP_STATE_SHUTDOWN_ACK_SENT)) {
+		/* Pre-check If we are freeing no update */
+		goto no_lock;
+	}
+	SCTP_INP_INCR_REF(stcb->sctp_ep);
+	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+		goto out;
+	}
+	so = stcb->sctp_socket;
+	if (so == NULL) {
+		goto out;
+	}
+	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
+	/* Have you freed enough to look? */
+	*freed_so_far = 0;
+	/* Yep, it's worth a look and the lock overhead */
+
+	/* Figure out what the rwnd would be */
+	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
+	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
+		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
+	} else {
+		dif = 0;
+	}
+	if (dif >= rwnd_req) {
+		if (hold_rlock) {
+			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
+			r_unlocked = 1;
+		}
+		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			/*
+			 * One last check before we allow the guy possibly
+			 * to get in. There is a race where the guy has not
+			 * reached the gate; in that case we bail out.
+			 */
+			goto out;
+		}
+		SCTP_TCB_LOCK(stcb);
+		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+			/* No reports here */
+			SCTP_TCB_UNLOCK(stcb);
+			goto out;
+		}
+		SCTP_STAT_INCR(sctps_wu_sacks_sent);
+		sctp_send_sack(stcb, SCTP_SO_LOCKED);
+
+		sctp_chunk_output(stcb->sctp_ep, stcb,
+				  SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
+		/* make sure no timer is running */
+		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
+		                SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
+		SCTP_TCB_UNLOCK(stcb);
+	} else {
+		/* Update how much we have pending */
+		stcb->freed_by_sorcv_sincelast = dif;
+	}
+ out:
+	if (so && r_unlocked && hold_rlock) {
+		SCTP_INP_READ_LOCK(stcb->sctp_ep);
+	}
+
+	SCTP_INP_DECR_REF(stcb->sctp_ep);
+ no_lock:
+	atomic_add_int(&stcb->asoc.refcnt, -1);
+	return;
+}
+
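+/*
+ * Core receive routine: copies queued data to the caller (or hands back
+ * the raw mbuf chain when mp != NULL), fills in sinfo/from when
+ * requested, and drives window-update SACKs via sctp_user_rcvd().
+ */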
+int
+sctp_sorecvmsg(struct socket *so,
+    struct uio *uio,
+    struct mbuf **mp,
+    struct sockaddr *from,
+    int fromlen,
+    int *msg_flags,
+    struct sctp_sndrcvinfo *sinfo,
+    int filling_sinfo)
+{
+	/*
+	 * MSG flags we will look at:
+	 * MSG_DONTWAIT - non-blocking I/O.
+	 * MSG_PEEK     - look, don't touch :-D (only valid without an mbuf
+	 *                copy, i.e. mp == NULL, so uio is the copy method
+	 *                to userland).
+	 * MSG_WAITALL  - ??
+	 * On the way out we may set any combination of:
+	 * MSG_NOTIFICATION, MSG_EOR.
+	 */
+	struct sctp_inpcb *inp = NULL;
+	int my_len = 0;
+	int cp_len = 0, error = 0;
+	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
+	struct mbuf *m = NULL;
+	struct sctp_tcb *stcb = NULL;
+	int wakeup_read_socket = 0;
+	int freecnt_applied = 0;
+	int out_flags = 0, in_flags = 0;
+	int block_allowed = 1;
+	uint32_t freed_so_far = 0;
+	uint32_t copied_so_far = 0;
+	int in_eeor_mode = 0;
+	int no_rcv_needed = 0;
+	uint32_t rwnd_req = 0;
+	int hold_sblock = 0;
+	int hold_rlock = 0;
+	ssize_t slen = 0;
+	uint32_t held_length = 0;
+#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
+	int sockbuf_lock = 0;
+#endif
+
+	if (uio == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+		return (EINVAL);
+	}
+
+	if (msg_flags) {
+		in_flags = *msg_flags;
+		if (in_flags & MSG_PEEK)
+			SCTP_STAT_INCR(sctps_read_peeks);
+	} else {
+		in_flags = 0;
+	}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+	slen = uio->uio_resid;
+#else
+	slen = uio_resid(uio);
+#endif
+#else
+	slen = uio->uio_resid;
+#endif
+
+	/* Pull in and set up our int flags */
+	if (in_flags & MSG_OOB) {
+		/* Out-of-band data is NOT supported. */
+		return (EOPNOTSUPP);
+	}
+	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+		return (EINVAL);
+	}
+	if ((in_flags & (MSG_DONTWAIT
+#if defined(__FreeBSD__) && __FreeBSD_version > 500000
+			 | MSG_NBIO
+#endif
+		     )) ||
+	    SCTP_SO_IS_NBIO(so)) {
+		block_allowed = 0;
+	}
+	/* setup the endpoint */
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
+		return (EFAULT);
+	}
+	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
+	/* Must be at least an MTU's worth */
+	if (rwnd_req < SCTP_MIN_RWND)
+		rwnd_req = SCTP_MIN_RWND;
+	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		sctp_misc_ints(SCTP_SORECV_ENTER,
+			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
+#else
+		sctp_misc_ints(SCTP_SORECV_ENTER,
+			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio_resid(uio));
+#endif
+#else
+		sctp_misc_ints(SCTP_SORECV_ENTER,
+			       rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
+#endif
+	}
+#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
+	SOCKBUF_LOCK(&so->so_rcv);
+	hold_sblock = 1;
+#endif
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		sctp_misc_ints(SCTP_SORECV_ENTERPL,
+			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
+#else
+		sctp_misc_ints(SCTP_SORECV_ENTERPL,
+			       rwnd_req, block_allowed, so->so_rcv.sb_cc, uio_resid(uio));
+#endif
+#else
+		sctp_misc_ints(SCTP_SORECV_ENTERPL,
+			       rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
+#endif
+	}
+
+#if defined(__APPLE__)
+	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
+#endif
+
+#if defined(__FreeBSD__)
+	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
+#endif
+	if (error) {
+		goto release_unlocked;
+	}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
+	sockbuf_lock = 1;
+#endif
+ restart:
+#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
+	if (hold_sblock == 0) {
+		SOCKBUF_LOCK(&so->so_rcv);
+		hold_sblock = 1;
+	}
+#endif
+#if defined(__APPLE__)
+	sbunlock(&so->so_rcv, 1);
+#endif
+
+#if defined(__FreeBSD__) && __FreeBSD_version < 700000
+	sbunlock(&so->so_rcv);
+#endif
+
+ restart_nosblocks:
+	if (hold_sblock == 0) {
+		SOCKBUF_LOCK(&so->so_rcv);
+		hold_sblock = 1;
+	}
+	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
+	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
+		goto out;
+	}
+#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
+	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
+#else
+	if ((so->so_state & SS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
+#endif
+		if (so->so_error) {
+			error = so->so_error;
+			if ((in_flags & MSG_PEEK) == 0)
+				so->so_error = 0;
+			goto out;
+		} else {
+			if (so->so_rcv.sb_cc == 0) {
+				/* indicate EOF */
+				error = 0;
+				goto out;
+			}
+		}
+	}
+	if (so->so_rcv.sb_cc <= held_length) {
+		if (so->so_error) {
+			error = so->so_error;
+			if ((in_flags & MSG_PEEK) == 0) {
+				so->so_error = 0;
+			}
+			goto out;
+		}
+		if ((so->so_rcv.sb_cc == 0) &&
+		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
+		     (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
+				/* For the active open side, clear flags for re-use;
+				 * a passive open is blocked by connect.
+				 */
+				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
+					/* You were aborted, passive side always hits here */
+					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
+					error = ECONNRESET;
+				}
+				so->so_state &= ~(SS_ISCONNECTING |
+						  SS_ISDISCONNECTING |
+						  SS_ISCONFIRMING |
+						  SS_ISCONNECTED);
+				if (error == 0) {
+					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
+						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
+						error = ENOTCONN;
+					}
+				}
+				goto out;
+			}
+		}
+		if (block_allowed) {
+			error = sbwait(&so->so_rcv);
+			if (error) {
+				goto out;
+			}
+			held_length = 0;
+			goto restart_nosblocks;
+		} else {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
+			error = EWOULDBLOCK;
+			goto out;
+		}
+	}
+	if (hold_sblock == 1) {
+		SOCKBUF_UNLOCK(&so->so_rcv);
+		hold_sblock = 0;
+	}
+#if defined(__APPLE__)
+	error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
+#endif
+#if defined(__FreeBSD__) && __FreeBSD_version < 700000
+	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
+#endif
+	/* we possibly have data we can read */
+	/*sa_ignore FREED_MEMORY*/
+	control = TAILQ_FIRST(&inp->read_queue);
+	if (control == NULL) {
+		/* This can happen when the appender has done
+		 * the increment but has not yet done the tailq
+		 * insert onto the read_queue.
+		 */
+		if (hold_rlock == 0) {
+			SCTP_INP_READ_LOCK(inp);
+		}
+		control = TAILQ_FIRST(&inp->read_queue);
+		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
+#ifdef INVARIANTS
+			panic("Huh, it's non-zero and nothing on control?");
+#endif
+			so->so_rcv.sb_cc = 0;
+		}
+		SCTP_INP_READ_UNLOCK(inp);
+		hold_rlock = 0;
+		goto restart;
+	}
+
+	if ((control->length == 0) &&
+	    (control->do_not_ref_stcb)) {
+		/* Clean-up code for freeing an assoc that left behind a pdapi;
+		 * maybe a peer in EEOR mode that just closed after sending and
+		 * never indicated an EOR.
+		 */
+		if (hold_rlock == 0) {
+			hold_rlock = 1;
+			SCTP_INP_READ_LOCK(inp);
+		}
+		control->held_length = 0;
+		if (control->data) {
+			/* Hmm, there is data here; fix up the length. */
+			struct mbuf *m_tmp;
+			int cnt = 0;
+			m_tmp = control->data;
+			while (m_tmp) {
+				cnt += SCTP_BUF_LEN(m_tmp);
+				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
+					control->tail_mbuf = m_tmp;
+					control->end_added = 1;
+				}
+				m_tmp = SCTP_BUF_NEXT(m_tmp);
+			}
+			control->length = cnt;
+		} else {
+			/* remove it */
+			TAILQ_REMOVE(&inp->read_queue, control, next);
+			/* Add back any hidden data */
+			sctp_free_remote_addr(control->whoFrom);
+			sctp_free_a_readq(stcb, control);
+		}
+		if (hold_rlock) {
+			hold_rlock = 0;
+			SCTP_INP_READ_UNLOCK(inp);
+		}
+		goto restart;
+	}
+	if ((control->length == 0) &&
+	    (control->end_added == 1)) {
+		/* Do we also need to check for (control->pdapi_aborted == 1)? */
+		if (hold_rlock == 0) {
+			hold_rlock = 1;
+			SCTP_INP_READ_LOCK(inp);
+		}
+		TAILQ_REMOVE(&inp->read_queue, control, next);
+		if (control->data) {
+#ifdef INVARIANTS
+			panic("control->data not null but control->length == 0");
+#else
+			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
+			sctp_m_freem(control->data);
+			control->data = NULL;
+#endif
+		}
+		if (control->aux_data) {
+			sctp_m_free(control->aux_data);
+			control->aux_data = NULL;
+		}
+#ifdef INVARIANTS
+		if (control->on_strm_q) {
+			panic("About to free ctl:%p so:%p and its in %d",
+			      control, so, control->on_strm_q);
+		}
+#endif
+		sctp_free_remote_addr(control->whoFrom);
+		sctp_free_a_readq(stcb, control);
+		if (hold_rlock) {
+			hold_rlock = 0;
+			SCTP_INP_READ_UNLOCK(inp);
+		}
+		goto restart;
+	}
+	if (control->length == 0) {
+		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
+		    (filling_sinfo)) {
+			/* find a more suitable one than this */
+			ctl = TAILQ_NEXT(control, next);
+			while (ctl) {
+				if ((ctl->stcb != control->stcb) && (ctl->length) &&
+				    (ctl->some_taken ||
+				     (ctl->spec_flags & M_NOTIFICATION) ||
+				     ((ctl->do_not_ref_stcb == 0) &&
+				      (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
+					) {
+					/*-
+					 * If the next TCB is different and there is data
+					 * present, and we have already taken some (pdapi),
+					 * OR we can ref the tcb and no delivery has started
+					 * on this stream, we take it. Note we allow a
+					 * notification on a different assoc to be delivered.
+					 */
+					control = ctl;
+					goto found_one;
+				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
+					   (ctl->length) &&
+					   ((ctl->some_taken) ||
+					    ((ctl->do_not_ref_stcb == 0) &&
+					     ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
+					     (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
+					/*-
+					 * If we have the same tcb, there is data present, and
+					 * we have the stream interleave feature present, then if
+					 * we have taken some (pdapi) or we can refer to that tcb
+					 * AND we have not started a delivery for this stream, we
+					 * can take it. Note we do NOT allow a notification on
+					 * the same assoc to be delivered.
+					 */
+					control = ctl;
+					goto found_one;
+				}
+				ctl = TAILQ_NEXT(ctl, next);
+			}
+		}
+		/*
+		 * If we reach here, no suitable replacement is available
+		 * <or> fragment interleave is NOT on. So stuff the sb_cc
+		 * into our held count, and it's time to sleep again.
+		 */
+		held_length = so->so_rcv.sb_cc;
+		control->held_length = so->so_rcv.sb_cc;
+		goto restart;
+	}
+	/* Clear the held length since there is something to read */
+	control->held_length = 0;
+ found_one:
+	/*
+	 * If we reach here, control has some data for us to read off.
+	 * Note that stcb COULD be NULL.
+	 */
+	if (hold_rlock == 0) {
+		hold_rlock = 1;
+		SCTP_INP_READ_LOCK(inp);
+	}
+	control->some_taken++;
+	stcb = control->stcb;
+	if (stcb) {
+		if ((control->do_not_ref_stcb == 0) &&
+		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
+			if (freecnt_applied == 0)
+				stcb = NULL;
+		} else if (control->do_not_ref_stcb == 0) {
+			/* don't let the tcb be freed out from under us */
+			/*
+			 * The lock on the socket buffer protects us so the
+			 * free code will stop. But since we used the socketbuf
+			 * lock and the sender uses the tcb_lock to increment,
+			 * we need to use the atomic add to the refcnt
+			 */
+			if (freecnt_applied) {
+#ifdef INVARIANTS
+				panic("refcnt already incremented");
+#else
+				SCTP_PRINTF("refcnt already incremented?\n");
+#endif
+			} else {
+				atomic_add_int(&stcb->asoc.refcnt, 1);
+				freecnt_applied = 1;
+			}
+			/*
+			 * Set up to remember how much we have not yet told
+			 * the peer our rwnd has opened up. Note we grab
+			 * the value from the tcb from last time.
+			 * Note too that sack sending clears this when a sack
+			 * is sent, which is fine. Once we hit the rwnd_req,
+			 * we then go to sctp_user_rcvd(), which will
+			 * not lock until it knows it MUST send a WUP-SACK.
+			 */
+			freed_so_far = stcb->freed_by_sorcv_sincelast;
+			stcb->freed_by_sorcv_sincelast = 0;
+		}
+	}
+	if (stcb &&
+	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
+	    control->do_not_ref_stcb == 0) {
+		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
+	}
+
+	/* First, let's copy off the sinfo and sockaddr info */
+	if ((sinfo != NULL) && (filling_sinfo != 0)) {
+		sinfo->sinfo_stream = control->sinfo_stream;
+		sinfo->sinfo_ssn = (uint16_t)control->mid;
+		sinfo->sinfo_flags = control->sinfo_flags;
+		sinfo->sinfo_ppid = control->sinfo_ppid;
+		sinfo->sinfo_context = control->sinfo_context;
+		sinfo->sinfo_timetolive = control->sinfo_timetolive;
+		sinfo->sinfo_tsn = control->sinfo_tsn;
+		sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
+		sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
+		nxt = TAILQ_NEXT(control, next);
+		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
+		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
+			struct sctp_extrcvinfo *s_extra;
+			s_extra = (struct sctp_extrcvinfo *)sinfo;
+			if ((nxt) &&
+			    (nxt->length)) {
+				s_extra->serinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
+				if (nxt->sinfo_flags & SCTP_UNORDERED) {
+					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
+				}
+				if (nxt->spec_flags & M_NOTIFICATION) {
+					s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
+				}
+				s_extra->serinfo_next_aid = nxt->sinfo_assoc_id;
+				s_extra->serinfo_next_length = nxt->length;
+				s_extra->serinfo_next_ppid = nxt->sinfo_ppid;
+				s_extra->serinfo_next_stream = nxt->sinfo_stream;
+				if (nxt->tail_mbuf != NULL) {
+					if (nxt->end_added) {
+						s_extra->serinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
+					}
+				}
+			} else {
+				/* We explicitly zero these, since the memcpy may have
+				 * picked up things beyond the older sinfo_ fields that
+				 * live on the control structure.
+				 */
+				nxt = NULL;
+				s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
+				s_extra->serinfo_next_aid = 0;
+				s_extra->serinfo_next_length = 0;
+				s_extra->serinfo_next_ppid = 0;
+				s_extra->serinfo_next_stream = 0;
+			}
+		}
+		/*
+		 * update off the real current cum-ack, if we have an stcb.
+		 */
+		if ((control->do_not_ref_stcb == 0) && stcb)
+			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
+		/*
+		 * mask off the high bits, we keep the actual chunk bits in
+		 * there.
+		 */
+		sinfo->sinfo_flags &= 0x00ff;
+		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
+			sinfo->sinfo_flags |= SCTP_UNORDERED;
+		}
+	}
+#ifdef SCTP_ASOCLOG_OF_TSNS
+	{
+		int index, newindex;
+		struct sctp_pcbtsn_rlog *entry;
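+		/* Reserve a slot in the read log without a lock: advance the
+		 * ring index with a CAS loop, wrapping at SCTP_READ_LOG_SIZE.
+		 */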
+		do {
+			index = inp->readlog_index;
+			newindex = index + 1;
+			if (newindex >= SCTP_READ_LOG_SIZE) {
+				newindex = 0;
+			}
+		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
+		entry = &inp->readlog[index];
+		entry->vtag = control->sinfo_assoc_id;
+		entry->strm = control->sinfo_stream;
+		entry->seq = (uint16_t)control->mid;
+		entry->sz = control->length;
+		entry->flgs = control->sinfo_flags;
+	}
+#endif
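+	/* Copy the peer's address back to the caller, converting an IPv4
+	 * address to a v4-mapped IPv6 address if the socket asked for it.
+	 */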
+	if ((fromlen > 0) && (from != NULL)) {
+		union sctp_sockstore store;
+		size_t len;
+
+		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
+#ifdef INET6
+			case AF_INET6:
+				len = sizeof(struct sockaddr_in6);
+				store.sin6 = control->whoFrom->ro._l_addr.sin6;
+				store.sin6.sin6_port = control->port_from;
+				break;
+#endif
+#ifdef INET
+			case AF_INET:
+#ifdef INET6
+				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
+					len = sizeof(struct sockaddr_in6);
+					in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
+							    &store.sin6);
+					store.sin6.sin6_port = control->port_from;
+				} else {
+					len = sizeof(struct sockaddr_in);
+					store.sin = control->whoFrom->ro._l_addr.sin;
+					store.sin.sin_port = control->port_from;
+				}
+#else
+				len = sizeof(struct sockaddr_in);
+				store.sin = control->whoFrom->ro._l_addr.sin;
+				store.sin.sin_port = control->port_from;
+#endif
+				break;
+#endif
+#if defined(__Userspace__)
+			case AF_CONN:
+				len = sizeof(struct sockaddr_conn);
+				store.sconn = control->whoFrom->ro._l_addr.sconn;
+				store.sconn.sconn_port = control->port_from;
+				break;
+#endif
+			default:
+				len = 0;
+				break;
+		}
+		memcpy(from, &store, min((size_t)fromlen, len));
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+#ifdef INET6
+		{
+			struct sockaddr_in6 lsa6, *from6;
+
+			from6 = (struct sockaddr_in6 *)from;
+			sctp_recover_scope_mac(from6, (&lsa6));
+		}
+#endif
+#endif
+	}
+	if (hold_rlock) {
+		SCTP_INP_READ_UNLOCK(inp);
+		hold_rlock = 0;
+	}
+	if (hold_sblock) {
+		SOCKBUF_UNLOCK(&so->so_rcv);
+		hold_sblock = 0;
+	}
+	/* now copy out what data we can */
+	if (mp == NULL) {
+		/* copy out each mbuf in the chain up to length */
+	get_more_data:
+		m = control->data;
+		while (m) {
+			/* Move out all we can */
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+			cp_len = (int)uio->uio_resid;
+#else
+			cp_len = (int)uio_resid(uio);
+#endif
+#else
+			cp_len = (int)uio->uio_resid;
+#endif
+			my_len = (int)SCTP_BUF_LEN(m);
+			if (cp_len > my_len) {
+				/* not enough in this buf */
+				cp_len = my_len;
+			}
+			if (hold_rlock) {
+				SCTP_INP_READ_UNLOCK(inp);
+				hold_rlock = 0;
+			}
+#if defined(__APPLE__)
+			SCTP_SOCKET_UNLOCK(so, 0);
+#endif
+			if (cp_len > 0)
+				error = uiomove(mtod(m, char *), cp_len, uio);
+#if defined(__APPLE__)
+			SCTP_SOCKET_LOCK(so, 0);
+#endif
+			/* re-read */
+			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
+				goto release;
+			}
+
+			if ((control->do_not_ref_stcb == 0) && stcb &&
+			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
+				no_rcv_needed = 1;
+			}
+			if (error) {
+				/* error we are out of here */
+				goto release;
+			}
+			SCTP_INP_READ_LOCK(inp);
+			hold_rlock = 1;
+			if (cp_len == SCTP_BUF_LEN(m)) {
+				if ((SCTP_BUF_NEXT(m) == NULL) &&
+				    (control->end_added)) {
+					out_flags |= MSG_EOR;
+					if ((control->do_not_ref_stcb == 0)  &&
+					    (control->stcb != NULL) &&
+					    ((control->spec_flags & M_NOTIFICATION) == 0))
+						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+				}
+				if (control->spec_flags & M_NOTIFICATION) {
+					out_flags |= MSG_NOTIFICATION;
+				}
+				/* we ate up the mbuf */
+				if (in_flags & MSG_PEEK) {
+					/* just looking */
+					m = SCTP_BUF_NEXT(m);
+					copied_so_far += cp_len;
+				} else {
+					/* dispose of the mbuf */
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+						sctp_sblog(&so->so_rcv,
+						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
+					}
+					sctp_sbfree(control, stcb, &so->so_rcv, m);
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+						sctp_sblog(&so->so_rcv,
+						   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+					}
+					copied_so_far += cp_len;
+					freed_so_far += cp_len;
+					freed_so_far += MSIZE;
+					atomic_subtract_int(&control->length, cp_len);
+					control->data = sctp_m_free(m);
+					m = control->data;
+					/* Been through it all; we must hold the sb
+					 * lock, so it is OK to null the tail.
+					 */
+					if (control->data == NULL) {
+#ifdef INVARIANTS
+#if defined(__FreeBSD__)
+						if ((control->end_added == 0) ||
+						    (TAILQ_NEXT(control, next) == NULL)) {
+							/* If the end is not added, OR the
+							 * next is NULL, we MUST have the lock.
+							 */
+							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
+								panic("Hmm we don't own the lock?");
+							}
+						}
+#endif
+#endif
+						control->tail_mbuf = NULL;
+#ifdef INVARIANTS
+						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
+							panic("end_added, nothing left and no MSG_EOR");
+						}
+#endif
+					}
+				}
+			} else {
+				/* Do we need to trim the mbuf? */
+				if (control->spec_flags & M_NOTIFICATION) {
+					out_flags |= MSG_NOTIFICATION;
+				}
+				if ((in_flags & MSG_PEEK) == 0) {
+					SCTP_BUF_RESV_UF(m, cp_len);
+					SCTP_BUF_LEN(m) -= cp_len;
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, cp_len);
+					}
+					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
+					if ((control->do_not_ref_stcb == 0) &&
+					    stcb) {
+						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
+					}
+					copied_so_far += cp_len;
+					freed_so_far += cp_len;
+					freed_so_far += MSIZE;
+					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb?NULL:stcb,
+							   SCTP_LOG_SBRESULT, 0);
+					}
+					atomic_subtract_int(&control->length, cp_len);
+				} else {
+					copied_so_far += cp_len;
+				}
+			}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
+#else
+			if ((out_flags & MSG_EOR) || (uio_resid(uio) == 0)) {
+#endif
+#else
+			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
+#endif
+				break;
+			}
+			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+			    (control->do_not_ref_stcb == 0) &&
+			    (freed_so_far >= rwnd_req)) {
+				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+			}
+		} /* end while(m) */
+		/*
+		 * At this point we have looked at it all and we either have
+		 * a MSG_EOR, or have read all the user wants... <OR>
+		 * control->length == 0.
+		 */
+		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
+			/* we are done with this control */
+			if (control->length == 0) {
+				if (control->data) {
+#ifdef INVARIANTS
+					panic("control->data not null at read eor?");
+#else
+					SCTP_PRINTF("Strange, data left in the control buffer .. invariants would panic?\n");
+					sctp_m_freem(control->data);
+					control->data = NULL;
+#endif
+				}
+			done_with_control:
+				if (hold_rlock == 0) {
+					SCTP_INP_READ_LOCK(inp);
+					hold_rlock = 1;
+				}
+				TAILQ_REMOVE(&inp->read_queue, control, next);
+				/* Add back any hidden data */
+				if (control->held_length) {
+					held_length = 0;
+					control->held_length = 0;
+					wakeup_read_socket = 1;
+				}
+				if (control->aux_data) {
+					sctp_m_free(control->aux_data);
+					control->aux_data = NULL;
+				}
+				no_rcv_needed = control->do_not_ref_stcb;
+				sctp_free_remote_addr(control->whoFrom);
+				control->data = NULL;
+#ifdef INVARIANTS
+				if (control->on_strm_q) {
+					panic("About to free ctl:%p so:%p and its in %d",
+					      control, so, control->on_strm_q);
+				}
+#endif
+				sctp_free_a_readq(stcb, control);
+				control = NULL;
+				if ((freed_so_far >= rwnd_req) &&
+				    (no_rcv_needed == 0))
+					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+
+			} else {
+				/*
+				 * The user did not read all of this
+				 * message, turn off the returned MSG_EOR
+				 * since we are leaving more behind on the
+				 * control to read.
+				 */
+#ifdef INVARIANTS
+				if (control->end_added &&
+				    (control->data == NULL) &&
+				    (control->tail_mbuf == NULL)) {
+					panic("Gak, control->length is corrupt?");
+				}
+#endif
+				no_rcv_needed = control->do_not_ref_stcb;
+				out_flags &= ~MSG_EOR;
+			}
+		}
+		if (out_flags & MSG_EOR) {
+			goto release;
+		}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		if ((uio->uio_resid == 0) ||
+#else
+		if ((uio_resid(uio) == 0) ||
+#endif
+#else
+		if ((uio->uio_resid == 0) ||
+#endif
+		    ((in_eeor_mode) &&
+		     (copied_so_far >= (uint32_t)max(so->so_rcv.sb_lowat, 1)))) {
+			goto release;
+		}
+		/*
+		 * If we hit here, the receiver wants more and this message
+		 * is NOT done (pd-api). So, two questions: can we block?
+		 * If not, we are done. Did the user NOT set MSG_WAITALL?
+		 */
+		if (block_allowed == 0) {
+			goto release;
+		}
+		/*
+		 * We need to wait for more data. A few things:
+		 * - We don't sbunlock(), so no one else gets to read.
+		 * - We must be sure to account for the case where what is
+		 *   added on wakeup is NOT for our control.
+		 */
+
+		/* Do we need to tell the transport a rwnd update might be
+		 * needed before we go to sleep?
+		 */
+		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
+		    ((freed_so_far >= rwnd_req) &&
+		     (control->do_not_ref_stcb == 0) &&
+		     (no_rcv_needed == 0))) {
+			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+		}
+	wait_some_more:
+#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
+		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
+			goto release;
+		}
+#else
+		if (so->so_state & SS_CANTRCVMORE) {
+			goto release;
+		}
+#endif
+
+		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
+			goto release;
+
+		if (hold_rlock == 1) {
+			SCTP_INP_READ_UNLOCK(inp);
+			hold_rlock = 0;
+		}
+		if (hold_sblock == 0) {
+			SOCKBUF_LOCK(&so->so_rcv);
+			hold_sblock = 1;
+		}
+		if ((copied_so_far) && (control->length == 0) &&
+		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
+			goto release;
+		}
+#if defined(__APPLE__)
+		sbunlock(&so->so_rcv, 1);
+#endif
+		if (so->so_rcv.sb_cc <= control->held_length) {
+			error = sbwait(&so->so_rcv);
+			if (error) {
+#if defined(__FreeBSD__)
+				goto release;
+#else
+				goto release_unlocked;
+#endif
+			}
+			control->held_length = 0;
+		}
+#if defined(__APPLE__)
+		error = sblock(&so->so_rcv, SBLOCKWAIT(in_flags));
+#endif
+		if (hold_sblock) {
+			SOCKBUF_UNLOCK(&so->so_rcv);
+			hold_sblock = 0;
+		}
+		if (control->length == 0) {
+			/* still nothing here */
+			if (control->end_added == 1) {
+				/* the peer aborted or is done, i.e. did a shutdown */
+				out_flags |= MSG_EOR;
+				if (control->pdapi_aborted) {
+					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
+						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+
+					out_flags |= MSG_TRUNC;
+				} else {
+					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
+						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+				}
+				goto done_with_control;
+			}
+			if (so->so_rcv.sb_cc > held_length) {
+				control->held_length = so->so_rcv.sb_cc;
+				held_length = 0;
+			}
+			goto wait_some_more;
+		} else if (control->data == NULL) {
+			/* we must re-sync since data
+			 * is probably being added
+			 */
+			SCTP_INP_READ_LOCK(inp);
+			if ((control->length > 0) && (control->data == NULL)) {
+				/* big trouble: we have the lock and it's corrupt? */
+#ifdef INVARIANTS
+				panic("Impossible data==NULL length !=0");
+#endif
+				out_flags |= MSG_EOR;
+				out_flags |= MSG_TRUNC;
+				control->length = 0;
+				SCTP_INP_READ_UNLOCK(inp);
+				goto done_with_control;
+			}
+			SCTP_INP_READ_UNLOCK(inp);
+			/* We will loop around to get more data */
+		}
+		goto get_more_data;
+	} else {
+		/*-
+		 * Give the caller back the mbuf chain,
+		 * store the length in uio_resid.
+		 */
+		wakeup_read_socket = 0;
+		if ((control->end_added == 0) ||
+		    (TAILQ_NEXT(control, next) == NULL)) {
+			/* Need to get rlock */
+			if (hold_rlock == 0) {
+				SCTP_INP_READ_LOCK(inp);
+				hold_rlock = 1;
+			}
+		}
+		if (control->end_added) {
+			out_flags |= MSG_EOR;
+			if ((control->do_not_ref_stcb == 0) &&
+			    (control->stcb != NULL) &&
+			    ((control->spec_flags & M_NOTIFICATION) == 0))
+				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
+		}
+		if (control->spec_flags & M_NOTIFICATION) {
+			out_flags |= MSG_NOTIFICATION;
+		}
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+		uio->uio_resid = control->length;
+#else
+		uio_setresid(uio, control->length);
+#endif
+#else
+		uio->uio_resid = control->length;
+#endif
+		*mp = control->data;
+		m = control->data;
+		while (m) {
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+				sctp_sblog(&so->so_rcv,
+				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
+			}
+			sctp_sbfree(control, stcb, &so->so_rcv, m);
+			freed_so_far += SCTP_BUF_LEN(m);
+			freed_so_far += MSIZE;
+			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
+				sctp_sblog(&so->so_rcv,
+				   control->do_not_ref_stcb?NULL:stcb, SCTP_LOG_SBRESULT, 0);
+			}
+			m = SCTP_BUF_NEXT(m);
+		}
+		control->data = control->tail_mbuf = NULL;
+		control->length = 0;
+		if (out_flags & MSG_EOR) {
+			/* Done with this control */
+			goto done_with_control;
+		}
+	}
+ release:
+	if (hold_rlock == 1) {
+		SCTP_INP_READ_UNLOCK(inp);
+		hold_rlock = 0;
+	}
+#if (defined(__FreeBSD__) && __FreeBSD_version < 700000) || defined(__Userspace__)
+	if (hold_sblock == 0) {
+		SOCKBUF_LOCK(&so->so_rcv);
+		hold_sblock = 1;
+	}
+#else
+	if (hold_sblock == 1) {
+		SOCKBUF_UNLOCK(&so->so_rcv);
+		hold_sblock = 0;
+	}
+#endif
+#if defined(__APPLE__)
+	sbunlock(&so->so_rcv, 1);
+#endif
+
+#if defined(__FreeBSD__)
+	sbunlock(&so->so_rcv);
+#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
+	sockbuf_lock = 0;
+#endif
+#endif
+
+ release_unlocked:
+	if (hold_sblock) {
+		SOCKBUF_UNLOCK(&so->so_rcv);
+		hold_sblock = 0;
+	}
+	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
+		if ((freed_so_far >= rwnd_req) &&
+		    (control && (control->do_not_ref_stcb == 0)) &&
+		    (no_rcv_needed == 0))
+			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
+	}
+ out:
+	if (msg_flags) {
+		*msg_flags = out_flags;
+	}
+	if (((out_flags & MSG_EOR) == 0) &&
+	    ((in_flags & MSG_PEEK) == 0) &&
+	    (sinfo) &&
+	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
+	     sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
+		struct sctp_extrcvinfo *s_extra;
+		s_extra = (struct sctp_extrcvinfo *)sinfo;
+		s_extra->serinfo_next_flags = SCTP_NO_NEXT_MSG;
+	}
+	if (hold_rlock == 1) {
+		SCTP_INP_READ_UNLOCK(inp);
+	}
+	if (hold_sblock) {
+		SOCKBUF_UNLOCK(&so->so_rcv);
+	}
+#if defined(__FreeBSD__) && __FreeBSD_version >= 700000
+	if (sockbuf_lock) {
+		sbunlock(&so->so_rcv);
+	}
+#endif
+
+	if (freecnt_applied) {
+		/*
+		 * The lock on the socket buffer protects us so the free
+		 * code will stop. But since we used the socketbuf lock and
+		 * the sender uses the tcb_lock to increment, we need to use
+		 * the atomic add to the refcnt.
+		 */
+		if (stcb == NULL) {
+#ifdef INVARIANTS
+			panic("stcb for refcnt has gone NULL?");
+			goto stage_left;
+#else
+			goto stage_left;
+#endif
+		}
+		/* Save the value back for next time */
+		stcb->freed_by_sorcv_sincelast = freed_so_far;
+		atomic_add_int(&stcb->asoc.refcnt, -1);
+	}
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
+		if (stcb) {
+			sctp_misc_ints(SCTP_SORECV_DONE,
+				       freed_so_far,
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+				       ((uio) ? (slen - uio->uio_resid) : slen),
+#else
+				       ((uio) ? (slen - uio_resid(uio)) : slen),
+#endif
+#else
+				       (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
+#endif
+				       stcb->asoc.my_rwnd,
+				       so->so_rcv.sb_cc);
+		} else {
+			sctp_misc_ints(SCTP_SORECV_DONE,
+				       freed_so_far,
+#if defined(__APPLE__)
+#if defined(APPLE_LEOPARD)
+				       ((uio) ? (slen - uio->uio_resid) : slen),
+#else
+				       ((uio) ? (slen - uio_resid(uio)) : slen),
+#endif
+#else
+				       (uint32_t)((uio) ? (slen - uio->uio_resid) : slen),
+#endif
+				       0,
+				       so->so_rcv.sb_cc);
+		}
+	}
+ stage_left:
+	if (wakeup_read_socket) {
+		sctp_sorwakeup(inp, so);
+	}
+	return (error);
+}
+
+
+#ifdef SCTP_MBUF_LOGGING
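+/* Logging wrappers around m_free()/m_freem(): record each mbuf as it is
+ * freed when SCTP_MBUF_LOGGING_ENABLE is set in the logging level.
+ */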
+struct mbuf *
+sctp_m_free(struct mbuf *m)
+{
+	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
+		sctp_log_mb(m, SCTP_MBUF_IFREE);
+	}
+	return (m_free(m));
+}
+
+void
+sctp_m_freem(struct mbuf *mb)
+{
+	while (mb != NULL)
+		mb = sctp_m_free(mb);
+}
+
+#endif
+
+int
+sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
+{
+	/* Given a local address, request a peer-set-primary
+	 * from every association that holds the address.
+	 */
+	struct sctp_ifa *ifa;
+	struct sctp_laddr *wi;
+
+	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
+	if (ifa == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
+		return (EADDRNOTAVAIL);
+	}
+	/* Now that we have the ifa we must awaken the
+	 * iterator with this message.
+	 */
+	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
+	if (wi == NULL) {
+		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
+		return (ENOMEM);
+	}
+	/* Now increment the count and init the wi structure */
+	SCTP_INCR_LADDR_COUNT();
+	bzero(wi, sizeof(*wi));
+	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
+	wi->ifa = ifa;
+	wi->action = SCTP_SET_PRIM_ADDR;
+	atomic_add_int(&ifa->refcount, 1);
+
+	/* Now add it to the work queue */
+	SCTP_WQ_ADDR_LOCK();
+	/*
+	 * Should this really be a tailq? As it is we will process the
+	 * newest first :-0
+	 */
+	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
+	SCTP_WQ_ADDR_UNLOCK();
+	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
+			 (struct sctp_inpcb *)NULL,
+			 (struct sctp_tcb *)NULL,
+			 (struct sctp_nets *)NULL);
+	return (0);
+}
+
+#if defined(__Userspace__)
+/* no sctp_soreceive for __Userspace__ now */
+#endif
+
+#if !defined(__Userspace__)
+int
+sctp_soreceive(struct socket *so,
+		struct sockaddr **psa,
+		struct uio *uio,
+		struct mbuf **mp0,
+		struct mbuf **controlp,
+		int *flagsp)
+{
+	int error, fromlen;
+	uint8_t sockbuf[256];
+	struct sockaddr *from;
+	struct sctp_extrcvinfo sinfo;
+	int filling_sinfo = 1;
+	struct sctp_inpcb *inp;
+
+	inp = (struct sctp_inpcb *)so->so_pcb;
+	/* pickup the assoc we are reading from */
+	if (inp == NULL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+		return (EINVAL);
+	}
+	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
+	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
+	     sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
+	    (controlp == NULL)) {
+		/* user does not want the sndrcv ctl */
+		filling_sinfo = 0;
+	}
+	if (psa) {
+		from = (struct sockaddr *)sockbuf;
+		fromlen = sizeof(sockbuf);
+#ifdef HAVE_SA_LEN
+		from->sa_len = 0;
+#endif
+	} else {
+		from = NULL;
+		fromlen = 0;
+	}
+
+#if defined(__APPLE__)
+	SCTP_SOCKET_LOCK(so, 1);
+#endif
+	if (filling_sinfo) {
+		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
+	}
+	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
+	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
+	if (controlp != NULL) {
+		/* copy back the sinfo in a CMSG format */
+		if (filling_sinfo)
+			*controlp = sctp_build_ctl_nchunk(inp,
+			                                  (struct sctp_sndrcvinfo *)&sinfo);
+		else
+			*controlp = NULL;
+	}
+	if (psa) {
+		/* copy back the address info */
+#ifdef HAVE_SA_LEN
+		if (from && from->sa_len) {
+#else
+		if (from) {
+#endif
+#if (defined(__FreeBSD__) && __FreeBSD_version > 500000) || defined(__Windows__)
+			*psa = sodupsockaddr(from, M_NOWAIT);
+#else
+			*psa = dup_sockaddr(from, mp0 == 0);
+#endif
+		} else {
+			*psa = NULL;
+		}
+	}
+#if defined(__APPLE__)
+	SCTP_SOCKET_UNLOCK(so, 1);
+#endif
+	return (error);
+}
+
+
+#if (defined(__FreeBSD__) && __FreeBSD_version < 603000) || defined(__Windows__)
+/*
+ * General routine to allocate a hash table with control of memory flags.
+ * It is in 7.0 and beyond for sure :-)
+ */
+void *
+sctp_hashinit_flags(int elements, struct malloc_type *type,
+                    u_long *hashmask, int flags)
+{
+	long hashsize;
+	LIST_HEAD(generic, generic) *hashtbl;
+	int i;
+
+	if (elements <= 0) {
+#ifdef INVARIANTS
+		panic("hashinit: bad elements");
+#else
+		SCTP_PRINTF("hashinit: bad elements?");
+		elements = 1;
+#endif
+	}
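+	/* Round hashsize down to the largest power of two that does not
+	 * exceed elements.
+	 */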
+	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
+		continue;
+	hashsize >>= 1;
+	if (flags & HASH_WAITOK)
+		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
+	else if (flags & HASH_NOWAIT)
+		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_NOWAIT);
+	else {
+#ifdef INVARIANTS
+		panic("flag incorrect in hashinit_flags");
+#else
+		return (NULL);
+#endif
+	}
+
+	/* no memory? */
+	if (hashtbl == NULL)
+		return (NULL);
+
+	for (i = 0; i < hashsize; i++)
+		LIST_INIT(&hashtbl[i]);
+	*hashmask = hashsize - 1;
+	return (hashtbl);
+}
+#endif
+
+#else /*  __Userspace__ ifdef above sctp_soreceive */
+/*
+ * __Userspace__: Defining sctp_hashinit_flags() and sctp_hashdestroy() for
+ * userland. NOTE: We don't want multiple definitions here, so the
+ * sctp_hashinit_flags() above for __FreeBSD__ must be excluded.
+ */
+
+void *
+sctp_hashinit_flags(int elements, struct malloc_type *type,
+                    u_long *hashmask, int flags)
+{
+	long hashsize;
+	LIST_HEAD(generic, generic) *hashtbl;
+	int i;
+
+	if (elements <= 0) {
+		SCTP_PRINTF("hashinit: bad elements?");
+#ifdef INVARIANTS
+		return (NULL);
+#else
+		elements = 1;
+#endif
+	}
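+	/* Round hashsize down to the largest power of two that does not
+	 * exceed elements.
+	 */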
+	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
+		continue;
+	hashsize >>= 1;
+	/* Cannot use MALLOC here because it has to be declared or defined
+	 * using MALLOC_DECLARE or MALLOC_DEFINE first.
+	 */
+	if (flags & HASH_WAITOK)
+		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
+	else if (flags & HASH_NOWAIT)
+		hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl));
+	else {
+#ifdef INVARIANTS
+		SCTP_PRINTF("flag incorrect in hashinit_flags.\n");
+#endif
+		return (NULL);
+	}
+
+	/* no memory? */
+	if (hashtbl == NULL)
+		return (NULL);
+
+	for (i = 0; i < hashsize; i++)
+		LIST_INIT(&hashtbl[i]);
+	*hashmask = hashsize - 1;
+	return (hashtbl);
+}
+
+
+void
+sctp_hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
+{
+	LIST_HEAD(generic, generic) *hashtbl, *hp;
+
+	hashtbl = vhashtbl;
+	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
+		if (!LIST_EMPTY(hp)) {
+			SCTP_PRINTF("hashdestroy: hash not empty.\n");
+			return;
+		}
+	FREE(hashtbl, type);
+}
+
+
+void
+sctp_hashfreedestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
+{
+	LIST_HEAD(generic, generic) *hashtbl/*, *hp*/;
+	/*
+	LIST_ENTRY(type) *start, *temp;
+	 */
+	hashtbl = vhashtbl;
+	/* Apparently temp is not dynamically allocated, so attempts to
+	   free it result in an error.
+	for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
+		if (!LIST_EMPTY(hp)) {
+			start = LIST_FIRST(hp);
+			while (start != NULL) {
+				temp = start;
+				start = start->le_next;
+				SCTP_PRINTF("%s: %p \n", __func__, (void *)temp);
+				FREE(temp, type);
+			}
+		}
+	 */
+	FREE(hashtbl, type);
+}
+
+
+#endif
+
+
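+/*
+ * Add each address in the packed array at addr to the association.
+ * On an invalid address the association is freed, *error is set, and
+ * we bail out. Returns the number of addresses added.
+ */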
+int
+sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
+			 int totaddr, int *error)
+{
+	int added = 0;
+	int i;
+	struct sctp_inpcb *inp;
+	struct sockaddr *sa;
+	size_t incr = 0;
+#ifdef INET
+	struct sockaddr_in *sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 *sin6;
+#endif
+
+	sa = addr;
+	inp = stcb->sctp_ep;
+	*error = 0;
+	for (i = 0; i < totaddr; i++) {
+		switch (sa->sa_family) {
+#ifdef INET
+		case AF_INET:
+			incr = sizeof(struct sockaddr_in);
+			sin = (struct sockaddr_in *)sa;
+			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
+			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
+			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
+				*error = EINVAL;
+				goto out_now;
+			}
+			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
+			                         SCTP_DONOT_SETSCOPE,
+			                         SCTP_ADDR_IS_CONFIRMED)) {
+				/* assoc is gone; no unlock needed */
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
+				*error = ENOBUFS;
+				goto out_now;
+			}
+			added++;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+			incr = sizeof(struct sockaddr_in6);
+			sin6 = (struct sockaddr_in6 *)sa;
+			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
+			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
+				*error = EINVAL;
+				goto out_now;
+			}
+			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
+			                         SCTP_DONOT_SETSCOPE,
+			                         SCTP_ADDR_IS_CONFIRMED)) {
+				/* assoc is gone; no unlock needed */
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
+				*error = ENOBUFS;
+				goto out_now;
+			}
+			added++;
+			break;
+#endif
+#if defined(__Userspace__)
+		case AF_CONN:
+			incr = sizeof(struct sockaddr_in6);
+			if (sctp_add_remote_addr(stcb, sa, NULL, stcb->asoc.port,
+			                         SCTP_DONOT_SETSCOPE,
+			                         SCTP_ADDR_IS_CONFIRMED)) {
+				/* assoc is gone; no unlock needed */
+				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
+				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
+				                      SCTP_FROM_SCTPUTIL + SCTP_LOC_11);
+				*error = ENOBUFS;
+				goto out_now;
+			}
+			added++;
+			break;
+#endif
+		default:
+			break;
+		}
+		sa = (struct sockaddr *)((caddr_t)sa + incr);
+	}
+ out_now:
+	return (added);
+}
+
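+/*
+ * Walk the packed address array for connectx(): validate each address,
+ * count the v4/v6 entries, and return any existing association that
+ * already uses one of the addresses (NULL if none).
+ */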
+struct sctp_tcb *
+sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
+			  unsigned int *totaddr,
+			  unsigned int *num_v4, unsigned int *num_v6, int *error,
+			  unsigned int limit, int *bad_addr)
+{
+	struct sockaddr *sa;
+	struct sctp_tcb *stcb = NULL;
+	unsigned int incr, at, i;
+
+	at = 0;
+	sa = addr;
+	*error = *num_v6 = *num_v4 = 0;
+	/* account and validate addresses */
+	for (i = 0; i < *totaddr; i++) {
+		switch (sa->sa_family) {
+#ifdef INET
+		case AF_INET:
+			incr = (unsigned int)sizeof(struct sockaddr_in);
+#ifdef HAVE_SA_LEN
+			if (sa->sa_len != incr) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+				*error = EINVAL;
+				*bad_addr = 1;
+				return (NULL);
+			}
+#endif
+			(*num_v4) += 1;
+			break;
+#endif
+#ifdef INET6
+		case AF_INET6:
+		{
+			struct sockaddr_in6 *sin6;
+
+			sin6 = (struct sockaddr_in6 *)sa;
+			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+				/* Must be non-mapped for connectx */
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+				*error = EINVAL;
+				*bad_addr = 1;
+				return (NULL);
+			}
+			incr = (unsigned int)sizeof(struct sockaddr_in6);
+#ifdef HAVE_SA_LEN
+			if (sa->sa_len != incr) {
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+				*error = EINVAL;
+				*bad_addr = 1;
+				return (NULL);
+			}
+#endif
+			(*num_v6) += 1;
+			break;
+		}
+#endif
+		default:
+			*totaddr = i;
+			incr = 0;
+			/* we are done */
+			break;
+		}
+		if (i == *totaddr) {
+			break;
+		}
+		SCTP_INP_INCR_REF(inp);
+		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
+		if (stcb != NULL) {
+			/* We already have, or are bringing up, an association */
+			return (stcb);
+		} else {
+			SCTP_INP_DECR_REF(inp);
+		}
+		if ((at + incr) > limit) {
+			*totaddr = i;
+			break;
+		}
+		sa = (struct sockaddr *)((caddr_t)sa + incr);
+	}
+	return ((struct sctp_tcb *)NULL);
+}
+
+/*
+ * sctp_bindx(ADD) for one address.
+ * assumes all arguments are valid/checked by caller.
+ */
+void
+sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
+		       struct sockaddr *sa, sctp_assoc_t assoc_id,
+		       uint32_t vrf_id, int *error, void *p)
+{
+	struct sockaddr *addr_touse;
+#if defined(INET) && defined(INET6)
+	struct sockaddr_in sin;
+#endif
+#ifdef SCTP_MVRF
+	int i, fnd = 0;
+#endif
+
+	/* see if we're bound all already! */
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+		*error = EINVAL;
+		return;
+	}
+#ifdef SCTP_MVRF
+	/* Is the VRF one we have */
+	for (i = 0; i < inp->num_vrfs; i++) {
+		if (vrf_id == inp->m_vrf_ids[i]) {
+			fnd = 1;
+			break;
+		}
+	}
+	if (!fnd) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+		*error = EINVAL;
+		return;
+	}
+#endif
+	addr_touse = sa;
+#ifdef INET6
+	if (sa->sa_family == AF_INET6) {
+#ifdef INET
+		struct sockaddr_in6 *sin6;
+
+#endif
+#ifdef HAVE_SA_LEN
+		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+#endif
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+			/* can only bind v6 on PF_INET6 sockets */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+#ifdef INET
+		sin6 = (struct sockaddr_in6 *)addr_touse;
+		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp)) {
+				/* can't bind v4-mapped on PF_INET sockets */
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+				*error = EINVAL;
+				return;
+			}
+			in6_sin6_2_sin(&sin, sin6);
+			addr_touse = (struct sockaddr *)&sin;
+		}
+#endif
+	}
+#endif
+#ifdef INET
+	if (sa->sa_family == AF_INET) {
+#ifdef HAVE_SA_LEN
+		if (sa->sa_len != sizeof(struct sockaddr_in)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+#endif
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+		    SCTP_IPV6_V6ONLY(inp)) {
+			/* can't bind v4 on PF_INET sockets */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+	}
+#endif
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
+#if !(defined(__Panda__) || defined(__Windows__) || defined(__Userspace__))
+		if (p == NULL) {
+			/* Can't get proc for Net/Open BSD */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+#endif
+		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
+		return;
+	}
+	/*
+	 * No locks required here since bind and mgmt_ep_sa
+	 * all do their own locking. If we do something for
+	 * the FIX: below we may need to lock in that case.
+	 */
+	if (assoc_id == 0) {
+		/* add the address */
+		struct sctp_inpcb *lep;
+		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
+
+		/* validate the incoming port */
+		if ((lsin->sin_port != 0) &&
+		    (lsin->sin_port != inp->sctp_lport)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		} else {
+			/* user specified 0 port, set it to existing port */
+			lsin->sin_port = inp->sctp_lport;
+		}
+
+		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
+		if (lep != NULL) {
+			/*
+			 * We must decrement the refcount
+			 * since we have the ep already and
+			 * are binding. No remove going on
+			 * here.
+			 */
+			SCTP_INP_DECR_REF(lep);
+		}
+		if (lep == inp) {
+			/* already bound to it.. ok */
+			return;
+		} else if (lep == NULL) {
+			((struct sockaddr_in *)addr_touse)->sin_port = 0;
+			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
+						      SCTP_ADD_IP_ADDRESS,
+						      vrf_id, NULL);
+		} else {
+			*error = EADDRINUSE;
+		}
+		if (*error)
+			return;
+	} else {
+		/*
+		 * FIX: decide whether we allow assoc based
+		 * bindx
+		 */
+	}
+}
+
+/*
+ * sctp_bindx(DELETE) for one address.
+ * assumes all arguments are valid/checked by caller.
+ */
+void
+sctp_bindx_delete_address(struct sctp_inpcb *inp,
+			  struct sockaddr *sa, sctp_assoc_t assoc_id,
+			  uint32_t vrf_id, int *error)
+{
+	struct sockaddr *addr_touse;
+#if defined(INET) && defined(INET6)
+	struct sockaddr_in sin;
+#endif
+#ifdef SCTP_MVRF
+	int i, fnd = 0;
+#endif
+
+	/* see if we're bound all already! */
+	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+		*error = EINVAL;
+		return;
+	}
+#ifdef SCTP_MVRF
+	/* Is the VRF one we have */
+	for (i = 0; i < inp->num_vrfs; i++) {
+		if (vrf_id == inp->m_vrf_ids[i]) {
+			fnd = 1;
+			break;
+		}
+	}
+	if (!fnd) {
+		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+		*error = EINVAL;
+		return;
+	}
+#endif
+	addr_touse = sa;
+#ifdef INET6
+	if (sa->sa_family == AF_INET6) {
+#ifdef INET
+		struct sockaddr_in6 *sin6;
+#endif
+
+#ifdef HAVE_SA_LEN
+		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+#endif
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
+			/* can only bind v6 on PF_INET6 sockets */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+#ifdef INET
+		sin6 = (struct sockaddr_in6 *)addr_touse;
+		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
+			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+			    SCTP_IPV6_V6ONLY(inp)) {
+				/* can't bind mapped-v4 on PF_INET sockets */
+				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+				*error = EINVAL;
+				return;
+			}
+			in6_sin6_2_sin(&sin, sin6);
+			addr_touse = (struct sockaddr *)&sin;
+		}
+#endif
+	}
+#endif
+#ifdef INET
+	if (sa->sa_family == AF_INET) {
+#ifdef HAVE_SA_LEN
+		if (sa->sa_len != sizeof(struct sockaddr_in)) {
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+#endif
+		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
+		    SCTP_IPV6_V6ONLY(inp)) {
+			/* can't bind v4 on PF_INET sockets */
+			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
+			*error = EINVAL;
+			return;
+		}
+	}
+#endif
+	/*
+	 * No lock required mgmt_ep_sa does its own locking.
+	 * If the FIX: below is ever changed we may need to
+	 * lock before calling association level binding.
+	 */
+	if (assoc_id == 0) {
+		/* delete the address */
+		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
+					      SCTP_DEL_IP_ADDRESS,
+					      vrf_id, NULL);
+	} else {
+		/*
+		 * FIX: decide whether we allow assoc based
+		 * bindx
+		 */
+	}
+}
+
+/*
+ * returns the valid local address count for an assoc, taking into account
+ * all scoping rules
+ */
+int
+sctp_local_addr_count(struct sctp_tcb *stcb)
+{
+	int loopback_scope;
+#if defined(INET)
+	int ipv4_local_scope, ipv4_addr_legal;
+#endif
+#if defined(INET6)
+	int local_scope, site_scope, ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+	int conn_addr_legal;
+#endif
+	struct sctp_vrf *vrf;
+	struct sctp_ifn *sctp_ifn;
+	struct sctp_ifa *sctp_ifa;
+	int count = 0;
+
+	/* Turn on all the appropriate scopes */
+	loopback_scope = stcb->asoc.scope.loopback_scope;
+#if defined(INET)
+	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
+	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
+#endif
+#if defined(INET6)
+	local_scope = stcb->asoc.scope.local_scope;
+	site_scope = stcb->asoc.scope.site_scope;
+	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
+#endif
+#if defined(__Userspace__)
+	conn_addr_legal = stcb->asoc.scope.conn_addr_legal;
+#endif
+	SCTP_IPI_ADDR_RLOCK();
+	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
+	if (vrf == NULL) {
+		/* no vrf, no addresses */
+		SCTP_IPI_ADDR_RUNLOCK();
+		return (0);
+	}
+
+	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
+		/*
+		 * bound all case: go through all ifns on the vrf
+		 */
+		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
+			if ((loopback_scope == 0) &&
+			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
+				continue;
+			}
+			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
+				if (sctp_is_addr_restricted(stcb, sctp_ifa))
+					continue;
+				switch (sctp_ifa->address.sa.sa_family) {
+#ifdef INET
+				case AF_INET:
+					if (ipv4_addr_legal) {
+						struct sockaddr_in *sin;
+
+						sin = &sctp_ifa->address.sin;
+						if (sin->sin_addr.s_addr == 0) {
+							/* skip unspecified addrs */
+							continue;
+						}
+#if defined(__FreeBSD__)
+						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
+						                     &sin->sin_addr) != 0) {
+							continue;
+						}
+#endif
+						if ((ipv4_local_scope == 0) &&
+						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
+							continue;
+						}
+						/* count this one */
+						count++;
+					} else {
+						continue;
+					}
+					break;
+#endif
+#ifdef INET6
+				case AF_INET6:
+					if (ipv6_addr_legal) {
+						struct sockaddr_in6 *sin6;
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE) && !defined(SCTP_KAME)
+						struct sockaddr_in6 lsa6;
+#endif
+						sin6 = &sctp_ifa->address.sin6;
+						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
+							continue;
+						}
+#if defined(__FreeBSD__)
+						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
+						                     &sin6->sin6_addr) != 0) {
+							continue;
+						}
+#endif
+						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
+							if (local_scope == 0)
+								continue;
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+							if (sin6->sin6_scope_id == 0) {
+#ifdef SCTP_KAME
+								if (sa6_recoverscope(sin6) != 0)
+									/*
+									 * bad link
+									 * local
+									 * address
+									 */
+									continue;
+#else
+								lsa6 = *sin6;
+								if (in6_recoverscope(&lsa6,
+								                     &lsa6.sin6_addr,
+								                     NULL))
+									/*
+									 * bad link
+									 * local
+									 * address
+									 */
+									continue;
+								sin6 = &lsa6;
+#endif /* SCTP_KAME */
+							}
+#endif /* SCTP_EMBEDDED_V6_SCOPE */
+						}
+						if ((site_scope == 0) &&
+						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
+							continue;
+						}
+						/* count this one */
+						count++;
+					}
+					break;
+#endif
+#if defined(__Userspace__)
+				case AF_CONN:
+					if (conn_addr_legal) {
+						count++;
+					}
+					break;
+#endif
+				default:
+					/* TSNH */
+					break;
+				}
+			}
+		}
+	} else {
+		/*
+		 * subset bound case
+		 */
+		struct sctp_laddr *laddr;
+		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
+			     sctp_nxt_addr) {
+			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
+				continue;
+			}
+			/* count this one */
+			count++;
+		}
+	}
+	SCTP_IPI_ADDR_RUNLOCK();
+	return (count);
+}
+
+#if defined(SCTP_LOCAL_TRACE_BUF)
+
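+/*
+ * Append an entry to the circular trace log. The index is advanced with
+ * a CAS loop so that concurrent writers each obtain a distinct slot;
+ * the index wraps back to the start at SCTP_MAX_LOGGING_SIZE.
+ */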
+void
+sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
+{
+	uint32_t saveindex, newindex;
+
+#if defined(__Windows__)
+	if (SCTP_BASE_SYSCTL(sctp_log) == NULL) {
+		return;
+	}
+	do {
+		saveindex = SCTP_BASE_SYSCTL(sctp_log)->index;
+		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+			newindex = 1;
+		} else {
+			newindex = saveindex + 1;
+		}
+	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log)->index, saveindex, newindex) == 0);
+	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+		saveindex = 0;
+	}
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].subsys = subsys;
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[0] = a;
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[1] = b;
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[2] = c;
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[3] = d;
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[4] = e;
+	SCTP_BASE_SYSCTL(sctp_log)->entry[saveindex].params[5] = f;
+#else
+	do {
+		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
+		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+			newindex = 1;
+		} else {
+			newindex = saveindex + 1;
+		}
+	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
+	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
+		saveindex = 0;
+	}
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
+	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
+#endif
+}
+
+#endif
+#if defined(__FreeBSD__)
+#if __FreeBSD_version >= 800044
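+/*
+ * Receive callback for the UDP tunneling sockets: strip the encapsulating
+ * UDP header from the mbuf chain and hand the remaining SCTP packet to the
+ * normal IPv4/IPv6 input path, passing along the peer's UDP source port.
+ */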
+static void
+sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
+    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
+{
+	struct ip *iph;
+#ifdef INET6
+	struct ip6_hdr *ip6;
+#endif
+	struct mbuf *sp, *last;
+	struct udphdr *uhdr;
+	uint16_t port;
+
+	if ((m->m_flags & M_PKTHDR) == 0) {
+		/* Can't handle one that is not a pkt hdr */
+		goto out;
+	}
+	/* Pull the src port */
+	iph = mtod(m, struct ip *);
+	uhdr = (struct udphdr *)((caddr_t)iph + off);
+	port = uhdr->uh_sport;
+	/*
+	 * Split out the mbuf chain: leave the IP header in m and place
+	 * the rest in sp.
+	 */
+	sp = m_split(m, off, M_NOWAIT);
+	if (sp == NULL) {
+		/* Gak, drop packet, we can't do a split */
+		goto out;
+	}
+	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
+		/* Gak, packet can't have an SCTP header in it - too small */
+		m_freem(sp);
+		goto out;
+	}
+	/* Now pull up the UDP header and SCTP header together */
+	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
+	if (sp == NULL) {
+		/* Gak pullup failed */
+		goto out;
+	}
+	/* Trim out the UDP header */
+	m_adj(sp, sizeof(struct udphdr));
+
+	/* Now reconstruct the mbuf chain */
+	for (last = m; last->m_next; last = last->m_next);
+	last->m_next = sp;
+	m->m_pkthdr.len += sp->m_pkthdr.len;
+	/*
+	 * The CSUM_DATA_VALID flags indicates that the HW checked the
+	 * The CSUM_DATA_VALID flag indicates that the HW checked the
+	 * Since CSUM_DATA_VALID == CSUM_SCTP_VALID this would imply that
+	 * the HW also verified the SCTP checksum. Therefore, clear the bit.
+	 */
+#if __FreeBSD_version > 1000049
+	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+	        "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%b.\n",
+	        m->m_pkthdr.len,
+	        if_name(m->m_pkthdr.rcvif),
+	        (int)m->m_pkthdr.csum_flags, CSUM_BITS);
+#else
+	SCTPDBG(SCTP_DEBUG_CRCOFFLOAD,
+	        "sctp_recv_udp_tunneled_packet(): Packet of length %d received on %s with csum_flags 0x%x.\n",
+	        m->m_pkthdr.len,
+	        if_name(m->m_pkthdr.rcvif),
+	        m->m_pkthdr.csum_flags);
+#endif
+	m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
+	iph = mtod(m, struct ip *);
+	switch (iph->ip_v) {
+#ifdef INET
+	case IPVERSION:
+#if __FreeBSD_version >= 1000000
+		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
+#else
+		iph->ip_len -= sizeof(struct udphdr);
+#endif
+		sctp_input_with_port(m, off, port);
+		break;
+#endif
+#ifdef INET6
+	case IPV6_VERSION >> 4:
+		ip6 = mtod(m, struct ip6_hdr *);
+		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
+		sctp6_input_with_port(&m, &off, port);
+		break;
+#endif
+	default:
+		goto out;
+		break;
+	}
+	return;
+ out:
+	m_freem(m);
+}
+#endif
+
+#if __FreeBSD_version >= 1100000
+#ifdef INET
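+/*
+ * ICMP error handler for UDP-encapsulated SCTP over IPv4: the error is only
+ * forwarded via sctp_notify() if the quoted packet can be matched to an
+ * existing association by address, UDP port and verification tag (or INIT
+ * initiate tag), so spoofed ICMP messages are ignored. Port unreachable is
+ * remapped to protocol unreachable before notification.
+ */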
+static void
+sctp_recv_icmp_tunneled_packet(int cmd, struct sockaddr *sa, void *vip, void *ctx SCTP_UNUSED)
+{
+	struct ip *outer_ip, *inner_ip;
+	struct sctphdr *sh;
+	struct icmp *icmp;
+	struct udphdr *udp;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+	struct sctp_init_chunk *ch;
+	struct sockaddr_in src, dst;
+	uint8_t type, code;
+
+	inner_ip = (struct ip *)vip;
+	icmp = (struct icmp *)((caddr_t)inner_ip -
+	    (sizeof(struct icmp) - sizeof(struct ip)));
+	outer_ip = (struct ip *)((caddr_t)icmp - sizeof(struct ip));
+	if (ntohs(outer_ip->ip_len) <
+	    sizeof(struct ip) + 8 + (inner_ip->ip_hl << 2) + sizeof(struct udphdr) + 8) {
+		return;
+	}
+	udp = (struct udphdr *)((caddr_t)inner_ip + (inner_ip->ip_hl << 2));
+	sh = (struct sctphdr *)(udp + 1);
+	memset(&src, 0, sizeof(struct sockaddr_in));
+	src.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	src.sin_len = sizeof(struct sockaddr_in);
+#endif
+	src.sin_port = sh->src_port;
+	src.sin_addr = inner_ip->ip_src;
+	memset(&dst, 0, sizeof(struct sockaddr_in));
+	dst.sin_family = AF_INET;
+#ifdef HAVE_SIN_LEN
+	dst.sin_len = sizeof(struct sockaddr_in);
+#endif
+	dst.sin_port = sh->dest_port;
+	dst.sin_addr = inner_ip->ip_dst;
+	/*
+	 * 'dst' holds the destination of the packet that failed to be sent.
+	 * 'src' holds our local endpoint address. Thus we reverse
+	 * the dst and the src in the lookup.
+	 */
+	inp = NULL;
+	net = NULL;
+	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
+	                                    (struct sockaddr *)&src,
+	                                    &inp, &net, 1,
+	                                    SCTP_DEFAULT_VRFID);
+	if ((stcb != NULL) &&
+	    (net != NULL) &&
+	    (inp != NULL)) {
+		/* Check the UDP port numbers */
+		if ((udp->uh_dport != net->port) ||
+		    (udp->uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
+			SCTP_TCB_UNLOCK(stcb);
+			return;
+		}
+		/* Check the verification tag */
+		if (ntohl(sh->v_tag) != 0) {
+			/*
+			 * This must be the verification tag used
+			 * for sending out packets. We don't
+			 * consider packets reflecting the
+			 * verification tag.
+			 */
+			if (ntohl(sh->v_tag) != stcb->asoc.peer_vtag) {
+				SCTP_TCB_UNLOCK(stcb);
+				return;
+			}
+		} else {
+			if (ntohs(outer_ip->ip_len) >=
+			    sizeof(struct ip) +
+			    8 + (inner_ip->ip_hl << 2) + 8 + 20) {
+				/*
+				 * In this case we can check if we
+				 * got an INIT chunk and if the
+				 * initiate tag matches.
+				 */
+				ch = (struct sctp_init_chunk *)(sh + 1);
+				if ((ch->ch.chunk_type != SCTP_INITIATION) ||
+				    (ntohl(ch->init.initiate_tag) != stcb->asoc.my_vtag)) {
+					SCTP_TCB_UNLOCK(stcb);
+					return;
+				}
+			} else {
+				SCTP_TCB_UNLOCK(stcb);
+				return;
+			}
+		}
+		type = icmp->icmp_type;
+		code = icmp->icmp_code;
+		if ((type == ICMP_UNREACH) &&
+		    (code == ICMP_UNREACH_PORT)) {
+			code = ICMP_UNREACH_PROTOCOL;
+		}
+		sctp_notify(inp, stcb, net, type, code,
+		            ntohs(inner_ip->ip_len),
+		            ntohs(icmp->icmp_nextmtu));
+	} else {
+#if defined(__FreeBSD__) && __FreeBSD_version < 500000
+		/*
+		 * XXX must be fixed for 5.x and higher, leave for
+		 * 4.x
+		 */
+		if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) {
+			in_rtchange((struct inpcb *)inp,
+			    inetctlerrmap[cmd]);
+		}
+#endif
+		if ((stcb == NULL) && (inp != NULL)) {
+			/* reduce ref-count */
+			SCTP_INP_WLOCK(inp);
+			SCTP_INP_DECR_REF(inp);
+			SCTP_INP_WUNLOCK(inp);
+		}
+		if (stcb) {
+			SCTP_TCB_UNLOCK(stcb);
+		}
+	}
+	return;
+}
+#endif
+
+#ifdef INET6
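+/*
+ * ICMPv6 counterpart of the handler above: copy the quoted UDP and SCTP
+ * headers out of the mbuf, match them against an existing association and,
+ * on success, report the error through sctp6_notify(); destination
+ * unreachable / no port is remapped to a parameter problem first.
+ */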
+static void
+sctp_recv_icmp6_tunneled_packet(int cmd, struct sockaddr *sa, void *d, void *ctx SCTP_UNUSED)
+{
+	struct ip6ctlparam *ip6cp;
+	struct sctp_inpcb *inp;
+	struct sctp_tcb *stcb;
+	struct sctp_nets *net;
+	struct sctphdr sh;
+	struct udphdr udp;
+	struct sockaddr_in6 src, dst;
+	uint8_t type, code;
+
+	ip6cp = (struct ip6ctlparam *)d;
+	/*
+	 * XXX: We assume that when IPV6 is non-NULL, M and OFF are
+	 * valid.
+	 */
+	if (ip6cp->ip6c_m == NULL) {
+		return;
+	}
+	/*
+	 * Check if we can safely examine the ports and the verification
+	 * tag of the SCTP common header.
+	 */
+	if (ip6cp->ip6c_m->m_pkthdr.len <
+	    ip6cp->ip6c_off + sizeof(struct udphdr) + offsetof(struct sctphdr, checksum)) {
+		return;
+	}
+	/* Copy out the UDP header. */
+	memset(&udp, 0, sizeof(struct udphdr));
+	m_copydata(ip6cp->ip6c_m,
+		   ip6cp->ip6c_off,
+		   sizeof(struct udphdr),
+		   (caddr_t)&udp);
+	/* Copy out the port numbers and the verification tag. */
+	memset(&sh, 0, sizeof(struct sctphdr));
+	m_copydata(ip6cp->ip6c_m,
+		   ip6cp->ip6c_off + sizeof(struct udphdr),
+		   sizeof(uint16_t) + sizeof(uint16_t) + sizeof(uint32_t),
+		   (caddr_t)&sh);
+	memset(&src, 0, sizeof(struct sockaddr_in6));
+	src.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+	src.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+	src.sin6_port = sh.src_port;
+	src.sin6_addr = ip6cp->ip6c_ip6->ip6_src;
+#if defined(__FreeBSD__)
+	if (in6_setscope(&src.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
+		return;
+	}
+#endif
+	memset(&dst, 0, sizeof(struct sockaddr_in6));
+	dst.sin6_family = AF_INET6;
+#ifdef HAVE_SIN6_LEN
+	dst.sin6_len = sizeof(struct sockaddr_in6);
+#endif
+	dst.sin6_port = sh.dest_port;
+	dst.sin6_addr = ip6cp->ip6c_ip6->ip6_dst;
+#if defined(__FreeBSD__)
+	if (in6_setscope(&dst.sin6_addr, ip6cp->ip6c_m->m_pkthdr.rcvif, NULL) != 0) {
+		return;
+	}
+#endif
+	inp = NULL;
+	net = NULL;
+	stcb = sctp_findassociation_addr_sa((struct sockaddr *)&dst,
+	                                    (struct sockaddr *)&src,
+	                                    &inp, &net, 1, SCTP_DEFAULT_VRFID);
+	if ((stcb != NULL) &&
+	    (net != NULL) &&
+	    (inp != NULL)) {
+		/* Check the UDP port numbers */
+		if ((udp.uh_dport != net->port) ||
+		    (udp.uh_sport != htons(SCTP_BASE_SYSCTL(sctp_udp_tunneling_port)))) {
+			SCTP_TCB_UNLOCK(stcb);
+			return;
+		}
+		/* Check the verification tag */
+		if (ntohl(sh.v_tag) != 0) {
+			/*
+			 * This must be the verification tag used for
+			 * sending out packets. We don't consider
+			 * packets reflecting the verification tag.
+			 */
+			if (ntohl(sh.v_tag) != stcb->asoc.peer_vtag) {
+				SCTP_TCB_UNLOCK(stcb);
+				return;
+			}
+		} else {
+#if defined(__FreeBSD__)
+			if (ip6cp->ip6c_m->m_pkthdr.len >=
+			    ip6cp->ip6c_off + sizeof(struct udphdr) +
+			                      sizeof(struct sctphdr) +
+			                      sizeof(struct sctp_chunkhdr) +
+			                      offsetof(struct sctp_init, a_rwnd)) {
+				/*
+				 * In this case we can check if we
+				 * got an INIT chunk and if the
+				 * initiate tag matches.
+				 */
+				uint32_t initiate_tag;
+				uint8_t chunk_type;
+
+				m_copydata(ip6cp->ip6c_m,
+					   ip6cp->ip6c_off +
+					   sizeof(struct udphdr) +
+					   sizeof(struct sctphdr),
+					   sizeof(uint8_t),
+					   (caddr_t)&chunk_type);
+				m_copydata(ip6cp->ip6c_m,
+					   ip6cp->ip6c_off +
+					   sizeof(struct udphdr) +
+					   sizeof(struct sctphdr) +
+					   sizeof(struct sctp_chunkhdr),
+					   sizeof(uint32_t),
+					   (caddr_t)&initiate_tag);
+				if ((chunk_type != SCTP_INITIATION) ||
+				    (ntohl(initiate_tag) != stcb->asoc.my_vtag)) {
+					SCTP_TCB_UNLOCK(stcb);
+					return;
+				}
+			} else {
+				SCTP_TCB_UNLOCK(stcb);
+				return;
+			}
+#else
+			SCTP_TCB_UNLOCK(stcb);
+			return;
+#endif
+		}
+		type = ip6cp->ip6c_icmp6->icmp6_type;
+		code = ip6cp->ip6c_icmp6->icmp6_code;
+		if ((type == ICMP6_DST_UNREACH) &&
+		    (code == ICMP6_DST_UNREACH_NOPORT)) {
+			type = ICMP6_PARAM_PROB;
+			code = ICMP6_PARAMPROB_NEXTHEADER;
+		}
+		sctp6_notify(inp, stcb, net, type, code,
+			     (uint16_t)ntohl(ip6cp->ip6c_icmp6->icmp6_mtu));
+	} else {
+#if defined(__FreeBSD__) && __FreeBSD_version < 500000
+		if (PRC_IS_REDIRECT(cmd) && (inp != NULL)) {
+			in6_rtchange((struct in6pcb *)inp,
+			    inet6ctlerrmap[cmd]);
+		}
+#endif
+		if ((stcb == NULL) && (inp != NULL)) {
+			/* reduce inp's ref-count */
+			SCTP_INP_WLOCK(inp);
+			SCTP_INP_DECR_REF(inp);
+			SCTP_INP_WUNLOCK(inp);
+		}
+		if (stcb) {
+			SCTP_TCB_UNLOCK(stcb);
+		}
+	}
+}
+#endif
+#endif
+
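+/*
+ * Close the kernel UDP tunneling sockets, if any, that were created by
+ * sctp_over_udp_start().
+ */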
+void
+sctp_over_udp_stop(void)
+{
+	/*
+	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing.
+	 */
+#ifdef INET
+	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
+		soclose(SCTP_BASE_INFO(udp4_tun_socket));
+		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
+	}
+#endif
+#ifdef INET6
+	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
+		soclose(SCTP_BASE_INFO(udp6_tun_socket));
+		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
+	}
+#endif
+}
+
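+/*
+ * Create the kernel UDP tunneling sockets, register the receive and ICMP
+ * callbacks defined above and bind the sockets to the configured
+ * sctp_udp_tunneling_port; on any failure the partially created state is
+ * torn down again via sctp_over_udp_stop().
+ */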
+int
+sctp_over_udp_start(void)
+{
+#if __FreeBSD_version >= 800044
+	uint16_t port;
+	int ret;
+#ifdef INET
+	struct sockaddr_in sin;
+#endif
+#ifdef INET6
+	struct sockaddr_in6 sin6;
+#endif
+	/*
+	 * This function assumes the sysctl caller holds sctp_sysctl_info_lock() for writing.
+	 */
+	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
+	if (ntohs(port) == 0) {
+		/* Must have a port set */
+		return (EINVAL);
+	}
+#ifdef INET
+	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
+		/* Already running -- must stop first */
+		return (EALREADY);
+	}
+#endif
+#ifdef INET6
+	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
+		/* Already running -- must stop first */
+		return (EALREADY);
+	}
+#endif
+#ifdef INET
+	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
+	                    SOCK_DGRAM, IPPROTO_UDP,
+	                    curthread->td_ucred, curthread))) {
+		sctp_over_udp_stop();
+		return (ret);
+	}
+	/* Call the special UDP hook. */
+	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
+	                                    sctp_recv_udp_tunneled_packet,
+#if __FreeBSD_version >= 1100000
+	                                    sctp_recv_icmp_tunneled_packet,
+#endif
+	                                    NULL))) {
+		sctp_over_udp_stop();
+		return (ret);
+	}
+	/* Ok, we have a socket, bind it to the port. */
+	memset(&sin, 0, sizeof(struct sockaddr_in));
+	sin.sin_len = sizeof(struct sockaddr_in);
+	sin.sin_family = AF_INET;
+	sin.sin_port = htons(port);
+	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
+	                  (struct sockaddr *)&sin, curthread))) {
+		sctp_over_udp_stop();
+		return (ret);
+	}
+#endif
+#ifdef INET6
+	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
+	                    SOCK_DGRAM, IPPROTO_UDP,
+	                    curthread->td_ucred, curthread))) {
+		sctp_over_udp_stop();
+		return (ret);
+	}
+	/* Call the special UDP hook. */
+	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
+	                                    sctp_recv_udp_tunneled_packet,
+#if __FreeBSD_version >= 1100000
+	                                    sctp_recv_icmp6_tunneled_packet,
+#endif
+	                                    NULL))) {
+		sctp_over_udp_stop();
+		return (ret);
+	}
+	/* Ok, we have a socket, bind it to the port. */
+	memset(&sin6, 0, sizeof(struct sockaddr_in6));
+	sin6.sin6_len = sizeof(struct sockaddr_in6);
+	sin6.sin6_family = AF_INET6;
+	sin6.sin6_port = htons(port);
+	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
+	                  (struct sockaddr *)&sin6, curthread))) {
+		sctp_over_udp_stop();
+		return (ret);
+	}
+#endif
+	return (0);
+#else
+	return (ENOTSUP);
+#endif
+}
+#endif
diff --git a/usrsctplib/netinet/sctputil.h b/usrsctplib/netinet/sctputil.h
new file mode 100755
index 0000000..7746d54
--- /dev/null
+++ b/usrsctplib/netinet/sctputil.h
@@ -0,0 +1,423 @@
+/*-
+ * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
+ * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Cisco Systems, Inc. nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __FreeBSD__
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: head/sys/netinet/sctputil.h 310590 2016-12-26 11:06:41Z tuexen $");
+#endif
+
+#ifndef _NETINET_SCTP_UTIL_H_
+#define _NETINET_SCTP_UTIL_H_
+
+#if defined(_KERNEL) || defined(__Userspace__)
+
+#define SCTP_READ_LOCK_HELD 1
+#define SCTP_READ_LOCK_NOT_HELD 0
+
+#ifdef SCTP_ASOCLOG_OF_TSNS
+void sctp_print_out_track_log(struct sctp_tcb *stcb);
+#endif
+
+#ifdef SCTP_MBUF_LOGGING
+struct mbuf *sctp_m_free(struct mbuf *m);
+void sctp_m_freem(struct mbuf *m);
+#else
+#define sctp_m_free m_free
+#define sctp_m_freem m_freem
+#endif
+
+#if defined(SCTP_LOCAL_TRACE_BUF) || defined(__APPLE__)
+void
+sctp_log_trace(uint32_t fr, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f);
+#endif
+
+#define sctp_get_associd(stcb) ((sctp_assoc_t)stcb->asoc.assoc_id)
+
+
+/*
+ * Function prototypes
+ */
+int32_t
+sctp_map_assoc_state(int);
+
+uint32_t
+sctp_get_ifa_hash_val(struct sockaddr *addr);
+
+struct sctp_ifa *
+sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int hold_lock);
+
+struct sctp_ifa *
+sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock);
+
+uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
+
+uint32_t sctp_select_a_tag(struct sctp_inpcb *, uint16_t lport, uint16_t rport, int);
+
+int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, uint32_t, uint32_t, uint16_t);
+
+void sctp_fill_random_store(struct sctp_pcb *);
+
+void
+sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin,
+			     uint16_t numberout, int flag);
+void
+sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag);
+
+void
+sctp_timer_start(int, struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+
+void
+sctp_timer_stop(int, struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *, uint32_t);
+
+int
+sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id);
+
+void
+sctp_mtu_size_reset(struct sctp_inpcb *, struct sctp_association *, uint32_t);
+
+void
+sctp_wakeup_the_read_socket(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
+    int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+);
+
+#if defined(__Userspace__)
+void sctp_invoke_recv_callback(struct sctp_inpcb *,
+    struct sctp_tcb *,
+    struct sctp_queued_to_read *,
+    int);
+
+#endif
+void
+sctp_add_to_readq(struct sctp_inpcb *inp,
+    struct sctp_tcb *stcb,
+    struct sctp_queued_to_read *control,
+    struct sockbuf *sb,
+    int end,
+    int inpread_locked,
+    int so_locked
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    );
+
+void sctp_iterator_worker(void);
+
+uint32_t sctp_get_prev_mtu(uint32_t);
+uint32_t sctp_get_next_mtu(uint32_t);
+
+void
+sctp_timeout_handler(void *);
+
+uint32_t
+sctp_calculate_rto(struct sctp_tcb *, struct sctp_association *,
+    struct sctp_nets *, struct timeval *, int, int);
+
+uint32_t sctp_calculate_len(struct mbuf *);
+
+caddr_t sctp_m_getptr(struct mbuf *, int, int, uint8_t *);
+
+struct sctp_paramhdr *
+sctp_get_next_param(struct mbuf *, int,
+    struct sctp_paramhdr *, int);
+
+struct mbuf *
+sctp_add_pad_tombuf(struct mbuf *, int);
+
+struct mbuf *
+sctp_pad_lastmbuf(struct mbuf *, int, struct mbuf *);
+
+void sctp_ulp_notify(uint32_t, struct sctp_tcb *, uint32_t, void *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    );
+
+void
+sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
+    struct sctp_inpcb *new_inp,
+    struct sctp_tcb *stcb, int waitflags);
+
+
+void sctp_stop_timers_for_shutdown(struct sctp_tcb *);
+
+void sctp_report_all_outbound(struct sctp_tcb *, uint16_t, int, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    );
+
+int sctp_expand_mapping_array(struct sctp_association *, uint32_t);
+
+void sctp_abort_notification(struct sctp_tcb *, uint8_t, uint16_t,
+			     struct sctp_abort_chunk *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+    );
+
+/* We abort responding to an IP packet for some reason */
+void
+sctp_abort_association(struct sctp_inpcb *, struct sctp_tcb *, struct mbuf *,
+                       int, struct sockaddr *, struct sockaddr *,
+                       struct sctphdr *, struct mbuf *,
+#if defined(__FreeBSD__)
+                       uint8_t, uint32_t,
+#endif
+                       uint32_t, uint16_t);
+
+
+/* We choose to abort via user input */
+void
+sctp_abort_an_association(struct sctp_inpcb *, struct sctp_tcb *,
+    struct mbuf *, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+);
+
+void sctp_handle_ootb(struct mbuf *, int, int,
+                      struct sockaddr *, struct sockaddr *,
+                      struct sctphdr *, struct sctp_inpcb *,
+                      struct mbuf *,
+#if defined(__FreeBSD__)
+                      uint8_t, uint32_t, uint16_t,
+#endif
+                      uint32_t, uint16_t);
+
+int sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
+    int totaddr, int *error);
+
+struct sctp_tcb *
+sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
+    unsigned int *totaddr, unsigned int *num_v4, unsigned int *num_v6,
+    int *error, unsigned int limit, int *bad_addr);
+
+int sctp_is_there_an_abort_here(struct mbuf *, int, uint32_t *);
+#ifdef INET6
+uint32_t sctp_is_same_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
+
+#if defined(SCTP_EMBEDDED_V6_SCOPE)
+struct sockaddr_in6 *
+sctp_recover_scope(struct sockaddr_in6 *, struct sockaddr_in6 *);
+
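+/*
+ * sctp_recover_scope_mac(): for a link-local IPv6 address, normalize the
+ * scope information using the caller-provided scratch sockaddr 'store' and
+ * redirect 'addr' to point at the adjusted copy.
+ */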
+#ifdef SCTP_KAME
+#define sctp_recover_scope_mac(addr, store) do { \
+	 if ((addr->sin6_family == AF_INET6) && \
+	     (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) { \
+		*store = *addr; \
+		if (addr->sin6_scope_id == 0) { \
+			if (!sa6_recoverscope(store)) { \
+				addr = store; \
+			} \
+		} else { \
+			in6_clearscope(&addr->sin6_addr); \
+			addr = store; \
+		} \
+	 } \
+} while (0)
+#else
+#define sctp_recover_scope_mac(addr, store) do { \
+	if ((addr->sin6_family == AF_INET6) && \
+	    (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) { \
+		*store = *addr; \
+		if (addr->sin6_scope_id == 0) { \
+			if (!in6_recoverscope(store, &store->sin6_addr, \
+					      NULL)) { \
+				addr = store; \
+			} \
+		} else { \
+			in6_clearscope(&addr->sin6_addr); \
+			addr = store; \
+		} \
+	} \
+} while (0)
+#endif
+#endif
+#endif
+
+int sctp_cmpaddr(struct sockaddr *, struct sockaddr *);
+
+void sctp_print_address(struct sockaddr *);
+
+int
+sctp_release_pr_sctp_chunk(struct sctp_tcb *, struct sctp_tmit_chunk *,
+    uint8_t, int
+#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
+    SCTP_UNUSED
+#endif
+);
+
+struct mbuf *sctp_generate_cause(uint16_t, char *);
+struct mbuf *sctp_generate_no_user_data_cause(uint32_t);
+
+void sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
+			    struct sockaddr *sa, sctp_assoc_t assoc_id,
+			    uint32_t vrf_id, int *error, void *p);
+void sctp_bindx_delete_address(struct sctp_inpcb *inp,
+			       struct sockaddr *sa, sctp_assoc_t assoc_id,
+			       uint32_t vrf_id, int *error);
+
+int sctp_local_addr_count(struct sctp_tcb *stcb);
+
+#ifdef SCTP_MBCNT_LOGGING
+void
+sctp_free_bufspace(struct sctp_tcb *, struct sctp_association *,
+    struct sctp_tmit_chunk *, int);
+
+#else
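+/*
+ * Release the send-side accounting for a transmitted chunk (tp1): decrement
+ * the association's outstanding chunk and byte counters and, for TCP-style
+ * sockets, the socket send buffer, clamping at zero rather than wrapping.
+ */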
+#define sctp_free_bufspace(stcb, asoc, tp1, chk_cnt)  \
+do { \
+	if (tp1->data != NULL) { \
+		atomic_subtract_int(&((asoc)->chunks_on_out_queue), chk_cnt); \
+		if ((asoc)->total_output_queue_size >= tp1->book_size) { \
+			atomic_subtract_int(&((asoc)->total_output_queue_size), tp1->book_size); \
+		} else { \
+			(asoc)->total_output_queue_size = 0; \
+		} \
+		if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+			if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { \
+				atomic_subtract_int(&((stcb)->sctp_socket->so_snd.sb_cc), tp1->book_size); \
+			} else { \
+				stcb->sctp_socket->so_snd.sb_cc = 0; \
+			} \
+		} \
+	} \
+} while (0)
+
+#endif
+
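+/*
+ * Companion to sctp_free_bufspace() for data still sitting on a stream
+ * queue entry (sp): release sp->length bytes from the association's output
+ * queue size and, for TCP-style sockets, from the socket send buffer.
+ */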
+#define sctp_free_spbufspace(stcb, asoc, sp)  \
+do { \
+	if (sp->data != NULL) { \
+		if ((asoc)->total_output_queue_size >= sp->length) { \
+			atomic_subtract_int(&(asoc)->total_output_queue_size, sp->length); \
+		} else { \
+			(asoc)->total_output_queue_size = 0; \
+		} \
+		if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+			if (stcb->sctp_socket->so_snd.sb_cc >= sp->length) { \
+				atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc,sp->length); \
+			} else { \
+				stcb->sctp_socket->so_snd.sb_cc = 0; \
+			} \
+		} \
+	} \
+} while (0)
+
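+/*
+ * Charge sz bytes of newly queued user data to the association's output
+ * queue size and, for TCP-style sockets, to the socket send buffer.
+ */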
+#define sctp_snd_sb_alloc(stcb, sz)  \
+do { \
+	atomic_add_int(&stcb->asoc.total_output_queue_size,sz); \
+	if ((stcb->sctp_socket != NULL) && \
+	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
+	     (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
+		atomic_add_int(&stcb->sctp_socket->so_snd.sb_cc,sz); \
+	} \
+} while (0)
+
+/* Functions to start/stop UDP tunneling. */
+#if defined(__APPLE__) || defined(__FreeBSD__)
+void sctp_over_udp_stop(void);
+int sctp_over_udp_start(void);
+#endif
+#if defined(__Windows__)
+void sctp_over_udp_restart(void);
+#endif
+
+int
+sctp_soreceive(struct socket *so, struct sockaddr **psa,
+    struct uio *uio,
+    struct mbuf **mp0,
+    struct mbuf **controlp,
+    int *flagsp);
+
+void
+sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d);
+
+void
+sctp_wakeup_log(struct sctp_tcb *stcb,
+    uint32_t wake_cnt, int from);
+
+void sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t, uint16_t, uint16_t, int);
+
+void sctp_log_nagle_event(struct sctp_tcb *stcb, int action);
+
+
+#ifdef SCTP_MBUF_LOGGING
+void
+sctp_log_mb(struct mbuf *m, int from);
+
+void
+sctp_log_mbc(struct mbuf *m, int from);
+#endif
+
+void
+sctp_sblog(struct sockbuf *sb,
+    struct sctp_tcb *stcb, int from, int incr);
+
+void
+sctp_log_strm_del(struct sctp_queued_to_read *control,
+    struct sctp_queued_to_read *poschk,
+    int from);
+void sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *, int, uint8_t);
+void rto_logging(struct sctp_nets *net, int from);
+
+void sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc);
+
+void sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from);
+void sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *, int, int, uint8_t);
+void sctp_log_block(uint8_t, struct sctp_association *, size_t);
+void sctp_log_rwnd(uint8_t, uint32_t, uint32_t, uint32_t);
+void sctp_log_rwnd_set(uint8_t, uint32_t, uint32_t, uint32_t, uint32_t);
+int sctp_fill_stat_log(void *, size_t *);
+void sctp_log_fr(uint32_t, uint32_t, uint32_t, int);
+void sctp_log_sack(uint32_t, uint32_t, uint32_t, uint16_t, uint16_t, int);
+void sctp_log_map(uint32_t, uint32_t, uint32_t, int);
+void sctp_print_mapping_array(struct sctp_association *asoc);
+void sctp_clr_stat_log(void);
+
+
+#ifdef SCTP_AUDITING_ENABLED
+void
+sctp_auditing(int, struct sctp_inpcb *, struct sctp_tcb *,
+    struct sctp_nets *);
+void sctp_audit_log(uint8_t, uint8_t);
+
+#endif
+#endif				/* _KERNEL || __Userspace__ */
+#endif