__u32 highest_new_tsn,
                              int count_of_newacks);
 
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
-
 static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
 
 /* Add data to the front of the queue. */
         * following the procedures outlined in C1 - C5.
         */
        if (reason == SCTP_RTXR_T3_RTX)
-               sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+               q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
 
        /* Flush the queues only on timeout, since fast_rtx is only
         * triggered during sack processing and the queue
                case SCTP_CID_ECN_ECNE:
                case SCTP_CID_ASCONF:
                case SCTP_CID_FWD_TSN:
+               case SCTP_CID_I_FWD_TSN:
                case SCTP_CID_RECONF:
                        status = sctp_packet_transmit_chunk(packet, chunk,
                                                            one_packet, gfp);
                         * sender MUST assure that at least one T3-rtx
                         * timer is running.
                         */
-                       if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+                       if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
+                           chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
                                sctp_transport_reset_t3_rtx(transport);
                                transport->last_time_sent = jiffies;
                        }
 
        asoc->peer.rwnd = sack_a_rwnd;
 
-       sctp_generate_fwdtsn(q, sack_ctsn);
+       asoc->stream.si->generate_ftsn(q, sack_ctsn);
 
        pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
        pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
 }
 
 /* Create and add a fwdtsn chunk to the outq's control queue if needed. */
-static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
+void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 {
        struct sctp_association *asoc = q->asoc;
        struct sctp_chunk *ftsn_chunk = NULL;
 
        sctp_ulpq_flush(ulpq);
 }
 
+/* Linearly scan skiplist[0..nskips) for an entry whose (stream, flags)
+ * pair matches.  If no entry matches, the loop runs to completion and
+ * i == nskips, so that value doubles as the "not found" result: the
+ * caller compares the return against nskips and appends a new entry at
+ * that slot (see sctp_generate_iftsn).  The caller bounds nskips by the
+ * skip array size, so the returned index is always safe to write.
+ */
+static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
+                                   int nskips, __be16 stream, __u8 flags)
+{
+       int i;
+
+       for (i = 0; i < nskips; i++)
+               if (skiplist[i].stream == stream &&
+                   skiplist[i].flags == flags)
+                       return i;
+
+       return i;
+}
+
+/* U bit in an I-FORWARD-TSN skip entry: set when the skipped message was
+ * sent unordered (mirrors SCTP_DATA_UNORDERED on the chunk header).
+ */
+#define SCTP_FTSN_U_BIT        0x1
+/* I-DATA counterpart of sctp_generate_fwdtsn(): if abandoned chunks allow
+ * the Advanced.Peer.Ack.Point to move past @ctsn, build an I-FORWARD-TSN
+ * chunk (with up to 10 per-stream skip entries) and queue it on the outq
+ * control chunk list.  Installed as .generate_ftsn for interleave mode 1.
+ */
+static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
+{
+       struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
+       struct sctp_association *asoc = q->asoc;
+       struct sctp_chunk *ftsn_chunk = NULL;
+       struct list_head *lchunk, *temp;
+       int nskips = 0, skip_pos;
+       struct sctp_chunk *chunk;
+       __u32 tsn;
+
+       /* Forward-TSN machinery only applies when the peer supports PR-SCTP. */
+       if (!asoc->peer.prsctp_capable)
+               return;
+
+       /* The cumulative ack never lags the advanced ack point; catch up. */
+       if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
+               asoc->adv_peer_ack_point = ctsn;
+
+       /* Walk the abandoned list (kept in TSN order; the final else-break
+        * below depends on that ordering).
+        */
+       list_for_each_safe(lchunk, temp, &q->abandoned) {
+               chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
+               tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+               if (TSN_lte(tsn, ctsn)) {
+                       /* Already covered by the cumulative ack: drop it. */
+                       list_del_init(lchunk);
+                       sctp_chunk_free(chunk);
+               } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
+                       /* Contiguous with the advanced ack point: skip over
+                        * it and record (stream, flags, mid) for the peer.
+                        */
+                       __be16 sid = chunk->subh.idata_hdr->stream;
+                       __be32 mid = chunk->subh.idata_hdr->mid;
+                       __u8 flags = 0;
+
+                       if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+                               flags |= SCTP_FTSN_U_BIT;
+
+                       asoc->adv_peer_ack_point = tsn;
+                       /* Reuse an existing (sid, flags) slot so its mid is
+                        * updated to the latest skipped message; skip_pos ==
+                        * nskips means "no match, append a new entry".
+                        */
+                       skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
+                                                    sid, flags);
+                       ftsn_skip_arr[skip_pos].stream = sid;
+                       ftsn_skip_arr[skip_pos].reserved = 0;
+                       ftsn_skip_arr[skip_pos].flags = flags;
+                       ftsn_skip_arr[skip_pos].mid = mid;
+                       if (skip_pos == nskips)
+                               nskips++;
+                       /* Skip array is full (10 == ARRAY_SIZE above); stop. */
+                       if (nskips == 10)
+                               break;
+               } else {
+                       /* Gap in TSN space: nothing further is contiguous. */
+                       break;
+               }
+       }
+
+       /* Only emit a chunk if the ack point actually advanced past ctsn.
+        * NOTE(review): plain '>' rather than TSN_lt() matches the existing
+        * sctp_generate_fwdtsn(); presumably safe here, but worth confirming
+        * against TSN wraparound behavior.
+        */
+       if (asoc->adv_peer_ack_point > ctsn)
+               ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
+                                              nskips, &ftsn_skip_arr[0]);
+
+       if (ftsn_chunk) {
+               list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
+       }
+}
+
 static struct sctp_stream_interleave sctp_stream_interleave_0 = {
        .data_chunk_len         = sizeof(struct sctp_data_chunk),
        /* DATA process functions */
        .renege_events          = sctp_ulpq_renege,
        .start_pd               = sctp_ulpq_partial_delivery,
        .abort_pd               = sctp_ulpq_abort_pd,
+       /* FORWARD-TSN process functions */
+       .generate_ftsn          = sctp_generate_fwdtsn,
 };
 
 static struct sctp_stream_interleave sctp_stream_interleave_1 = {
        .renege_events          = sctp_renege_events,
        .start_pd               = sctp_intl_start_pd,
        .abort_pd               = sctp_intl_abort_pd,
+       /* I-FORWARD-TSN process functions */
+       .generate_ftsn          = sctp_generate_iftsn,
 };
 
 void sctp_stream_interleave_init(struct sctp_stream *stream)