cfg_req->mh.num_entries = htons(
                bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
 
+       cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
        cfg_req->num_queue_sets = rx->num_paths;
        for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
                i < rx->num_paths;
                        /* Large/Single RxQ */
                        bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
                                                &q0->qpt);
-                       q0->buffer_size =
-                               bna_enet_mtu_get(&rx->bna->enet);
+                       if (q0->multi_buffer)
+                               /* multi-buffer is enabled by allocating
+                                * a new rx with a new set of resources.
+                                * q0->buffer_size should be initialized
+                                * to the fragment size.
+                                */
+                               cfg_req->rx_cfg.multi_buffer =
+                                       BNA_STATUS_T_ENABLED;
+                       else
+                               q0->buffer_size =
+                                       bna_enet_mtu_get(&rx->bna->enet);
                        cfg_req->q_cfg[i].ql.rx_buffer_size =
                                htons((u16)q0->buffer_size);
                        break;
        u32 hq_depth;
        u32 dq_depth;
 
-       dq_depth = q_cfg->q_depth;
-       hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+       dq_depth = q_cfg->q0_depth;
+       hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
        cq_depth = dq_depth + hq_depth;
 
        BNA_TO_POWER_OF_2_HIGH(cq_depth);
        struct bna_rxq *q0;
        struct bna_rxq *q1;
        struct bna_intr_info *intr_info;
-       u32 page_count;
+       struct bna_mem_descr *hqunmap_mem;
+       struct bna_mem_descr *dqunmap_mem;
        struct bna_mem_descr *ccb_mem;
        struct bna_mem_descr *rcb_mem;
-       struct bna_mem_descr *unmapq_mem;
        struct bna_mem_descr *cqpt_mem;
        struct bna_mem_descr *cswqpt_mem;
        struct bna_mem_descr *cpage_mem;
        struct bna_mem_descr *dsqpt_mem;
        struct bna_mem_descr *hpage_mem;
        struct bna_mem_descr *dpage_mem;
-       int i;
-       int dpage_count, hpage_count, rcb_idx;
+       u32 dpage_count, hpage_count;
+       u32 hq_idx, dq_idx, rcb_idx;
+       u32 cq_depth, i;
+       u32 page_count;
 
        if (!bna_rx_res_check(rx_mod, rx_cfg))
                return NULL;
        intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
        ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
        rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
-       unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+       dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+       hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
        cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
        cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
        cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
        }
 
        rx->num_paths = rx_cfg->num_paths;
-       for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+       for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+                       i < rx->num_paths; i++) {
                rxp = bna_rxp_get(rx_mod);
                list_add_tail(&rxp->qe, &rx->rxp_q);
                rxp->type = rx_cfg->rxp_type;
                q0->rxp = rxp;
 
                q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-               q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-               rcb_idx++;
-               q0->rcb->q_depth = rx_cfg->q_depth;
+               q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+               rcb_idx++; dq_idx++;
+               q0->rcb->q_depth = rx_cfg->q0_depth;
+               q0->q_depth = rx_cfg->q0_depth;
+               q0->multi_buffer = rx_cfg->q0_multi_buf;
+               q0->buffer_size = rx_cfg->q0_buf_size;
+               q0->num_vecs = rx_cfg->q0_num_vecs;
                q0->rcb->rxq = q0;
                q0->rcb->bnad = bna->bnad;
                q0->rcb->id = 0;
                        q1->rxp = rxp;
 
                        q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-                       q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-                       rcb_idx++;
-                       q1->rcb->q_depth = rx_cfg->q_depth;
+                       q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+                       rcb_idx++; hq_idx++;
+                       q1->rcb->q_depth = rx_cfg->q1_depth;
+                       q1->q_depth = rx_cfg->q1_depth;
+                       q1->multi_buffer = BNA_STATUS_T_DISABLED;
+                       q1->num_vecs = 1;
                        q1->rcb->rxq = q1;
                        q1->rcb->bnad = bna->bnad;
                        q1->rcb->id = 1;
                        q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
                                        rx_cfg->hds_config.forced_offset
-                                       : rx_cfg->small_buff_size;
+                                       : rx_cfg->q1_buf_size;
                        q1->rx_packets = q1->rx_bytes = 0;
                        q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
 
                /* Setup CQ */
 
                rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
-               rxp->cq.ccb->q_depth =  rx_cfg->q_depth +
-                                       ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
-                                       0 : rx_cfg->q_depth);
+               cq_depth = rx_cfg->q0_depth +
+                       ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+                        0 : rx_cfg->q1_depth);
+               /* if multi-buffer is enabled, the sum of q0_depth
+                * and q1_depth need not be a power of 2
+                */
+               BNA_TO_POWER_OF_2_HIGH(cq_depth);
+               rxp->cq.ccb->q_depth = cq_depth;
                rxp->cq.ccb->cq = &rxp->cq;
                rxp->cq.ccb->rcb[0] = q0->rcb;
                q0->rcb->ccb = rxp->cq.ccb;
 
 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
 {
        struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
-       int mtu, order;
+       int order;
 
        bnad_rxq_alloc_uninit(bnad, rcb);
 
-       mtu = bna_enet_mtu_get(&bnad->bna.enet);
-       order = get_order(mtu);
+       order = get_order(rcb->rxq->buffer_size);
+
+       unmap_q->type = BNAD_RXBUF_PAGE;
 
        if (bna_is_small_rxq(rcb->id)) {
                unmap_q->alloc_order = 0;
                unmap_q->map_size = rcb->rxq->buffer_size;
        } else {
-               unmap_q->alloc_order = order;
-               unmap_q->map_size =
-                       (rcb->rxq->buffer_size > 2048) ?
-                       PAGE_SIZE << order : 2048;
+               if (rcb->rxq->multi_buffer) {
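+                       /* multi-buffer: each unmap entry maps exactly
+                        * one buffer_size fragment
+                        */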
+                       unmap_q->alloc_order = 0;
+                       unmap_q->map_size = rcb->rxq->buffer_size;
+                       unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+               } else {
+                       unmap_q->alloc_order = order;
+                       unmap_q->map_size =
+                               (rcb->rxq->buffer_size > 2048) ?
+                               PAGE_SIZE << order : 2048;
+               }
        }
 
        BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
 
-       unmap_q->type = BNAD_RXBUF_PAGE;
-
        return 0;
 }
 
        for (i = 0; i < rcb->q_depth; i++) {
                struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
 
-               if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-                       bnad_rxq_cleanup_page(bnad, unmap);
-               else
+               if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
                        bnad_rxq_cleanup_skb(bnad, unmap);
+               else
+                       bnad_rxq_cleanup_page(bnad, unmap);
        }
        bnad_rxq_alloc_uninit(bnad, rcb);
 }
        if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
                return;
 
-       if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-               bnad_rxq_refill_page(bnad, rcb, to_alloc);
-       else
+       if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
                bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+       else
+               bnad_rxq_refill_page(bnad, rcb, to_alloc);
 }
 
 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
                                BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
-               struct bnad_rx_unmap_q *unmap_q,
-               struct bnad_rx_unmap *unmap,
-               u32 length, u32 flags)
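+/* drop a received frame: release the nvecs buffers posted for it,
+ * starting at sop_ci, without passing anything up the stack
+ */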
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+                   u32 sop_ci, u32 nvecs)
 {
-       struct bnad *bnad = rx_ctrl->bnad;
-       struct sk_buff *skb;
+       struct bnad_rx_unmap_q *unmap_q;
+       struct bnad_rx_unmap *unmap;
+       u32 ci, vec;
 
-       if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
-               skb = napi_get_frags(&rx_ctrl->napi);
-               if (unlikely(!skb))
-                       return NULL;
+       unmap_q = rcb->unmap_q;
+       for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+               unmap = &unmap_q->unmap[ci];
+               BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+               if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+                       bnad_rxq_cleanup_skb(bnad, unmap);
+               else
+                       bnad_rxq_cleanup_page(bnad, unmap);
+       }
+}
+
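+/* gather a multi-buffer frame: unmap each of the nvecs posted pages,
+ * starting at sop_ci, and attach them to skb as page fragments; only
+ * the last fragment is trimmed to last_fraglen
+ */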
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+                       u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+       struct bnad *bnad;
+       u32 ci, vec, len, totlen = 0;
+       struct bnad_rx_unmap_q *unmap_q;
+       struct bnad_rx_unmap *unmap;
+
+       unmap_q = rcb->unmap_q;
+       bnad = rcb->bnad;
+       for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+               unmap = &unmap_q->unmap[ci];
+               BNA_QE_INDX_INC(ci, rcb->q_depth);
 
                dma_unmap_page(&bnad->pcidev->dev,
                                dma_unmap_addr(&unmap->vector, dma_addr),
                                unmap->vector.len, DMA_FROM_DEVICE);
+
+               len = (vec == nvecs) ?
+                       last_fraglen : unmap->vector.len;
+               totlen += len;
+
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                               unmap->page, unmap->page_offset, length);
-               skb->len += length;
-               skb->data_len += length;
-               skb->truesize += length;
+                               unmap->page, unmap->page_offset, len);
 
                unmap->page = NULL;
                unmap->vector.len = 0;
-
-               return skb;
        }
 
-       skb = unmap->skb;
-       BUG_ON(!skb);
+       skb->len += totlen;
+       skb->data_len += totlen;
+       skb->truesize += totlen;
+}
+
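+/* complete a single-buffer frame: unmap the posted skb data, set its
+ * length to len and derive the protocol from the Ethernet header
+ */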
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+                 struct bnad_rx_unmap *unmap, u32 len)
+{
+       prefetch(skb->data);
 
        dma_unmap_single(&bnad->pcidev->dev,
                        dma_unmap_addr(&unmap->vector, dma_addr),
                        unmap->vector.len, DMA_FROM_DEVICE);
 
-       skb_put(skb, length);
-
+       skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, bnad->netdev);
 
        unmap->skb = NULL;
        unmap->vector.len = 0;
-       return skb;
 }
 
 static u32
 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 {
-       struct bna_cq_entry *cq, *cmpl;
+       struct bna_cq_entry *cq, *cmpl, *next_cmpl;
        struct bna_rcb *rcb = NULL;
        struct bnad_rx_unmap_q *unmap_q;
-       struct bnad_rx_unmap *unmap;
-       struct sk_buff *skb;
+       struct bnad_rx_unmap *unmap = NULL;
+       struct sk_buff *skb = NULL;
        struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
        struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
-       u32 packets = 0, length = 0, flags, masked_flags;
+       u32 packets = 0, len = 0, totlen = 0;
+       u32 pi, vec, sop_ci = 0, nvecs = 0;
+       u32 flags, masked_flags;
 
        prefetch(bnad->netdev);
 
        cmpl = &cq[ccb->producer_index];
 
        while (cmpl->valid && (packets < budget)) {
-               packets++;
-               flags = ntohl(cmpl->flags);
-               length = ntohs(cmpl->length);
                BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 
                if (bna_is_small_rxq(cmpl->rxq_id))
                        rcb = ccb->rcb[0];
 
                unmap_q = rcb->unmap_q;
-               unmap = &unmap_q->unmap[rcb->consumer_index];
 
-               if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
-                                       BNA_CQ_EF_FCS_ERROR |
-                                       BNA_CQ_EF_TOO_LONG))) {
-                       if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-                               bnad_rxq_cleanup_page(bnad, unmap);
-                       else
-                               bnad_rxq_cleanup_skb(bnad, unmap);
+               /* start of packet ci */
+               sop_ci = rcb->consumer_index;
+
+               if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+                       unmap = &unmap_q->unmap[sop_ci];
+                       skb = unmap->skb;
+               } else {
+                       skb = napi_get_frags(&rx_ctrl->napi);
+                       if (unlikely(!skb))
+                               break;
+               }
+               prefetch(skb);
+
+               flags = ntohl(cmpl->flags);
+               len = ntohs(cmpl->length);
+               totlen = len;
+               nvecs = 1;
+
+               /* Check all the completions of this frame.
+                * Busy-waiting doesn't help much, so break out
+                * if one of them is not yet valid.
+                */
+               if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+                   (flags & BNA_CQ_EF_EOP) == 0) {
+                       pi = ccb->producer_index;
+                       do {
+                               BNA_QE_INDX_INC(pi, ccb->q_depth);
+                               next_cmpl = &cq[pi];
 
+                               if (!next_cmpl->valid)
+                                       break;
+
+                               len = ntohs(next_cmpl->length);
+                               flags = ntohl(next_cmpl->flags);
+
+                               nvecs++;
+                               totlen += len;
+                       } while ((flags & BNA_CQ_EF_EOP) == 0);
+
+                       if (!next_cmpl->valid)
+                               break;
+               }
+
+               /* TODO: BNA_CQ_EF_LOCAL ? */
+               if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+                                               BNA_CQ_EF_FCS_ERROR |
+                                               BNA_CQ_EF_TOO_LONG))) {
+                       bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
                        rcb->rxq->rx_packets_with_error++;
+
                        goto next;
                }
 
-               skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
-                               length, flags);
+               if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+                       bnad_cq_setup_skb(bnad, skb, unmap, len);
+               else
+                       bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-               if (unlikely(!skb))
-                       break;
+               packets++;
+               rcb->rxq->rx_packets++;
+               rcb->rxq->rx_bytes += totlen;
+               ccb->bytes_per_intr += totlen;
 
                masked_flags = flags & flags_cksum_prot_mask;
 
                else
                        skb_checksum_none_assert(skb);
 
-               rcb->rxq->rx_packets++;
-               rcb->rxq->rx_bytes += length;
-
                if (flags & BNA_CQ_EF_VLAN)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 
-               if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-                       napi_gro_frags(&rx_ctrl->napi);
-               else
+               if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
                        netif_receive_skb(skb);
+               else
+                       napi_gro_frags(&rx_ctrl->napi);
 
 next:
-               cmpl->valid = 0;
-               BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
-               BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+               BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
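+               /* invalidate every completion entry consumed for this
+                * frame and advance the CQ producer index past them
+                */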
+               for (vec = 0; vec < nvecs; vec++) {
+                       cmpl = &cq[ccb->producer_index];
+                       cmpl->valid = 0;
+                       BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+               }
                cmpl = &cq[ccb->producer_index];
        }
 
 static void
 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
 {
+       memset(rx_config, 0, sizeof(*rx_config));
        rx_config->rx_type = BNA_RX_T_REGULAR;
        rx_config->num_paths = bnad->num_rxp_per_rx;
        rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
                memset(&rx_config->rss_config, 0,
                       sizeof(rx_config->rss_config));
        }
+
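+       /* frame_size is the on-wire frame size for the current MTU,
+        * i.e. MTU plus Ethernet/VLAN header and FCS overhead
+        */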
+       rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+       rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+       /* BNA_RXP_SINGLE - one data-buffer queue
+        * BNA_RXP_SLR - one small-buffer and one large-buffer queue
+        * BNA_RXP_HDS - one header-buffer and one data-buffer queue
+        */
+       /* TODO: configurable param for queue type */
        rx_config->rxp_type = BNA_RXP_SLR;
-       rx_config->q_depth = bnad->rxq_depth;
 
-       rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+       if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+           rx_config->frame_size > 4096) {
+               /* though size_routing_enable is set in SLR,
+                * small packets may get routed to the same rxq.
+                * set buf_size to 2048 instead of PAGE_SIZE.
+                */
+               rx_config->q0_buf_size = 2048;
+               /* this should be a multiple of 2 */
+               rx_config->q0_num_vecs = 4;
+               rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+               rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+       } else {
+               rx_config->q0_buf_size = rx_config->frame_size;
+               rx_config->q0_num_vecs = 1;
+               rx_config->q0_depth = bnad->rxq_depth;
+       }
+
+       /* initialize q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+       if (rx_config->rxp_type == BNA_RXP_SLR) {
+               rx_config->q1_depth = bnad->rxq_depth;
+               rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+       }
 
        rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
 }
 }
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
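+/* Destroy and re-create all rx objects so that their queue resources
+ * match the current frame size; returns the number of rx objects for
+ * which setup was attempted.
+ */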
+u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+       struct net_device *netdev = bnad->netdev;
+       u32 err = 0, current_err = 0;
+       u32 rx_id = 0, count = 0;
+       unsigned long flags;
+
+       /* destroy and create new rx objects */
+       for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+               if (!bnad->rx_info[rx_id].rx)
+                       continue;
+               bnad_destroy_rx(bnad, rx_id);
+       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
+       bna_enet_mtu_set(&bnad->bna.enet,
+                        BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+               count++;
+               current_err = bnad_setup_rx(bnad, rx_id);
+               if (current_err && !err) {
+                       err = current_err;
+                       pr_err("RXQ:%u setup failed\n", rx_id);
+               }
+       }
+
+       /* restore rx configuration */
+       if (bnad->rx_info[0].rx && !err) {
+               bnad_restore_vlans(bnad, 0);
+               bnad_enable_default_bcast(bnad);
+               spin_lock_irqsave(&bnad->bna_lock, flags);
+               bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+               bnad_set_rx_mode(netdev);
+       }
+
+       return count;
+}
+
+/* Called with bnad_conf_lock() held */
 void
 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
 {
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        /* Fill Unmap Q memory requirements */
-       BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
-                       rx_config->num_paths +
-                       ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
-                        0 : rx_config->num_paths),
-                       ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
-                        sizeof(struct bnad_rx_unmap_q)));
-
+       BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+                                rx_config->num_paths,
+                       (rx_config->q0_depth *
+                        sizeof(struct bnad_rx_unmap)) +
+                        sizeof(struct bnad_rx_unmap_q));
+
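+       /* a header/small-buffer unmap queue is needed only when the
+        * rx-path carries a second rxq (SLR/HDS)
+        */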
+       if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+               BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+                                        rx_config->num_paths,
+                               (rx_config->q1_depth *
+                                sizeof(struct bnad_rx_unmap) +
+                                sizeof(struct bnad_rx_unmap_q)));
+       }
        /* Allocate resource */
        err = bnad_rx_res_alloc(bnad, res_info, rx_id);
        if (err)
        int err;
        struct bnad *bnad = netdev_priv(netdev);
        struct bna_pause_config pause_config;
-       int mtu;
        unsigned long flags;
 
        mutex_lock(&bnad->conf_mutex);
        pause_config.tx_pause = 0;
        pause_config.rx_pause = 0;
 
-       mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+       bna_enet_mtu_set(&bnad->bna.enet,
+                        BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
        bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
        bna_enet_enable(&bnad->bna.enet);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
 static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
 {
        unsigned long flags;
 
        init_completion(&bnad->bnad_completions.mtu_comp);
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+       bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        wait_for_completion(&bnad->bnad_completions.mtu_comp);
 static int
 bnad_change_mtu(struct net_device *netdev, int new_mtu)
 {
-       int err, mtu = netdev->mtu;
+       int err, mtu;
        struct bnad *bnad = netdev_priv(netdev);
+       u32 rx_count = 0, frame, new_frame;
 
        if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
                return -EINVAL;
 
        mutex_lock(&bnad->conf_mutex);
 
+       mtu = netdev->mtu;
        netdev->mtu = new_mtu;
 
-       mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
-       err = bnad_mtu_set(bnad, mtu);
+       frame = BNAD_FRAME_SIZE(mtu);
+       new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+       /* check if multi-buffer needs to be enabled */
+       if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+           netif_running(bnad->netdev)) {
+               /* only when the frame size crosses the 4K boundary */
+               if ((frame <= 4096 && new_frame > 4096) ||
+                   (frame > 4096 && new_frame <= 4096))
+                       rx_count = bnad_reinit_rx(bnad);
+       }
+
+       /* rx_count > 0 - new rx created
+        *      - Linux set err = 0 and return
+        */
+       err = bnad_mtu_set(bnad, new_frame);
        if (err)
                err = -EBUSY;