*/
        qp = vsi->base_queue;
        vector = vsi->base_vector;
-       q_vector = vsi->q_vectors;
-       for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                q_vector->rx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
  **/
 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 {
-       struct i40e_q_vector *q_vector = vsi->q_vectors;
+       struct i40e_q_vector *q_vector = vsi->q_vectors[0];
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        u32 val;
        int vector, err;
 
        for (vector = 0; vector < q_vectors; vector++) {
-               struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+               struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
                if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                i40e_flush(hw);
 
                if (!test_bit(__I40E_DOWN, &pf->state))
-                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
        }
 
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
  **/
 static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
 {
-       struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
        struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
 
        rx_ring->q_vector = q_vector;
  **/
 static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
 {
-       struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
        struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
 
        tx_ring->q_vector = q_vector;
        pf->flags |= I40E_FLAG_IN_NETPOLL;
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = 0; i < vsi->num_q_vectors; i++)
-                       i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+                       i40e_msix_clean_rings(0, vsi->q_vectors[i]);
        } else {
                i40e_intr(pf->pdev->irq, netdev);
        }
                        u16 vector = i + base;
 
                        /* free only the irqs that were actually requested */
-                       if (vsi->q_vectors[i].num_ringpairs == 0)
+                       if (vsi->q_vectors[i]->num_ringpairs == 0)
                                continue;
 
                        /* clear the affinity_mask in the IRQ descriptor */
                        irq_set_affinity_hint(pf->msix_entries[vector].vector,
                                              NULL);
                        free_irq(pf->msix_entries[vector].vector,
-                                &vsi->q_vectors[i]);
+                                vsi->q_vectors[i]);
 
                        /* Tear down the interrupt queue link list
                         *
        }
 }
 
+/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+       int r_idx;
+
+       /* slot may already be empty (partial-allocation unwind) */
+       if (!q_vector)
+               return;
+
+       /* disassociate q_vector from rings */
+       for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
+               q_vector->tx.ring[r_idx]->q_vector = NULL;
+       for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
+               q_vector->rx.ring[r_idx]->q_vector = NULL;
+
+       /* only VSI w/ an associated netdev is set up w/ NAPI */
+       if (vsi->netdev)
+               netif_napi_del(&q_vector->napi);
+
+       vsi->q_vectors[v_idx] = NULL;
+
+       /* defer the actual free past an RCU grace period so any
+        * concurrent reader still dereferencing this q_vector pointer
+        * finishes first -- NOTE(review): assumes such readers run under
+        * rcu_read_lock(); confirm against the interrupt/poll paths
+        */
+       kfree_rcu(q_vector, rcu);
+}
+
 /**
  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
  * @vsi: the VSI being un-configured
 {
        int v_idx;
 
-       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
-               struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
-               int r_idx;
-
-               if (!q_vector)
-                       continue;
-
-               /* disassociate q_vector from rings */
-               for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-                       q_vector->tx.ring[r_idx]->q_vector = NULL;
-               for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-                       q_vector->rx.ring[r_idx]->q_vector = NULL;
-
-               /* only VSI w/ an associated netdev is set up w/ NAPI */
-               if (vsi->netdev)
-                       netif_napi_del(&q_vector->napi);
-       }
-       kfree(vsi->q_vectors);
+       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+               i40e_free_q_vector(vsi, v_idx);
 }
 
 /**
                return;
 
        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_enable(&vsi->q_vectors[q_idx].napi);
+               napi_enable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
                return;
 
        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_disable(&vsi->q_vectors[q_idx].napi);
+               napi_disable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
 {
        int ret = -ENODEV;
        struct i40e_vsi *vsi;
+       int sz_vectors;
        int vsi_idx;
        int i;
 
                vsi_idx = i;             /* Found one! */
        } else {
                ret = -ENODEV;
-               goto err_alloc_vsi;  /* out of VSI slots! */
+               goto unlock_pf;  /* out of VSI slots! */
        }
        pf->next_vsi = ++i;
 
        vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
        if (!vsi) {
                ret = -ENOMEM;
-               goto err_alloc_vsi;
+               goto unlock_pf;
        }
        vsi->type = type;
        vsi->back = pf;
 
        i40e_set_num_rings_in_vsi(vsi);
 
+       /* allocate memory for q_vector pointers
+        * (array of pointers, one per vector; kzalloc so unused slots
+        * read as NULL for the free/unwind paths)
+        */
+       sz_vectors = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
+       vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+       if (!vsi->q_vectors) {
+               ret = -ENOMEM;
+               goto err_vectors;
+       }
+
        /* Setup default MSIX irq handler for VSI */
        i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
        pf->vsi[vsi_idx] = vsi;
        ret = vsi_idx;
-err_alloc_vsi:
+       goto unlock_pf;
+
+err_vectors:
+       pf->next_vsi = i - 1;
+       kfree(vsi);
+unlock_pf:
        mutex_unlock(&pf->switch_mutex);
        return ret;
 }
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+       /* free the ring and vector containers */
+       kfree(vsi->q_vectors);
+
        pf->vsi[vsi->idx] = NULL;
        if (vsi->idx < pf->next_vsi)
                pf->next_vsi = vsi->idx;
        return err;
 }
 
+/**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector;
+
+       /* allocate q_vector */
+       q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+       if (!q_vector)
+               return -ENOMEM;
+
+       q_vector->vsi = vsi;
+       q_vector->v_idx = v_idx;
+       /* hint affinity by vector index to spread vectors across CPUs */
+       cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+       /* only VSIs with an associated netdev get a NAPI context */
+       if (vsi->netdev)
+               netif_napi_add(vsi->netdev, &q_vector->napi,
+                              i40e_napi_poll, vsi->work_limit);
+
+       /* tie q_vector and vsi together; published last so the slot is
+        * either NULL or fully initialized
+        */
+       vsi->q_vectors[v_idx] = q_vector;
+
+       return 0;
+}
+
 /**
  * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
 {
        struct i40e_pf *pf = vsi->back;
        int v_idx, num_q_vectors;
+       int err;
 
        /* if not MSIX, give the one vector only to the LAN VSI */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
        else
                return -EINVAL;
 
-       vsi->q_vectors = kcalloc(num_q_vectors,
-                                sizeof(struct i40e_q_vector),
-                                GFP_KERNEL);
-       if (!vsi->q_vectors)
-               return -ENOMEM;
-
        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               vsi->q_vectors[v_idx].vsi = vsi;
-               vsi->q_vectors[v_idx].v_idx = v_idx;
-               cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
-               if (vsi->netdev)
-                       netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
-                                      i40e_napi_poll, vsi->work_limit);
+               err = i40e_alloc_q_vector(vsi, v_idx);
+               if (err)
+                       goto err_out;
        }
 
        return 0;
+
+err_out:
+       while (v_idx--)
+               i40e_free_q_vector(vsi, v_idx);
+
+       return err;
 }
 
 /**
        int ret = -ENOENT;
        struct i40e_pf *pf = vsi->back;
 
-       if (vsi->q_vectors) {
+       if (vsi->q_vectors[0]) {
                dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
                         vsi->seid);
                return -EEXIST;