  * @page: driver's pointer to the rxb page
  * @invalid: rxb is in driver ownership - not owned by HW
  * @vid: index of this rxb in the global table
+ * @size: size of the data used from the buffer
  */
 struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        u16 vid;
        bool invalid;
        struct list_head list;
+       u32 size;
 };
 
 /**
        IWL_CD_STTS_REPLAY_ERR,
 };
 
-#define IWL_RX_TD_TYPE         0xff000000
-#define IWL_RX_TD_SIZE         0x00ffffff
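+/* type_n_size layout: TD type in the top byte, buffer size in bits 0-23 */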
+#define IWL_RX_TD_TYPE_MSK     0xff000000
+#define IWL_RX_TD_SIZE_MSK     0x00ffffff
+#define IWL_RX_TD_SIZE_2K      BIT(11)
+#define IWL_RX_TD_TYPE         0
 
 /**
  * struct iwl_rx_transfer_desc - transfer descriptor
  * @id: queue index
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
  *     Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ *     In 22560 devices it points to a list of struct iwl_rx_transfer_desc
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
  * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
        int id;
        void *bd;
        dma_addr_t bd_dma;
-       __le32 *used_bd;
+       void *used_bd;
        dma_addr_t used_bd_dma;
        __le16 *tr_tail;
        dma_addr_t tr_tail_dma;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update;
-       struct iwl_rb_status *rb_stts;
+       void *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
        struct napi_struct napi;
        return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
 }
 
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @trans: the transport
+ * @rxq: the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+                                           struct iwl_rxq *rxq)
+{
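+       /*
+        * On 22560+ devices rb_stts points at a bare __le16 closed-RB
+        * count; earlier devices use the full struct iwl_rb_status.
+        */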
+       if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+               __le16 *rb_stts = rxq->rb_stts;
+
+               return READ_ONCE(*rb_stts);
+       } else {
+               struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+               return READ_ONCE(rb_stts->closed_rb_num);
+       }
+}
+
 /**
  * iwl_queue_dec_wrap - decrement queue index, wrap back to end
  * @index -- current index
 
        }
 }
 
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+                               struct iwl_rxq *rxq,
+                               struct iwl_rx_mem_buffer *rxb)
+{
+       if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+               struct iwl_rx_transfer_desc *bd = rxq->bd;
+
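+               /*
+                * Program the TD type and buffer size; the 2K size is
+                * written shifted right by 8, i.e. the size field is
+                * apparently in 256-byte units.
+                */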
+               bd[rxq->write].type_n_size =
+                       cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+                       ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+               bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+               bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+       } else {
+               __le64 *bd = rxq->bd;
+
+               bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+       }
+}
+
 /*
  * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
  */
 
        spin_lock(&rxq->lock);
        while (rxq->free_count) {
-               __le64 *bd = (__le64 *)rxq->bd;
-
                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                /* The first 12 bits are expected to be empty */
                WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
                /* Point to Rx buffer via next RBD in circular buffer */
-               bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+               iwl_pcie_restock_bd(trans, rxq, rxb);
                rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
                rxq->free_count--;
        }
        iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
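+/*
+ * Size of one entry in the RBD free list: a full transfer descriptor
+ * on 22560+ devices, a 64-bit address on other multi-queue devices,
+ * and a 32-bit address otherwise.
+ */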
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+{
+       struct iwl_rx_transfer_desc *rx_td;
+
+       if (use_rx_td)
+               return sizeof(*rx_td);
+
+       return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+               sizeof(__le32);
+}
+
 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
                                  struct iwl_rxq *rxq)
 {
        struct device *dev = trans->dev;
-       int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-                                                     sizeof(__le32);
+       struct iwl_rx_completion_desc *rx_cd;
+       bool use_rx_td = (trans->cfg->device_family >=
+                         IWL_DEVICE_FAMILY_22560);
+       int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
 
        if (rxq->bd)
                dma_free_coherent(dev, free_size * rxq->queue_size,
                                  rxq->bd, rxq->bd_dma);
        rxq->bd_dma = 0;
        rxq->bd = NULL;
 
        if (rxq->rb_stts)
                dma_free_coherent(trans->dev,
+                                 use_rx_td ? sizeof(__le16) :
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        rxq->rb_stts_dma = 0;
        rxq->rb_stts = NULL;
 
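+       /* used_bd holds completion descriptors on 22560+, else __le32 */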
        if (rxq->used_bd)
-               dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
+               dma_free_coherent(dev,
+                                 (use_rx_td ? sizeof(*rx_cd) :
+                                  sizeof(__le32)) * rxq->queue_size,
                                  rxq->used_bd, rxq->used_bd_dma);
        rxq->used_bd_dma = 0;
        rxq->used_bd = NULL;
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct device *dev = trans->dev;
+       struct iwl_rx_completion_desc *rx_cd;
        int i;
-       int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-                                                     sizeof(__le32);
+       int free_size;
+       bool use_rx_td = (trans->cfg->device_family >=
+                         IWL_DEVICE_FAMILY_22560);
 
        spin_lock_init(&rxq->lock);
        if (trans->cfg->mq_rx_supported)
        else
                rxq->queue_size = RX_QUEUE_SIZE;
 
+       free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
        /*
         * Allocate the circular buffer of Read Buffer Descriptors
         * (RBDs)
 
        if (trans->cfg->mq_rx_supported) {
                rxq->used_bd = dma_zalloc_coherent(dev,
-                                                  sizeof(__le32) *
+                                                  (use_rx_td ?
+                                                  sizeof(*rx_cd) :
+                                                  sizeof(__le32)) *
                                                   rxq->queue_size,
                                                   &rxq->used_bd_dma,
                                                   GFP_KERNEL);
        }
 
        /* Allocate the driver's pointer to receive buffer status */
-       rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+       rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+                                          sizeof(__le16) :
+                                          sizeof(struct iwl_rb_status),
                                           &rxq->rb_stts_dma,
                                           GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err;
 
-       if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+       if (!use_rx_td)
                return 0;
 
        /* Allocate the driver's pointer to TR tail */
                                           GFP_KERNEL);
        if (!rxq->cr_tail)
                goto err;
+       /*
+        * W/A: on 22560 device step Z0 the CR tail must be non-zero
+        * TODO: remove this when we stop supporting step Z0
+        */
+       *rxq->cr_tail = cpu_to_le16(500);
 
        return 0;
 
                rxq->read = 0;
                rxq->write = 0;
                rxq->write_actual = 0;
-               memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+               memset(rxq->rb_stts, 0,
+                      (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+                      sizeof(__le16) : sizeof(struct iwl_rb_status));
 
                iwl_pcie_rx_init_rxb_lists(rxq);
 
                }
 
                page_stolen |= rxcb._page_stolen;
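+               /*
+                * 22560+ devices report the used size per completion
+                * descriptor, so only a single frame per buffer is
+                * expected here.
+                */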
+               if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+                       break;
                offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
        }
 
        spin_lock(&rxq->lock);
        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
-       r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+       r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
        i = rxq->read;
 
        /* W/A 9000 device step A0 wrap-around bug */
                        emergency = true;
 
                if (trans->cfg->mq_rx_supported) {
+                       u16 vid;
                        /*
-                        * used_bd is a 32 bit but only 12 are used to retrieve
-                        * the vid
+                        * used_bd entries are 32 bit (a 16 bit rbid on
+                        * 22560 devices), but only the 12 LSBs are used
+                        * to retrieve the vid
                         */
-                       u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
+                       if (trans->cfg->device_family >=
+                           IWL_DEVICE_FAMILY_22560) {
+                               struct iwl_rx_completion_desc *rx_cd =
+                                       &((struct iwl_rx_completion_desc *)
+                                         rxq->used_bd)[i];
+
+                               vid = le16_to_cpu(rx_cd->rbid) & 0x0FFF;
+                       } else {
+                               __le32 *used =
+                                       &((__le32 *)rxq->used_bd)[i];
+
+                               vid = le32_to_cpu(*used) & 0x0FFF;
+                       }
 
                        if (WARN(!vid ||
                                 vid > ARRAY_SIZE(trans_pcie->global_table),
                                iwl_force_nmi(trans);
                                goto out;
                        }
+                       if (trans->cfg->device_family >=
+                           IWL_DEVICE_FAMILY_22560) {
+                               struct iwl_rx_completion_desc *rx_cd =
+                                       &((struct iwl_rx_completion_desc *)
+                                         rxq->used_bd)[i];
+
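+                               /*
+                                * The device reports how much of the
+                                * buffer was actually used in the
+                                * completion descriptor.
+                                */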
+                               rxb->size = le32_to_cpu(rx_cd->size) &
+                                       IWL_RX_CD_SIZE;
+                       }
+
                        rxb->invalid = true;
                } else {
                        rxb = rxq->queue[i];
 out:
        /* Backtrack one entry */
        rxq->read = i;
+       /* update cr tail with the rxq read pointer */
+       if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+               *rxq->cr_tail = cpu_to_le16(r);
        spin_unlock(&rxq->lock);
 
        /*