*/
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                         void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp,
-                                        unsigned int *flagsp)
+                                        unsigned int *nbytesp)
 {
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
-       struct ath10k *ar = ce_state->ar;
        unsigned int sw_index = dest_ring->sw_index;
 
        struct ce_desc *base = dest_ring->base_addr_owner_space;
        desc->nbytes = 0;
 
        /* Return data from completed destination descriptor */
-       *bufferp = __le32_to_cpu(sdesc.addr);
        *nbytesp = nbytes;
-       *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
-       if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
-               *flagsp = CE_RECV_FLAG_SWAPPED;
-       else
-               *flagsp = 0;
 
        if (per_transfer_contextp)
                *per_transfer_contextp =
                        dest_ring->per_transfer_context[sw_index];
 
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp,
-                                 unsigned int *flagsp)
+                                 unsigned int *nbytesp)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_recv_next_nolock(ce_state,
                                                   per_transfer_contextp,
-                                                  bufferp, nbytesp,
-                                                  transfer_idp, flagsp);
+                                                  nbytesp);
        spin_unlock_bh(&ar_pci->ce_lock);
 
        return ret;
 
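With bufferp, transfer_idp and flagsp removed, a caller that still needs the
DMA address of the completed buffer recovers it through the per-transfer
context instead, which is exactly what the diag read/write hunks further down
do. A minimal sketch of that pattern, using only identifiers that appear in
those hunks (surrounding setup such as the DMA mapping and the retry timeout
is elided):

	u32 ce_data;		/* bus address of an already-mapped buffer,
				 * set up earlier in the function (elided) */
	u32 *buf;		/* filled in from the per-transfer context */
	unsigned int completed_nbytes;
	int ret;

	/* hand the buffer's own address in as the per-transfer context */
	ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);

	/* on completion the context pointer comes back in place of *bufferp */
	while (ath10k_ce_completed_recv_next_nolock(ce_diag, (void **)&buf,
						    &completed_nbytes) != 0)
		mdelay(1);

	/* same sanity check as before the change, now via the context */
	if (*buf != ce_data)
		ret = -EIO;
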
  */
 int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp,
-                                 unsigned int *flagsp);
+                                 unsigned int *nbytesp);
 /*
  * Supply data for the next completed unprocessed send descriptor.
  * Pops 1 completed send buffer from Source ring.
 
 int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                         void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp,
-                                        unsigned int *flagsp);
+                                        unsigned int *nbytesp);
 
 /*
  * Support clean shutdown by allowing the caller to cancel
 
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
-       u32 buf;
+       u32 *buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-       unsigned int id;
-       unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);
 
-               ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+               ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
                if (ret != 0)
                        goto done;
 
                }
 
                i = 0;
-               while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id, &flags) != 0) {
+               while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+                                                           (void **)&buf,
+                                                           &completed_nbytes)
+                                                               != 0) {
                        mdelay(1);
 
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                        goto done;
                }
 
-               if (buf != ce_data) {
+               if (*buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
-       u32 buf;
+       u32 *buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
-       unsigned int id;
-       unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
 
                /* Set up to receive directly into Target(!) address */
-               ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+               ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
                if (ret != 0)
                        goto done;
 
                }
 
                i = 0;
-               while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id, &flags) != 0) {
+               while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+                                                           (void **)&buf,
+                                                           &completed_nbytes)
+                                                               != 0) {
                        mdelay(1);
 
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                        goto done;
                }
 
-               if (buf != address) {
+               if (*buf != address) {
                        ret = -EIO;
                        goto done;
                }
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
-       u32 ce_data;
        unsigned int nbytes, max_nbytes;
-       unsigned int transfer_id;
-       unsigned int flags;
 
        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
-                                            &ce_data, &nbytes, &transfer_id,
-                                            &flags) == 0) {
+                                            &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
 {
        struct ath10k *ar = ce_state->ar;
        struct bmi_xfer *xfer;
-       u32 ce_data;
        unsigned int nbytes;
-       unsigned int transfer_id;
-       unsigned int flags;
 
-       if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
-                                         &nbytes, &transfer_id, &flags))
+       if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+                                         &nbytes))
                return;
 
        if (WARN_ON_ONCE(!xfer))