* The caller takes responsibility for any necessary locking.
  */
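/*
 * Usage sketch (illustrative, not part of this patch; process_ctx is a
 * hypothetical per-transfer handler). Callers hold ce_lock around the
 * _nolock variant, as the locked wrapper further below does:
 *
 *      void *ctx;
 *
 *      spin_lock_bh(&ar_pci->ce_lock);
 *      while (ath10k_ce_completed_send_next_nolock(ce_state, &ctx) == 0)
 *              process_ctx(ctx);
 *      spin_unlock_bh(&ar_pci->ce_lock);
 */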
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-                                        void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp)
+                                        void **per_transfer_contextp)
 {
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
-       struct ce_desc *sdesc, *sbase;
        unsigned int read_index;
 
        if (src_ring->hw_index == sw_index) {
@@ ... @@
        if (read_index == sw_index)
                return -EIO;
 
-       sbase = src_ring->base_addr_owner_space;
-       sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
-
-       /* Return data from completed source descriptor */
-       *bufferp = __le32_to_cpu(sdesc->addr);
-       *nbytesp = __le16_to_cpu(sdesc->nbytes);
-       *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-                          CE_DESC_FLAGS_META_DATA);
-
        if (per_transfer_contextp)
                *per_transfer_contextp =
                        src_ring->per_transfer_context[sw_index];
@@ ... @@
 }
 
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-                                 void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp)
+                                 void **per_transfer_contextp)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
        spin_lock_bh(&ar_pci->ce_lock);
        ret = ath10k_ce_completed_send_next_nolock(ce_state,
-                                                  per_transfer_contextp,
-                                                  bufferp, nbytesp,
-                                                  transfer_idp);
+                                                  per_transfer_contextp);
        spin_unlock_bh(&ar_pci->ce_lock);
 
        return ret;
 
@@ ... @@
/*
 * Pops 1 completed send buffer from Source ring.
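 * Returns 0 if a completed send was popped, -EIO if none is pending.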
  */
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-                                 void **per_transfer_contextp,
-                                 u32 *bufferp,
-                                 unsigned int *nbytesp,
-                                 unsigned int *transfer_idp);
+                                 void **per_transfer_contextp);
 
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-                                        void **per_transfer_contextp,
-                                        u32 *bufferp,
-                                        unsigned int *nbytesp,
-                                        unsigned int *transfer_idp);
+                                        void **per_transfer_contextp);
 
 /*==================CE Engine Initialization=======================*/
 
 
@@ ... @@
                        goto done;
 
                i = 0;
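                /* Diag access is polled rather than interrupt driven, so
                 * busy-wait (bounded by DIAG_ACCESS_CE_TIMEOUT_MS) for the
                 * send to complete.
                 */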
-               while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id) != 0) {
+               while (ath10k_ce_completed_send_next_nolock(ce_diag,
+                                                           NULL) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
@@ ... @@
                        goto done;
 
                i = 0;
-               while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-                                                           &completed_nbytes,
-                                                           &id) != 0) {
+               while (ath10k_ce_completed_send_next_nolock(ce_diag,
+                                                           NULL) != 0) {
                        mdelay(1);
 
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ ... @@
        struct ath10k *ar = ce_state->ar;
        struct sk_buff_head list;
        struct sk_buff *skb;
-       u32 ce_data;
-       unsigned int nbytes;
-       unsigned int transfer_id;
 
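        /* Completed sends are first collected on a local list; their TX
         * completion handlers run once the ring has been drained.
         */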
        __skb_queue_head_init(&list);
-       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-                                            &nbytes, &transfer_id) == 0) {
+       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;
@@ ... @@
 {
        struct ath10k *ar = ce_state->ar;
        struct sk_buff *skb;
-       u32 ce_data;
-       unsigned int nbytes;
-       unsigned int transfer_id;
 
-       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-                                            &nbytes, &transfer_id) == 0) {
+       while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!skb)
                        continue;
@@ ... @@
 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
        struct bmi_xfer *xfer;
-       u32 ce_data;
-       unsigned int nbytes;
-       unsigned int transfer_id;
 
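        /* BMI exchanges run one transfer at a time, so a single pop either
         * yields the in-flight xfer or nothing.
         */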
-       if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
-                                         &nbytes, &transfer_id))
+       if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
                return;
 
        xfer->tx_done = true;