                    arg->packet_id == wvif->bss_loss_confirm_id)
                        wfx_cqm_bssloss_sm(wvif, 0, 0, 1);
        }
-       wfx_pending_remove(wvif->wdev, skb);
+       wfx_skb_dtor(wvif->wdev, skb);
 }
 
 static void wfx_notify_buffered_tx(struct wfx_vif *wvif, struct sk_buff *skb)
 
        return 0;
 }
 
-int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
-{
-       struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
-
-       WARN_ON(skb_get_queue_mapping(skb) > 3);
-       WARN_ON(!atomic_read(&queue->pending_frames));
-
-       atomic_dec(&queue->pending_frames);
-       skb_unlink(skb, &wdev->tx_pending);
-       wfx_skb_dtor(wdev, skb);
-
-       return 0;
-}
-
 struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
 {
-       struct sk_buff *skb;
+       struct wfx_queue *queue;
        struct hif_req_tx *req;
+       struct sk_buff *skb;
 
        spin_lock_bh(&wdev->tx_pending.lock);
        skb_queue_walk(&wdev->tx_pending, skb) {
                req = wfx_skb_txreq(skb);
                if (req->packet_id == packet_id) {
                        spin_unlock_bh(&wdev->tx_pending.lock);
+                       queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+                       WARN_ON(skb_get_queue_mapping(skb) > 3);
+                       WARN_ON(!atomic_read(&queue->pending_frames));
+                       atomic_dec(&queue->pending_frames);
+                       skb_unlink(skb, &wdev->tx_pending);
                        return skb;
                }
        }
 
 
 
 struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id);
-int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb);
 int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb);
 unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
                                          struct sk_buff *skb);