 #include "txrx.h"
 #include "debug.h"
 
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
+{
+       int exp;
+       int factor;
+
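+       /* The entry encodes queue depth as roughly factor * 128 * 8^exp */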
+       exp = 0;
+       factor = count >> 7;
+
+       while (factor >= 64 && exp < 4) {
+               factor >>= 3;
+               exp++;
+       }
+
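+       /* Depth too large to encode; saturate the entry */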
+       if (exp == 4)
+               return 0xff;
+
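+       /* Don't let rounding make a non-empty queue look empty */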
+       if (count > 0)
+               factor = max(1, factor);
+
+       return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+              SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
+
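+/* Recompute the shared queue state entry for a single txq. Caller must
+ * hold htt.tx_lock; the update reaches the device only after a
+ * subsequent __ath10k_htt_tx_txq_sync().
+ */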
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+                                      struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_sta *arsta;
+       struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+       unsigned long frame_cnt;
+       unsigned long byte_cnt;
+       int idx;
+       u32 bit;
+       u16 peer_id;
+       u8 tid;
+       u8 count;
+
+       lockdep_assert_held(&ar->htt.tx_lock);
+
+       if (!ar->htt.tx_q_state.enabled)
+               return;
+
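+       /* txqs without a station are accounted to the vif's peer id */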
+       if (txq->sta) {
+               arsta = (void *)txq->sta->drv_priv;
+               peer_id = arsta->peer_id;
+       } else {
+               peer_id = arvif->peer_id;
+       }
+
+       tid = txq->tid;
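+       /* map[] packs one bit per peer into 32-bit words */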
+       bit = BIT(peer_id % 32);
+       idx = peer_id / 32;
+
+       ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+       count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+       if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+           unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+               ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+                           peer_id, tid);
+               return;
+       }
+
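+       /* Publish the encoded depth and mirror it in the non-empty bitmap */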
+       ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+       ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+       ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+                  peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+       u32 seq;
+       size_t size;
+
+       lockdep_assert_held(&ar->htt.tx_lock);
+
+       if (!ar->htt.tx_q_state.enabled)
+               return;
+
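+       /* Advance the sequence number to mark a new state snapshot */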
+       seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+       seq++;
+       ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+                  seq);
+
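+       /* Make the CPU's updates visible to the device */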
+       size = sizeof(*ar->htt.tx_q_state.vaddr);
+       dma_sync_single_for_device(ar->dev,
+                                  ar->htt.tx_q_state.paddr,
+                                  size,
+                                  DMA_TO_DEVICE);
+}
+
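+/* Recalculate one txq's state entry and commit it to the device */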
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+                             struct ieee80211_txq *txq)
+{
+       struct ath10k *ar = hw->priv;
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       __ath10k_htt_tx_txq_recalc(hw, txq);
+       __ath10k_htt_tx_txq_sync(ar);
+       spin_unlock_bh(&ar->htt.tx_lock);
+}
+
 void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
                               bool is_mgmt)
 {