__skb_incr_checksum_unnecessary(skb);
  }
  
 -#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 -int aq_ring_rx_clean(struct aq_ring_s *self,
 -                   struct napi_struct *napi,
 -                   int *work_done,
 -                   int budget)
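 +/* ndo_xdp_xmit handler: pick a TX ring from the submitting CPU, transmit
 + * each frame, and report how many were actually queued (frames rejected
 + * with NETDEV_TX_BUSY count as dropped).
 + */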
 +int aq_xdp_xmit(struct net_device *dev, int num_frames,
 +              struct xdp_frame **frames, u32 flags)
 +{
 +      struct aq_nic_s *aq_nic = netdev_priv(dev);
 +      unsigned int vec, i, drop = 0;
 +      int cpu = smp_processor_id();
 +      struct aq_nic_cfg_s *aq_cfg;
 +      struct aq_ring_s *ring;
 +
 +      aq_cfg = aq_nic_get_cfg(aq_nic);
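 +      /* Spread XDP transmits across the TX rings based on the submitting CPU */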
 +      vec = cpu % aq_cfg->vecs;
 +      ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
 +
 +      for (i = 0; i < num_frames; i++) {
 +              struct xdp_frame *xdpf = frames[i];
 +
 +              if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
 +                      drop++;
 +      }
 +
 +      return num_frames - drop;
 +}
 +
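 +/* Run the attached XDP program, if any, on one received buffer.  XDP_PASS
 + * (or no program) hands back a freshly built skb for the regular stack
 + * path; for every other verdict the buffer is consumed here and NULL or
 + * an ERR_PTR-encoded value is returned so the caller skips skb handling.
 + */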
 +static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
 +                                     struct xdp_buff *xdp,
 +                                     struct aq_ring_s *rx_ring,
 +                                     struct aq_ring_buff_s *buff)
 +{
 +      int result = NETDEV_TX_BUSY;
 +      struct aq_ring_s *tx_ring;
 +      struct xdp_frame *xdpf;
 +      struct bpf_prog *prog;
 +      u32 act = XDP_ABORTED;
 +      struct sk_buff *skb;
 +
 +      u64_stats_update_begin(&rx_ring->stats.rx.syncp);
 +      ++rx_ring->stats.rx.packets;
 +      rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
 +      u64_stats_update_end(&rx_ring->stats.rx.syncp);
 +
 +      prog = READ_ONCE(rx_ring->xdp_prog);
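 +      /* No program attached: build an skb and let the stack handle it */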
 +      if (!prog)
 +              goto pass;
 +
 +      prefetchw(xdp->data_hard_start); /* xdp_frame write */
 +
 +      /* Abort multi-buffer packets when the program cannot handle frags */
 +      if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
 +              goto out_aborted;
 +
 +      act = bpf_prog_run_xdp(prog, xdp);
 +      switch (act) {
 +      case XDP_PASS:
 +pass:
 +              xdpf = xdp_convert_buff_to_frame(xdp);
 +              if (unlikely(!xdpf))
 +                      goto out_aborted;
 +              skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev);
 +              if (!skb)
 +                      goto out_aborted;
 +              u64_stats_update_begin(&rx_ring->stats.rx.syncp);
 +              ++rx_ring->stats.rx.xdp_pass;
 +              u64_stats_update_end(&rx_ring->stats.rx.syncp);
 +              aq_get_rxpages_xdp(buff, xdp);
 +              return skb;
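 +      /* Bounce the frame back out on the TX ring paired with this RX ring */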
 +      case XDP_TX:
 +              xdpf = xdp_convert_buff_to_frame(xdp);
 +              if (unlikely(!xdpf))
 +                      goto out_aborted;
 +              tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
 +              result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
 +              if (result == NETDEV_TX_BUSY)
 +                      goto out_aborted;
 +              u64_stats_update_begin(&rx_ring->stats.rx.syncp);
 +              ++rx_ring->stats.rx.xdp_tx;
 +              u64_stats_update_end(&rx_ring->stats.rx.syncp);
 +              aq_get_rxpages_xdp(buff, xdp);
 +              break;
 +      case XDP_REDIRECT:
 +              if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
 +                      goto out_aborted;
 +              xdp_do_flush();
 +              u64_stats_update_begin(&rx_ring->stats.rx.syncp);
 +              ++rx_ring->stats.rx.xdp_redirect;
 +              u64_stats_update_end(&rx_ring->stats.rx.syncp);
 +              aq_get_rxpages_xdp(buff, xdp);
 +              break;
 +      default:
 +              fallthrough;
 +      case XDP_ABORTED:
 +out_aborted:
 +              u64_stats_update_begin(&rx_ring->stats.rx.syncp);
 +              ++rx_ring->stats.rx.xdp_aborted;
 +              u64_stats_update_end(&rx_ring->stats.rx.syncp);
 +              trace_xdp_exception(aq_nic->ndev, prog, act);
 +              bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
 +              break;
 +      case XDP_DROP:
 +              u64_stats_update_begin(&rx_ring->stats.rx.syncp);
 +              ++rx_ring->stats.rx.xdp_drop;
 +              u64_stats_update_end(&rx_ring->stats.rx.syncp);
 +              break;
 +      }
 +
 +      return ERR_PTR(-result);
 +}
 +
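 +/* Attach the chained RX descriptors of a multi-buffer packet as fragments
 + * of the xdp_buff: sync each page for CPU access, mark the descriptor as
 + * cleaned and fold its checksum flags into the head buffer.  Returns true
 + * if the packet carries more fragments than MAX_SKB_FRAGS allows.
 + */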
 +static bool aq_add_rx_fragment(struct device *dev,
 +                             struct aq_ring_s *ring,
 +                             struct aq_ring_buff_s *buff,
 +                             struct xdp_buff *xdp)
 +{
 +      struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 +      struct aq_ring_buff_s *buff_ = buff;
 +
 +      memset(sinfo, 0, sizeof(*sinfo));
 +      do {
 +              skb_frag_t *frag;
 +
 +              if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
 +                      return true;
 +
 +              frag = &sinfo->frags[sinfo->nr_frags++];
 +              buff_ = &ring->buff_ring[buff_->next];
 +              dma_sync_single_range_for_cpu(dev,
 +                                            buff_->rxdata.daddr,
 +                                            buff_->rxdata.pg_off,
 +                                            buff_->len,
 +                                            DMA_FROM_DEVICE);
 +              skb_frag_off_set(frag, buff_->rxdata.pg_off);
 +              skb_frag_size_set(frag, buff_->len);
 +              sinfo->xdp_frags_size += buff_->len;
 +              __skb_frag_set_page(frag, buff_->rxdata.page);
 +
 +              buff_->is_cleaned = 1;
 +
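 +              /* Fold per-fragment checksum status into the head buffer */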
 +              buff->is_ip_cso &= buff_->is_ip_cso;
 +              buff->is_udp_cso &= buff_->is_udp_cso;
 +              buff->is_tcp_cso &= buff_->is_tcp_cso;
 +              buff->is_cso_err |= buff_->is_cso_err;
 +
 +              if (page_is_pfmemalloc(buff_->rxdata.page))
 +                      xdp_buff_set_frag_pfmemalloc(xdp);
 +
 +      } while (!buff_->is_eop);
 +
 +      xdp_buff_set_frags_flag(xdp);
 +
 +      return false;
 +}
 +
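 +/* Former aq_ring_rx_clean, kept as a static helper for the plain skb
 + * (non-XDP) receive path.
 + */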
 +static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
 +                            int *work_done, int budget)
  {
        struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
-       bool is_rsc_completed = true;
        int err = 0;
  
        for (; (self->sw_head != self->hw_head) && budget;
                       ALIGN(hdr_len, sizeof(long)));
  
                if (buff->len - hdr_len > 0) {
-                       skb_add_rx_frag(skb, 0, buff->rxdata.page,
+                       skb_add_rx_frag(skb, i++, buff->rxdata.page,
                                        buff->rxdata.pg_off + hdr_len,
                                        buff->len - hdr_len,
 -                                      AQ_CFG_RX_FRAME_MAX);
 +                                      self->frame_max);
                        page_ref_inc(buff->rxdata.page);
                }