idpf: enable XSk features and ndo_xsk_wakeup
author     Alexander Lobakin <aleksander.lobakin@intel.com>
           Thu, 11 Sep 2025 16:22:33 +0000 (18:22 +0200)
committer  Tony Nguyen <anthony.l.nguyen@intel.com>
           Wed, 24 Sep 2025 17:34:45 +0000 (10:34 -0700)
Now that the AF_XDP functionality is fully implemented, advertise the XSk
XDP features and add the .ndo_xsk_wakeup() callback, so that AF_XDP can
actually be used with this driver.
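
With the callback in place, userspace can drive the standard AF_XDP
need-wakeup flow: check a ring's need-wakeup flag and only kick the kernel
with a dummy syscall when it is set. Below is a minimal sketch using the
libxdp xsk helpers (kick_tx_if_needed() is a hypothetical name, error
handling is omitted, and xsk_fd is an already bound AF_XDP socket):

	#include <sys/socket.h>
	#include <xdp/xsk.h>

	/* After producing descriptors to the Tx ring: poke the kernel only
	 * when it asked for it. The fill ring is typically kicked via
	 * recvfrom()/poll() instead of sendto().
	 */
	static void kick_tx_if_needed(struct xsk_ring_prod *tx, int xsk_fd)
	{
		if (xsk_ring_prod__needs_wakeup(tx))
			sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	}

Each such kick lands in the .ndo_xsk_wakeup() callback added below.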

Co-developed-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Tested-by: Ramu R <ramu.r@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/idpf/idpf.h
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/intel/idpf/idpf_txrx.h
drivers/net/ethernet/intel/idpf/xdp.c
drivers/net/ethernet/intel/idpf/xsk.c
drivers/net/ethernet/intel/idpf/xsk.h

diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 6db6d6f0562a056191ea643cdd509b9cff6f0977..ca4da0c8997947084e52c646417f8a1a94f875e0 100644
@@ -995,6 +995,13 @@ static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
        mutex_unlock(&np->adapter->vport_ctrl_lock);
 }
 
+static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
+{
+       struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+       return mutex_is_locked(&np->adapter->vport_ctrl_lock);
+}
+
 void idpf_statistics_task(struct work_struct *work);
 void idpf_init_task(struct work_struct *work);
 void idpf_service_task(struct work_struct *work);
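
Note the new helper only peeks at the lock state instead of taking the
mutex: a held vport control lock means a reset or reconfiguration is in
flight and the queue/vector state may be in flux, so idpf_xsk_wakeup()
below simply returns -EBUSY rather than blocking behind it.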
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 9b8f7a6d65d65dce2ef049e558692b4705483351..8a941f0fb048b4974dfb5c60d2a314d4eb8ddaf4 100644
@@ -5,6 +5,7 @@
 #include "idpf_virtchnl.h"
 #include "idpf_ptp.h"
 #include "xdp.h"
+#include "xsk.h"
 
 static const struct net_device_ops idpf_netdev_ops;
 
@@ -2618,4 +2619,5 @@ static const struct net_device_ops idpf_netdev_ops = {
        .ndo_hwtstamp_set = idpf_hwtstamp_set,
        .ndo_bpf = idpf_xdp,
        .ndo_xdp_xmit = idpf_xdp_xmit,
+       .ndo_xsk_wakeup = idpf_xsk_wakeup,
 };
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 67963c0f454183289604b74611f4f467153b6206..828f7c444d3093cd5401a0df6c5c94e42bc20d32 100644
@@ -1210,6 +1210,8 @@ static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
                if (!idpf_queue_has(XSK, q->txq))
                        continue;
 
+               idpf_xsk_init_wakeup(q_vector);
+
                q->txq->q_vector = q_vector;
                q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
        }
@@ -4418,6 +4420,7 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
                        continue;
 
                qv = idpf_find_rxq_vec(vport, i);
+               idpf_xsk_init_wakeup(qv);
 
                xdpsq->q_vector = qv;
                qv->xsksq[qv->num_xsksq++] = xdpsq;
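
Both paths that attach an XSk Tx queue to a vector now initialize the
vector's wakeup CSD first: idpf_qp_enable() when a queue pair is brought
back up at runtime, and idpf_vport_intr_map_vector_to_qs() when vectors
are mapped during (re)configuration.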
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index a42aa4669c3ce624845fcd6b679c9602462d863b..75b977094741fd848e11b3a2535dd453f98bf53e 100644
@@ -374,9 +374,10 @@ struct idpf_intr_reg {
  * @complq: array of completion queues
  * @xsksq: array of XSk send queues
  * @intr_reg: See struct idpf_intr_reg
- * @napi: napi handler
+ * @csd: XSk wakeup CSD
  * @total_events: Number of interrupts processed
  * @wb_on_itr: whether WB on ITR is enabled
+ * @napi: napi handler
  * @tx_dim: Data for TX net_dim algorithm
  * @tx_itr_value: TX interrupt throttling rate
  * @tx_intr_mode: Dynamic ITR or not
@@ -406,10 +407,13 @@ struct idpf_q_vector {
        __cacheline_group_end_aligned(read_mostly);
 
        __cacheline_group_begin_aligned(read_write);
-       struct napi_struct napi;
+       call_single_data_t csd;
+
        u16 total_events;
        bool wb_on_itr;
 
+       struct napi_struct napi;
+
        struct dim tx_dim;
        u16 tx_itr_value;
        bool tx_intr_mode;
@@ -427,7 +431,7 @@ struct idpf_q_vector {
        __cacheline_group_end_aligned(cold);
 };
 libeth_cacheline_set_assert(struct idpf_q_vector, 136,
-                           24 + sizeof(struct napi_struct) +
+                           56 + sizeof(struct napi_struct) +
                            2 * sizeof(struct dim),
                            8);
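
The assert bump matches the new field: on a typical 64-bit build,
sizeof(call_single_data_t) is 32 bytes, so the read_write group grows from
24 to 56 bytes on top of the napi and dim structures. Moving napi below
the small scalars presumably keeps the CSD and the hot counters packed at
the start of the group.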
 
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index cde6d56553d28acdab2f10211b8dbd087192fe0e..21ce25b0567f6fbf675b2a6b570ba537f6e8de97 100644
@@ -400,7 +400,9 @@ void idpf_xdp_set_features(const struct idpf_vport *vport)
        if (!idpf_is_queue_model_split(vport->rxq_model))
                return;
 
-       libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo);
+       libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
+                                       idpf_get_max_tx_bufs(vport->adapter),
+                                       libeth_xsktmo);
 }
 
 static int idpf_xdp_setup_prog(struct idpf_vport *vport,
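
Besides the base XDP features, the extended call now passes the device's
maximum number of Tx buffers per frame and libeth_xsktmo, the XSk Tx
metadata ops provided by libeth, so that zero-copy XSk and Tx metadata
support can be advertised for this netdev as well.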
diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c
index ba35dca946d58ed98edede21295f03b6548a64fb..fd2cc43ab43cbacd2a72c39900bca4f72815db52 100644
@@ -158,6 +158,11 @@ void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type)
        }
 }
 
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
+{
+       libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
+}
+
 void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
 {
        struct libeth_xdpsq_napi_stats ss = { };
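
The wrapper just binds the vector's CSD to its NAPI context. As a rough
illustration of what such an initialization amounts to (the example_*
names are hypothetical, not the actual libeth implementation):

	/* Runs on the remote CPU once the CSD fires; scheduling was
	 * already prepped by the waking CPU, see the wakeup sketch below.
	 */
	static void example_xsk_napi_sched(void *info)
	{
		struct napi_struct *napi = info;

		__napi_schedule(napi);
	}

	static void example_init_wakeup(call_single_data_t *csd,
					struct napi_struct *napi)
	{
		INIT_CSD(csd, example_xsk_napi_sched, napi);
	}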
@@ -602,3 +607,27 @@ err_dis:
 
        return ret;
 }
+
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+       const struct idpf_netdev_priv *np = netdev_priv(dev);
+       const struct idpf_vport *vport = np->vport;
+       struct idpf_q_vector *q_vector;
+
+       if (unlikely(idpf_vport_ctrl_is_locked(dev)))
+               return -EBUSY;
+
+       if (unlikely(!vport->link_up))
+               return -ENETDOWN;
+
+       if (unlikely(!vport->num_xdp_txq))
+               return -ENXIO;
+
+       q_vector = idpf_find_rxq_vec(vport, qid);
+       if (unlikely(!q_vector->xsksq))
+               return -ENXIO;
+
+       libeth_xsk_wakeup(&q_vector->csd, qid);
+
+       return 0;
+}
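
The wakeup itself resolves the vector serving the given queue ID and kicks
its NAPI through the CSD initialized above. A plausible shape of that last
step, assuming the queue ID doubles as the target CPU for per-CPU XDP Tx
queues (again a hypothetical example_* sketch, not the actual libeth
code):

	static void example_xsk_wakeup(call_single_data_t *csd, u32 qid)
	{
		struct napi_struct *napi = csd->info;

		/* Already scheduled or running: mark a missed run so the
		 * NAPI polls again, instead of sending a spurious IPI.
		 */
		if (napi_if_scheduled_mark_missed(napi) ||
		    unlikely(!napi_schedule_prep(napi)))
			return;

		/* Fire the CSD: example_xsk_napi_sched() then calls
		 * __napi_schedule() on the target CPU.
		 */
		smp_call_function_single_async(qid, csd);
	}

For userspace, the error codes mean: -EBUSY while a reset or
reconfiguration holds the vport control lock, -ENETDOWN while the link is
down, and -ENXIO when the queue has no XSk Tx queue attached; the
application is expected to retry the wakeup later.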
diff --git a/drivers/net/ethernet/intel/idpf/xsk.h b/drivers/net/ethernet/intel/idpf/xsk.h
index d5338cbef8bdd936ab02d74106e3ff79b79a3da5..b622d08c03e82d2e8f64a4a7992de145cb756649 100644
@@ -8,14 +8,17 @@
 
 enum virtchnl2_queue_type;
 struct idpf_buf_queue;
+struct idpf_q_vector;
 struct idpf_rx_queue;
 struct idpf_tx_queue;
 struct idpf_vport;
+struct net_device;
 struct netdev_bpf;
 
 void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
                          enum virtchnl2_queue_type type);
 void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv);
 
 int idpf_xskfq_init(struct idpf_buf_queue *bufq);
 void idpf_xskfq_rel(struct idpf_buf_queue *bufq);
@@ -25,5 +28,6 @@ int idpf_xskrq_poll(struct idpf_rx_queue *rxq, u32 budget);
 bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq);
 
 int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
 
 #endif /* !_IDPF_XSK_H_ */