	mutex_unlock(&np->adapter->vport_ctrl_lock);
}
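+
+/**
+ * idpf_vport_ctrl_is_locked - check if the vport control lock is held
+ * @netdev: Network interface device structure
+ *
+ * Return: true if the adapter's vport_ctrl_lock is currently taken.
+ */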
+static inline bool idpf_vport_ctrl_is_locked(struct net_device *netdev)
+{
+	struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+	return mutex_is_locked(&np->adapter->vport_ctrl_lock);
+}
+
void idpf_statistics_task(struct work_struct *work);
void idpf_init_task(struct work_struct *work);
void idpf_service_task(struct work_struct *work);
#include "idpf_virtchnl.h"
#include "idpf_ptp.h"
#include "xdp.h"
+#include "xsk.h"
static const struct net_device_ops idpf_netdev_ops;
	.ndo_hwtstamp_set = idpf_hwtstamp_set,
	.ndo_bpf = idpf_xdp,
	.ndo_xdp_xmit = idpf_xdp_xmit,
+	.ndo_xsk_wakeup = idpf_xsk_wakeup,
};
		if (!idpf_queue_has(XSK, q->txq))
			continue;
+		idpf_xsk_init_wakeup(q_vector);
+
		q->txq->q_vector = q_vector;
		q_vector->xsksq[q_vector->num_xsksq++] = q->txq;
	}
			continue;
		qv = idpf_find_rxq_vec(vport, i);
+		idpf_xsk_init_wakeup(qv);
		xdpsq->q_vector = qv;
		qv->xsksq[qv->num_xsksq++] = xdpsq;
* @complq: array of completion queues
* @xsksq: array of XSk send queues
* @intr_reg: See struct idpf_intr_reg
- * @napi: napi handler
+ * @csd: XSk wakeup CSD
* @total_events: Number of interrupts processed
* @wb_on_itr: whether WB on ITR is enabled
+ * @napi: napi handler
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
	__cacheline_group_end_aligned(read_mostly);
	__cacheline_group_begin_aligned(read_write);
-	struct napi_struct napi;
+	call_single_data_t csd;
+
	u16 total_events;
	bool wb_on_itr;
+	struct napi_struct napi;
+
	struct dim tx_dim;
	u16 tx_itr_value;
	bool tx_intr_mode;
	__cacheline_group_end_aligned(cold);
};
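+/* csd is a call_single_data_t, 32 bytes on 64-bit, hence 24 + 32 = 56 below */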
libeth_cacheline_set_assert(struct idpf_q_vector, 136,
-			    24 + sizeof(struct napi_struct) +
+			    56 + sizeof(struct napi_struct) +
			    2 * sizeof(struct dim),
			    8);
	if (!idpf_is_queue_model_split(vport->rxq_model))
		return;
-	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo);
+	libeth_xdp_set_features_noredir(vport->netdev, &idpf_xdpmo,
+					idpf_get_max_tx_bufs(vport->adapter),
+					libeth_xsktmo);
}
static int idpf_xdp_setup_prog(struct idpf_vport *vport,
}
}
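+/**
+ * idpf_xsk_init_wakeup - set up the XSk wakeup CSD on a queue vector
+ * @qv: queue vector to initialize the wakeup data for
+ */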
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv)
+{
+	libeth_xsk_init_wakeup(&qv->csd, &qv->napi);
+}
+
void idpf_xsksq_clean(struct idpf_tx_queue *xdpsq)
{
	struct libeth_xdpsq_napi_stats ss = { };
	return ret;
}
+
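+/**
+ * idpf_xsk_wakeup - wake up a queue pair to process XSk traffic
+ * @dev: network device owning the queue
+ * @qid: index of the Rx queue whose vector to kick
+ * @flags: wakeup flags (%XDP_WAKEUP_RX, %XDP_WAKEUP_TX), currently unused
+ *
+ * Return: 0 on success, -errno if the wakeup cannot be performed right now,
+ *	   e.g. the device is being reconfigured or the link is down.
+ */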
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+	const struct idpf_netdev_priv *np = netdev_priv(dev);
+	const struct idpf_vport *vport = np->vport;
+	struct idpf_q_vector *q_vector;
+
+	/* The control lock being taken means the queues may be getting
+	 * reconfigured or freed, don't touch them.
+	 */
+	if (unlikely(idpf_vport_ctrl_is_locked(dev)))
+		return -EBUSY;
+
+	if (unlikely(!vport->link_up))
+		return -ENETDOWN;
+
+	if (unlikely(!vport->num_xdp_txq))
+		return -ENXIO;
+
+	q_vector = idpf_find_rxq_vec(vport, qid);
+	if (unlikely(!q_vector->xsksq))
+		return -ENXIO;
+
+	libeth_xsk_wakeup(&q_vector->csd, qid);
+
+	return 0;
+}
enum virtchnl2_queue_type;
struct idpf_buf_queue;
+struct idpf_q_vector;
struct idpf_rx_queue;
struct idpf_tx_queue;
struct idpf_vport;
+struct net_device;
struct netdev_bpf;
void idpf_xsk_setup_queue(const struct idpf_vport *vport, void *q,
			  enum virtchnl2_queue_type type);
void idpf_xsk_clear_queue(void *q, enum virtchnl2_queue_type type);
+void idpf_xsk_init_wakeup(struct idpf_q_vector *qv);
int idpf_xskfq_init(struct idpf_buf_queue *bufq);
void idpf_xskfq_rel(struct idpf_buf_queue *bufq);
bool idpf_xsk_xmit(struct idpf_tx_queue *xsksq);
int idpf_xsk_pool_setup(struct idpf_vport *vport, struct netdev_bpf *xdp);
+int idpf_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
#endif /* !_IDPF_XSK_H_ */