void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
+/*
+ * Lock a QP's send and receive CQ locks in a single global order
+ * (ascending CQ number) so that two code paths locking the same pair of
+ * CQs can never deadlock against each other (classic ABBA avoidance).
+ * When the QP uses one CQ for both directions the lock is taken once;
+ * the extra __acquire() is a no-op that only keeps sparse's lock
+ * balance checking happy.
+ * NOTE(review): plain spin_lock() is used here, so interrupts are
+ * expected to be disabled already — the call sites added by this patch
+ * take dev->reset_flow_resource_lock with spin_lock_irqsave() first.
+ */
+static inline void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
+ struct mlx4_ib_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
+{
+ if (send_cq == recv_cq) {
+ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ spin_lock(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ /* recv_cq has the lower cqn: it must be taken first */
+ spin_lock(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+/*
+ * Unlock counterpart of mlx4_ib_lock_cqs(): releases the two CQ locks
+ * in the reverse of the acquisition order (the nested lock first).
+ * For the shared-CQ case the single lock is dropped once; __release()
+ * merely balances the sparse annotation added on the lock side.
+ */
+static inline void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
+ struct mlx4_ib_cq *recv_cq)
+ __releases(&send_cq->lock) __releases(&recv_cq->lock)
+{
+ if (send_cq == recv_cq) {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ /* send_cq was taken first, so recv_cq (nested) is dropped first */
+ spin_unlock(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ }
+}
+
+/*
+ * Resolve the send and receive CQs associated with @qp.
+ * XRC target QPs use the CQ attached to their XRC domain for both
+ * directions; XRC initiator QPs report the send CQ for both (they have
+ * no receive CQ of their own per the IB XRC model — the recv side lives
+ * with the target).  Every other QP type uses the CQs recorded in the
+ * QP's ib_qp.
+ */
+static inline void mlx4_ib_get_cqs(struct mlx4_ib_qp *qp,
+ struct mlx4_ib_cq **send_cq,
+ struct mlx4_ib_cq **recv_cq)
+{
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_XRC_TGT:
+ *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
+ *recv_cq = *send_cq;
+ break;
+ case IB_QPT_XRC_INI:
+ *send_cq = to_mcq(qp->ibqp.send_cq);
+ *recv_cq = *send_cq;
+ break;
+ default:
+ *send_cq = to_mcq(qp->ibqp.send_cq);
+ *recv_cq = to_mcq(qp->ibqp.recv_cq);
+ break;
+ }
+}
+
struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
struct ib_udata *udata);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
#include "mlx4_ib.h"
#include "user.h"
-static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
- struct mlx4_ib_cq *recv_cq);
-static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
- struct mlx4_ib_cq *recv_cq);
-
enum {
MLX4_IB_ACK_REQ_FREQ = 8,
};
}
}
-static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
- __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
-{
- if (send_cq == recv_cq) {
- spin_lock(&send_cq->lock);
- __acquire(&recv_cq->lock);
- } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
- spin_lock(&send_cq->lock);
- spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
- } else {
- spin_lock(&recv_cq->lock);
- spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
- }
-}
-
-static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
- __releases(&send_cq->lock) __releases(&recv_cq->lock)
-{
- if (send_cq == recv_cq) {
- __release(&recv_cq->lock);
- spin_unlock(&send_cq->lock);
- } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
- spin_unlock(&recv_cq->lock);
- spin_unlock(&send_cq->lock);
- } else {
- spin_unlock(&send_cq->lock);
- spin_unlock(&recv_cq->lock);
- }
-}
-
static void del_gid_entries(struct mlx4_ib_qp *qp)
{
struct mlx4_ib_gid_entry *ge, *tmp;
return to_mpd(qp->ibqp.pd);
}
-static void get_cqs(struct mlx4_ib_qp *qp,
- struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
-{
- switch (qp->ibqp.qp_type) {
- case IB_QPT_XRC_TGT:
- *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
- *recv_cq = *send_cq;
- break;
- case IB_QPT_XRC_INI:
- *send_cq = to_mcq(qp->ibqp.send_cq);
- *recv_cq = *send_cq;
- break;
- default:
- *send_cq = to_mcq(qp->ibqp.send_cq);
- *recv_cq = to_mcq(qp->ibqp.recv_cq);
- break;
- }
-}
-
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
int is_user)
{
}
}
- get_cqs(qp, &send_cq, &recv_cq);
+ mlx4_ib_get_cqs(qp, &send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx4_ib_lock_cqs(send_cq, recv_cq);
}
pd = get_pd(qp);
- get_cqs(qp, &send_cq, &recv_cq);
+ mlx4_ib_get_cqs(qp, &send_cq, &recv_cq);
context->pd = cpu_to_be32(pd->pdn);
context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
return 0;
}
-
-
static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
switch (state) {
static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
struct ib_qp_init_attr *init_attr)
{
+ struct mlx4_ib_cq *send_cq, *recv_cq;
+ unsigned long flags;
+
if (qp->state != IB_QPS_RESET)
if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
qp->mqp.qpn);
+ mlx4_ib_get_cqs(qp, &send_cq, &recv_cq);
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx4_ib_lock_cqs(send_cq, recv_cq);
+
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ list_del(&qp->cq_send_list);
+ list_del(&qp->cq_recv_list);
+
mlx4_qp_remove(dev->dev, &qp->mqp);
+
+ mlx4_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
mlx4_qp_free(dev->dev, &qp->mqp);
mlx4_mtt_cleanup(dev->dev, &qp->mtt);
mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
return !attr->srq;
}
-
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, int sqpn, struct mlx4_ib_qp *qp)
int err;
enum mlx4_ib_qp_type qp_type =
(enum mlx4_ib_qp_type) init_attr->qp_type;
+ struct mlx4_ib_cq *mcq;
+ unsigned long flags;
qp->mlx4_ib_qp_type = qp_type;
qp->pri.vid = qp->alt.vid = 0xFFFF;
mutex_init(&qp->mutex);
qp->mqp.event = mlx4_ib_qp_event;
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ /* Maintain device to QPs access, needed for further handling
+ * via reset flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling
+ * via reset flow
+ */
+ mcq = to_mcq(init_attr->send_cq);
+ list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
+ mcq = to_mcq(init_attr->recv_cq);
+ list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
+ mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0;
err_qpn: