        SET_OBJ_SIZE(dev_ops, ib_ah);
        SET_OBJ_SIZE(dev_ops, ib_pd);
+       SET_OBJ_SIZE(dev_ops, ib_srq);
        SET_OBJ_SIZE(dev_ops, ib_ucontext);
 }
 EXPORT_SYMBOL(ib_set_device_ops);
 
        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
 
-       srq = pd->device->ops.create_srq(pd, &attr, udata);
-       if (IS_ERR(srq)) {
-               ret = PTR_ERR(srq);
+       srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
+       if (!srq) {
+               ret = -ENOMEM;
                goto err_put;
        }
 
        srq->event_handler = attr.event_handler;
        srq->srq_context   = attr.srq_context;
 
+       ret = pd->device->ops.create_srq(srq, &attr, udata);
+       if (ret)
+               goto err_free;
+
        if (ib_srq_has_cq(cmd->srq_type)) {
                srq->ext.cq       = attr.ext.cq;
                atomic_inc(&attr.ext.cq->usecnt);
 err_copy:
        ib_destroy_srq_user(srq, &attrs->driver_udata);
 
+err_free:
+       kfree(srq);
 err_put:
        uobj_put_obj_read(pd);
 
 
                             struct ib_srq_init_attr *srq_init_attr)
 {
        struct ib_srq *srq;
+       int ret;
 
        if (!pd->device->ops.create_srq)
                return ERR_PTR(-EOPNOTSUPP);
 
-       srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);
-
-       if (!IS_ERR(srq)) {
-               srq->device        = pd->device;
-               srq->pd            = pd;
-               srq->uobject       = NULL;
-               srq->event_handler = srq_init_attr->event_handler;
-               srq->srq_context   = srq_init_attr->srq_context;
-               srq->srq_type      = srq_init_attr->srq_type;
-               if (ib_srq_has_cq(srq->srq_type)) {
-                       srq->ext.cq   = srq_init_attr->ext.cq;
-                       atomic_inc(&srq->ext.cq->usecnt);
-               }
-               if (srq->srq_type == IB_SRQT_XRC) {
-                       srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
-                       atomic_inc(&srq->ext.xrc.xrcd->usecnt);
-               }
-               atomic_inc(&pd->usecnt);
-               atomic_set(&srq->usecnt, 0);
+       srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
+       if (!srq)
+               return ERR_PTR(-ENOMEM);
+
+       srq->device = pd->device;
+       srq->pd = pd;
+       srq->event_handler = srq_init_attr->event_handler;
+       srq->srq_context = srq_init_attr->srq_context;
+       srq->srq_type = srq_init_attr->srq_type;
+
+       if (ib_srq_has_cq(srq->srq_type)) {
+               srq->ext.cq = srq_init_attr->ext.cq;
+               atomic_inc(&srq->ext.cq->usecnt);
+       }
+       if (srq->srq_type == IB_SRQT_XRC) {
+               srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
+               atomic_inc(&srq->ext.xrc.xrcd->usecnt);
+       }
+       atomic_inc(&pd->usecnt);
+
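+       /*
+        * The core allocated and initialized the srq above; if the driver
+        * fails to set it up, drop the references just taken and free it.
+        */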
+       ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
+       if (ret) {
+               atomic_dec(&srq->pd->usecnt);
+               if (srq->srq_type == IB_SRQT_XRC)
+                       atomic_dec(&srq->ext.xrc.xrcd->usecnt);
+               if (ib_srq_has_cq(srq->srq_type))
+                       atomic_dec(&srq->ext.cq->usecnt);
+               kfree(srq);
+               return ERR_PTR(ret);
        }
 
        return srq;
 
 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
 {
-       struct ib_pd *pd;
-       enum ib_srq_type srq_type;
-       struct ib_xrcd *uninitialized_var(xrcd);
-       struct ib_cq *uninitialized_var(cq);
-       int ret;
-
        if (atomic_read(&srq->usecnt))
                return -EBUSY;
 
-       pd = srq->pd;
-       srq_type = srq->srq_type;
-       if (ib_srq_has_cq(srq_type))
-               cq = srq->ext.cq;
-       if (srq_type == IB_SRQT_XRC)
-               xrcd = srq->ext.xrc.xrcd;
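+       /*
+        * ->destroy_srq() can no longer fail: drop the PD/XRCD/CQ
+        * references unconditionally and free the core-allocated object.
+        */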
+       srq->device->ops.destroy_srq(srq, udata);
 
-       ret = srq->device->ops.destroy_srq(srq, udata);
-       if (!ret) {
-               atomic_dec(&pd->usecnt);
-               if (srq_type == IB_SRQT_XRC)
-                       atomic_dec(&xrcd->usecnt);
-               if (ib_srq_has_cq(srq_type))
-                       atomic_dec(&cq->usecnt);
-       }
+       atomic_dec(&srq->pd->usecnt);
+       if (srq->srq_type == IB_SRQT_XRC)
+               atomic_dec(&srq->ext.xrc.xrcd->usecnt);
+       if (ib_srq_has_cq(srq->srq_type))
+               atomic_dec(&srq->ext.cq->usecnt);
+       kfree(srq);
 
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(ib_destroy_srq_user);
 
 
 }
 
 /* Shared Receive Queues */
-int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
+void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
 {
        struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
                                               ib_srq);
        struct bnxt_re_dev *rdev = srq->rdev;
        struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
        struct bnxt_qplib_nq *nq = NULL;
-       int rc;
 
        if (qplib_srq->cq)
                nq = qplib_srq->cq->nq;
-       rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
-       if (rc) {
-               dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
-               return rc;
-       }
-
+       bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
        if (srq->umem)
                ib_umem_release(srq->umem);
-       kfree(srq);
        atomic_dec(&rdev->srq_count);
        if (nq)
                nq->budget--;
-       return 0;
 }
 
 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
        return 0;
 }
 
-struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
-                                 struct ib_srq_init_attr *srq_init_attr,
-                                 struct ib_udata *udata)
+int bnxt_re_create_srq(struct ib_srq *ib_srq,
+                      struct ib_srq_init_attr *srq_init_attr,
+                      struct ib_udata *udata)
 {
+       struct ib_pd *ib_pd = ib_srq->pd;
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
-       struct bnxt_re_srq *srq;
+       struct bnxt_re_srq *srq =
+               container_of(ib_srq, struct bnxt_re_srq, ib_srq);
        struct bnxt_qplib_nq *nq = NULL;
        int rc, entries;
 
                goto exit;
        }
 
-       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq) {
-               rc = -ENOMEM;
-               goto exit;
-       }
        srq->rdev = rdev;
        srq->qplib_srq.pd = &pd->qplib_pd;
        srq->qplib_srq.dpi = &rdev->dpi_privileged;
                nq->budget++;
        atomic_inc(&rdev->srq_count);
 
-       return &srq->ib_srq;
+       return 0;
 
 fail:
        if (srq->umem)
                ib_umem_release(srq->umem);
-       kfree(srq);
 exit:
-       return ERR_PTR(rc);
+       return rc;
 }
 
 int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
 
 };
 
 struct bnxt_re_srq {
+       struct ib_srq           ib_srq;
        struct bnxt_re_dev      *rdev;
        u32                     srq_limit;
-       struct ib_srq           ib_srq;
        struct bnxt_qplib_srq   qplib_srq;
        struct ib_umem          *umem;
        spinlock_t              lock;           /* protect srq */
 int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
 void bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
-                                 struct ib_srq_init_attr *srq_init_attr,
-                                 struct ib_udata *udata);
+int bnxt_re_create_srq(struct ib_srq *srq,
+                      struct ib_srq_init_attr *srq_init_attr,
+                      struct ib_udata *udata);
 int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
                       enum ib_srq_attr_mask srq_attr_mask,
                       struct ib_udata *udata);
 int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+void bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
                          const struct ib_recv_wr **bad_recv_wr);
 struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
 
        .req_notify_cq = bnxt_re_req_notify_cq,
        INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
        INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
 };
 
 
        writeq(val, db);
 }
 
-int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
                           struct bnxt_qplib_srq *srq)
 {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        /* Configure the request */
        req.srq_cid = cpu_to_le32(srq->id);
 
-       rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
-                                         (void *)&resp, NULL, 0);
+       rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
+                                         (struct creq_base *)&resp, NULL, 0);
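+       /*
+        * Free the SWQ regardless of the command result; the HWQ is only
+        * freed when the destroy command succeeded.
+        */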
+       kfree(srq->swq);
        if (rc)
-               return rc;
-
+               return;
        bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
-       kfree(srq->swq);
-       return 0;
 }
 
 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 
                          struct bnxt_qplib_srq *srq);
 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_srq *srq);
-int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
-                          struct bnxt_qplib_srq *srq);
+void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+                           struct bnxt_qplib_srq *srq);
 int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
                             struct bnxt_qplib_swqe *wqe);
 int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
 
 int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
                    enum ib_srq_attr_mask srq_attr_mask,
                    struct ib_udata *udata);
-int c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
-struct ib_srq *c4iw_create_srq(struct ib_pd *pd,
-                              struct ib_srq_init_attr *attrs,
-                              struct ib_udata *udata);
+void c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
+int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
+                   struct ib_udata *udata);
 int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
 
        .reg_user_mr = c4iw_reg_user_mr,
        .req_notify_cq = c4iw_arm_cq,
        INIT_RDMA_OBJ_SIZE(ib_pd, c4iw_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, c4iw_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, c4iw_ucontext, ibucontext),
 };
 
 
        }
 }
 
-struct ib_srq *c4iw_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *attrs,
+int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
                               struct ib_udata *udata)
 {
+       struct ib_pd *pd = ib_srq->pd;
        struct c4iw_dev *rhp;
-       struct c4iw_srq *srq;
+       struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
        struct c4iw_pd *php;
        struct c4iw_create_srq_resp uresp;
        struct c4iw_ucontext *ucontext;
        rhp = php->rhp;
 
        if (!rhp->rdev.lldi.vr->srq.size)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
-               return ERR_PTR(-E2BIG);
+               return -E2BIG;
        if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
-               return ERR_PTR(-E2BIG);
+               return -E2BIG;
 
        /*
         * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
 
-       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
-
        srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
-       if (!srq->wr_waitp) {
-               ret = -ENOMEM;
-               goto err_free_srq;
-       }
+       if (!srq->wr_waitp)
+               return -ENOMEM;
 
        srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
        if (srq->idx < 0) {
                        (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
 
        spin_lock_init(&srq->lock);
-       return &srq->ibsrq;
+       return 0;
+
 err_free_srq_db_key_mm:
        kfree(srq_db_key_mm);
 err_free_srq_key_mm:
        c4iw_free_srq_idx(&rhp->rdev, srq->idx);
 err_free_wr_wait:
        c4iw_put_wr_wait(srq->wr_waitp);
-err_free_srq:
-       kfree(srq);
-       return ERR_PTR(ret);
+       return ret;
 }
 
-int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
        struct c4iw_dev *rhp;
        struct c4iw_srq *srq;
                       srq->wr_waitp);
        c4iw_free_srq_idx(&rhp->rdev, srq->idx);
        c4iw_put_wr_wait(srq->wr_waitp);
-       kfree(srq);
-       return 0;
 }
 
 int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
                               struct hns_roce_mtt *mtt, struct ib_umem *umem);
 
-struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
-                                  struct ib_srq_init_attr *srq_init_attr,
-                                  struct ib_udata *udata);
+int hns_roce_create_srq(struct ib_srq *srq,
+                       struct ib_srq_init_attr *srq_init_attr,
+                       struct ib_udata *udata);
 int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
                        enum ib_srq_attr_mask srq_attr_mask,
                        struct ib_udata *udata);
-int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 
 struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
                                 struct ib_qp_init_attr *init_attr,
 
 static const struct ib_device_ops hns_roce_dev_srq_ops = {
        .create_srq = hns_roce_create_srq,
        .destroy_srq = hns_roce_destroy_srq,
+
+       INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
 };
 
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 
        return 0;
 }
 
-struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
-                                  struct ib_srq_init_attr *srq_init_attr,
-                                  struct ib_udata *udata)
+int hns_roce_create_srq(struct ib_srq *ib_srq,
+                       struct ib_srq_init_attr *srq_init_attr,
+                       struct ib_udata *udata)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+       struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
        struct hns_roce_ib_create_srq_resp resp = {};
-       struct hns_roce_srq *srq;
+       struct hns_roce_srq *srq = to_hr_srq(ib_srq);
        int srq_desc_size;
        int srq_buf_size;
        u32 page_shift;
        /* Check the actual SRQ wqe and SRQ sge num */
        if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
            srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
-               return ERR_PTR(-EINVAL);
-
-       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
+               return -EINVAL;
 
        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        if (udata) {
                struct hns_roce_ib_create_srq  ucmd;
 
-               if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
-                       ret = -EFAULT;
-                       goto err_srq;
-               }
+               if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+                       return -EFAULT;
 
                srq->umem =
                        ib_umem_get(udata, ucmd.buf_addr, srq_buf_size, 0, 0);
-               if (IS_ERR(srq->umem)) {
-                       ret = PTR_ERR(srq->umem);
-                       goto err_srq;
-               }
+               if (IS_ERR(srq->umem))
+                       return PTR_ERR(srq->umem);
 
                if (hr_dev->caps.srqwqe_buf_pg_sz) {
                        npages = (ib_umem_page_count(srq->umem) +
        } else {
                page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
                if (hns_roce_buf_alloc(hr_dev, srq_buf_size,
-                                     (1 << page_shift) * 2,
-                                     &srq->buf, page_shift)) {
-                       ret = -ENOMEM;
-                       goto err_srq;
-               }
+                                      (1 << page_shift) * 2, &srq->buf,
+                                      page_shift))
+                       return -ENOMEM;
 
                srq->head = 0;
                srq->tail = srq->max - 1;
                        goto err_srq_mtt;
 
                page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
-               ret = hns_roce_create_idx_que(pd, srq, page_shift);
+               ret = hns_roce_create_idx_que(ib_srq->pd, srq, page_shift);
                if (ret) {
                        dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n",
                                ret);
 
        srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
 
-       ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(pd)->pdn, cqn, 0,
+       ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
                                 &srq->mtt, 0, srq);
        if (ret)
                goto err_wrid;
                }
        }
 
-       return &srq->ibsrq;
+       return 0;
 
 err_srqc_alloc:
        hns_roce_srq_free(hr_dev, srq);
        else
                hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
 
-err_srq:
-       kfree(srq);
-       return ERR_PTR(ret);
+       return ret;
 }
 
-int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
        struct hns_roce_srq *srq = to_hr_srq(ibsrq);
                hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift,
                                  &srq->buf);
        }
-
-       kfree(srq);
-
-       return 0;
 }
 
 int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
 
 
        INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
 };
 
 
 int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags);
 
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
-                                 struct ib_srq_init_attr *init_attr,
-                                 struct ib_udata *udata);
+int mlx4_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+                      struct ib_udata *udata);
 int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
 int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                          const struct ib_recv_wr **bad_wr);
 
        }
 }
 
-struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
-                                 struct ib_srq_init_attr *init_attr,
-                                 struct ib_udata *udata)
+int mlx4_ib_create_srq(struct ib_srq *ib_srq,
+                      struct ib_srq_init_attr *init_attr,
+                      struct ib_udata *udata)
 {
-       struct mlx4_ib_dev *dev = to_mdev(pd->device);
+       struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
        struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
                udata, struct mlx4_ib_ucontext, ibucontext);
-       struct mlx4_ib_srq *srq;
+       struct mlx4_ib_srq *srq = to_msrq(ib_srq);
        struct mlx4_wqe_srq_next_seg *next;
        struct mlx4_wqe_data_seg *scatter;
        u32 cqn;
        /* Sanity check SRQ size before proceeding */
        if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
            init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge)
-               return ERR_PTR(-EINVAL);
-
-       srq = kmalloc(sizeof *srq, GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
+               return -EINVAL;
 
        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        if (udata) {
                struct mlx4_ib_create_srq ucmd;
 
-               if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
-                       err = -EFAULT;
-                       goto err_srq;
-               }
+               if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+                       return -EFAULT;
 
                srq->umem = ib_umem_get(udata, ucmd.buf_addr, buf_size, 0, 0);
-               if (IS_ERR(srq->umem)) {
-                       err = PTR_ERR(srq->umem);
-                       goto err_srq;
-               }
+               if (IS_ERR(srq->umem))
+                       return PTR_ERR(srq->umem);
 
                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
                                    srq->umem->page_shift, &srq->mtt);
        } else {
                err = mlx4_db_alloc(dev->dev, &srq->db, 0);
                if (err)
-                       goto err_srq;
+                       return err;
 
                *srq->db.db = 0;
 
        xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
                to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
                (u16) dev->dev->caps.reserved_xrcds;
-       err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
-                            srq->db.dma, &srq->msrq);
+       err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
+                            &srq->mtt, srq->db.dma, &srq->msrq);
        if (err)
                goto err_wrid;
 
 
        init_attr->attr.max_wr = srq->msrq.max - 1;
 
-       return &srq->ibsrq;
+       return 0;
 
 err_wrid:
        if (udata)
        if (!udata)
                mlx4_db_free(dev->dev, &srq->db);
 
-err_srq:
-       kfree(srq);
-
-       return ERR_PTR(err);
+       return err;
 }
 
 int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
        return 0;
 }
 
-int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
        struct mlx4_ib_dev *dev = to_mdev(srq->device);
        struct mlx4_ib_srq *msrq = to_msrq(srq);
                              &msrq->buf);
                mlx4_db_free(dev->dev, &msrq->db);
        }
-
-       kfree(msrq);
-
-       return 0;
 }
 
 void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
 
        attr.ext.cq = devr->c0;
        attr.ext.xrc.xrcd = devr->x0;
 
-       devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
-       if (IS_ERR(devr->s0)) {
-               ret = PTR_ERR(devr->s0);
+       devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+       if (!devr->s0) {
+               ret = -ENOMEM;
                goto error4;
        }
+
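+       /*
+        * s0 (and s1 below) are now allocated here rather than inside
+        * mlx5_ib_create_srq() and must be kfree()d on error and teardown.
+        */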
        devr->s0->device        = &dev->ib_dev;
        devr->s0->pd            = devr->p0;
-       devr->s0->uobject       = NULL;
-       devr->s0->event_handler = NULL;
-       devr->s0->srq_context   = NULL;
        devr->s0->srq_type      = IB_SRQT_XRC;
        devr->s0->ext.xrc.xrcd  = devr->x0;
        devr->s0->ext.cq        = devr->c0;
+       ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
+       if (ret)
+               goto err_create;
+
        atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
        atomic_inc(&devr->s0->ext.cq->usecnt);
        atomic_inc(&devr->p0->usecnt);
        attr.attr.max_sge = 1;
        attr.attr.max_wr = 1;
        attr.srq_type = IB_SRQT_BASIC;
-       devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
-       if (IS_ERR(devr->s1)) {
-               ret = PTR_ERR(devr->s1);
+       devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+       if (!devr->s1) {
+               ret = -ENOMEM;
                goto error5;
        }
+
        devr->s1->device        = &dev->ib_dev;
        devr->s1->pd            = devr->p0;
-       devr->s1->uobject       = NULL;
-       devr->s1->event_handler = NULL;
-       devr->s1->srq_context   = NULL;
        devr->s1->srq_type      = IB_SRQT_BASIC;
        devr->s1->ext.cq        = devr->c0;
+
+       ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
+       if (ret)
+               goto error6;
+
        atomic_inc(&devr->p0->usecnt);
        atomic_set(&devr->s1->usecnt, 0);
 
 
        return 0;
 
+error6:
+       kfree(devr->s1);
 error5:
        mlx5_ib_destroy_srq(devr->s0, NULL);
+err_create:
+       kfree(devr->s0);
 error4:
        mlx5_ib_dealloc_xrcd(devr->x1, NULL);
 error3:
        int port;
 
        mlx5_ib_destroy_srq(devr->s1, NULL);
+       kfree(devr->s1);
        mlx5_ib_destroy_srq(devr->s0, NULL);
+       kfree(devr->s0);
        mlx5_ib_dealloc_xrcd(devr->x0, NULL);
        mlx5_ib_dealloc_xrcd(devr->x1, NULL);
        mlx5_ib_destroy_cq(devr->c0, NULL);
 
        INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
 };
 
 
                      struct ib_udata *udata);
 int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 void mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
-struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
-                                 struct ib_srq_init_attr *init_attr,
-                                 struct ib_udata *udata);
+int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+                      struct ib_udata *udata);
 int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
-int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                          const struct ib_recv_wr **bad_wr);
 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
 
        mlx5_db_free(dev->mdev, &srq->db);
 }
 
-struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
-                                 struct ib_srq_init_attr *init_attr,
-                                 struct ib_udata *udata)
+int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+                      struct ib_srq_init_attr *init_attr,
+                      struct ib_udata *udata)
 {
-       struct mlx5_ib_dev *dev = to_mdev(pd->device);
-       struct mlx5_ib_srq *srq;
+       struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
+       struct mlx5_ib_srq *srq = to_msrq(ib_srq);
        size_t desc_size;
        size_t buf_size;
        int err;
-       struct mlx5_srq_attr in = {0};
+       struct mlx5_srq_attr in = {};
        __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
        /* Sanity check SRQ size before proceeding */
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
                            max_srq_wqes);
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        }
 
-       srq = kmalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
-
        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
 
        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-       if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
-               err = -EINVAL;
-               goto err_srq;
-       }
+       if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+               return -EINVAL;
+
        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(size_t, 32, desc_size);
-       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
-               err = -EINVAL;
-               goto err_srq;
-       }
+       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+               return -EINVAL;
+
        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
-       if (buf_size < desc_size) {
-               err = -EINVAL;
-               goto err_srq;
-       }
+       if (buf_size < desc_size)
+               return -EINVAL;
+
        in.type = init_attr->srq_type;
 
        if (udata)
-               err = create_srq_user(pd, srq, &in, udata, buf_size);
+               err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
        else
                err = create_srq_kernel(dev, srq, &in, buf_size);
 
        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
                             udata ? "user" : "kernel", err);
-               goto err_srq;
+               return err;
        }
 
        in.log_size = ilog2(srq->msrq.max);
        else
                in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
 
-       in.pd = to_mpd(pd)->pdn;
+       in.pd = to_mpd(ib_srq->pd)->pdn;
        in.db_record = srq->db.dma;
        err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
        kvfree(in.pas);
 
        init_attr->attr.max_wr = srq->msrq.max - 1;
 
-       return &srq->ibsrq;
+       return 0;
 
 err_core:
        mlx5_cmd_destroy_srq(dev, &srq->msrq);
 
 err_usr_kern_srq:
        if (udata)
-               destroy_srq_user(pd, srq, udata);
+               destroy_srq_user(ib_srq->pd, srq, udata);
        else
                destroy_srq_kernel(dev, srq);
 
-err_srq:
-       kfree(srq);
-
-       return ERR_PTR(err);
+       return err;
 }
 
 int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
        return ret;
 }
 
-int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+void mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
        struct mlx5_ib_dev *dev = to_mdev(srq->device);
        struct mlx5_ib_srq *msrq = to_msrq(srq);
        } else {
                destroy_srq_kernel(dev, msrq);
        }
-
-       kfree(srq);
-       return 0;
 }
 
 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
 
 
 int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *in);
-int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
+void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
 int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
                       struct mlx5_srq_attr *out);
 int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 
        return err;
 }
 
-int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+void mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
 {
        struct mlx5_srq_table *table = &dev->srq_table;
        struct mlx5_core_srq *tmp;
 
        tmp = xa_erase_irq(&table->array, srq->srqn);
        if (!tmp || tmp != srq)
-               return -EINVAL;
+               return;
 
        err = destroy_srq_split(dev, srq);
        if (err)
-               return err;
+               return;
 
        mlx5_core_res_put(&srq->common);
        wait_for_completion(&srq->common.free);
-
-       return 0;
 }
 
 int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
 }
 
-static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
-                                      struct ib_srq_init_attr *init_attr,
-                                      struct ib_udata *udata)
+static int mthca_create_srq(struct ib_srq *ibsrq,
+                           struct ib_srq_init_attr *init_attr,
+                           struct ib_udata *udata)
 {
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
-       struct mthca_srq *srq;
+       struct mthca_srq *srq = to_msrq(ibsrq);
        int err;
 
        if (init_attr->srq_type != IB_SRQT_BASIC)
-               return ERR_PTR(-EOPNOTSUPP);
-
-       srq = kmalloc(sizeof *srq, GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
+               return -EOPNOTSUPP;
 
        if (udata) {
-               if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
-                       err = -EFAULT;
-                       goto err_free;
-               }
+               if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+                       return -EFAULT;
 
-               err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+               err = mthca_map_user_db(to_mdev(ibsrq->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);
 
                if (err)
-                       goto err_free;
+                       return err;
 
                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index     = ucmd.db_index;
        }
 
-       err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
+       err = mthca_alloc_srq(to_mdev(ibsrq->device), to_mpd(ibsrq->pd),
                              &init_attr->attr, srq, udata);
 
        if (err && udata)
-               mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+               mthca_unmap_user_db(to_mdev(ibsrq->device), &context->uar,
                                    context->db_tab, ucmd.db_index);
 
        if (err)
-               goto err_free;
+               return err;
 
-       if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
-               mthca_free_srq(to_mdev(pd->device), srq);
-               err = -EFAULT;
-               goto err_free;
+       if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+               mthca_free_srq(to_mdev(ibsrq->device), srq);
+               return -EFAULT;
        }
 
-       return &srq->ibsrq;
-
-err_free:
-       kfree(srq);
-
-       return ERR_PTR(err);
+       return 0;
 }
 
-static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+static void mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
        if (udata) {
                struct mthca_ucontext *context =
        }
 
        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
-       kfree(srq);
-
-       return 0;
 }
 
 static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
        .modify_srq = mthca_modify_srq,
        .post_srq_recv = mthca_arbel_post_srq_recv,
        .query_srq = mthca_query_srq,
+
+       INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
 };
 
 static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
        .modify_srq = mthca_modify_srq,
        .post_srq_recv = mthca_tavor_post_srq_recv,
        .query_srq = mthca_query_srq,
+
+       INIT_RDMA_OBJ_SIZE(ib_srq, mthca_srq, ibsrq),
 };
 
 static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
 
        return status;
 }
 
-int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
 {
-       int status = -ENOMEM;
        struct ocrdma_destroy_srq *cmd;
        struct pci_dev *pdev = dev->nic_info.pdev;
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
        if (!cmd)
-               return status;
+               return;
        cmd->id = srq->id;
-       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        if (srq->rq.va)
                dma_free_coherent(&pdev->dev, srq->rq.len,
                                  srq->rq.va, srq->rq.pa);
        kfree(cmd);
-       return status;
 }
 
 static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
 
                          struct ocrdma_pd *);
 int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
 int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
-int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq);
 
 int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
 void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
 
        .modify_srq = ocrdma_modify_srq,
        .post_srq_recv = ocrdma_post_srq_recv,
        .query_srq = ocrdma_query_srq,
+
+       INIT_RDMA_OBJ_SIZE(ib_srq, ocrdma_srq, ibsrq),
 };
 
 static int ocrdma_register_device(struct ocrdma_dev *dev)
 
        return status;
 }
 
-struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
-                                struct ib_srq_init_attr *init_attr,
-                                struct ib_udata *udata)
+int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+                     struct ib_udata *udata)
 {
-       int status = -ENOMEM;
-       struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
-       struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
-       struct ocrdma_srq *srq;
+       int status;
+       struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
+       struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
 
        if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        if (init_attr->attr.max_wr > dev->attr.max_rqe)
-               return ERR_PTR(-EINVAL);
-
-       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(status);
+               return -EINVAL;
 
        spin_lock_init(&srq->q_lock);
        srq->pd = pd;
        srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
        status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
        if (status)
-               goto err;
+               return status;
 
-       if (udata == NULL) {
-               status = -ENOMEM;
+       if (!udata) {
                srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
                                             GFP_KERNEL);
-               if (srq->rqe_wr_id_tbl == NULL)
+               if (!srq->rqe_wr_id_tbl) {
+                       status = -ENOMEM;
                        goto arm_err;
+               }
 
                srq->bit_fields_len = (srq->rq.max_cnt / 32) +
                    (srq->rq.max_cnt % 32 ? 1 : 0);
                srq->idx_bit_fields =
                    kmalloc_array(srq->bit_fields_len, sizeof(u32),
                                  GFP_KERNEL);
-               if (srq->idx_bit_fields == NULL)
+               if (!srq->idx_bit_fields) {
+                       status = -ENOMEM;
                        goto arm_err;
+               }
                memset(srq->idx_bit_fields, 0xff,
                       srq->bit_fields_len * sizeof(u32));
        }
                        goto arm_err;
        }
 
-       return &srq->ibsrq;
+       return 0;
 
 arm_err:
        ocrdma_mbx_destroy_srq(dev, srq);
-err:
        kfree(srq->rqe_wr_id_tbl);
        kfree(srq->idx_bit_fields);
-       kfree(srq);
-       return ERR_PTR(status);
+       return status;
 }
 
 int ocrdma_modify_srq(struct ib_srq *ibsrq,
        return status;
 }
 
-int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
-       int status;
        struct ocrdma_srq *srq;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
 
        srq = get_ocrdma_srq(ibsrq);
 
-       status = ocrdma_mbx_destroy_srq(dev, srq);
+       ocrdma_mbx_destroy_srq(dev, srq);
 
        if (srq->pd->uctx)
                ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
 
        kfree(srq->idx_bit_fields);
        kfree(srq->rqe_wr_id_tbl);
-       kfree(srq);
-       return status;
 }
 
 /* unprivileged verbs and their support functions. */
 
 int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
 
-struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
-                                struct ib_udata *);
+int ocrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attr,
+                     struct ib_udata *udata);
 int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
                      enum ib_srq_attr_mask, struct ib_udata *);
 int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
-int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
                         const struct ib_recv_wr **bad_recv_wr);
 
 
 
        INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
 };
 
 
        }
 }
 
-static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
+static int qedr_check_srq_params(struct qedr_dev *dev,
                                 struct ib_srq_init_attr *attrs,
                                 struct ib_udata *udata)
 {
        return rc;
 }
 
-struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
-                              struct ib_srq_init_attr *init_attr,
-                              struct ib_udata *udata)
+int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+                   struct ib_udata *udata)
 {
        struct qed_rdma_destroy_srq_in_params destroy_in_params;
        struct qed_rdma_create_srq_in_params in_params = {};
-       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
        struct qed_rdma_create_srq_out_params out_params;
-       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
        struct qedr_create_srq_ureq ureq = {};
        u64 pbl_base_addr, phy_prod_pair_addr;
        struct qedr_srq_hwq_info *hw_srq;
        u32 page_cnt, page_size;
-       struct qedr_srq *srq;
+       struct qedr_srq *srq = get_qedr_srq(ibsrq);
        int rc = 0;
 
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "create SRQ called from %s (pd %p)\n",
                 (udata) ? "User lib" : "kernel", pd);
 
-       rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
+       rc = qedr_check_srq_params(dev, init_attr, udata);
        if (rc)
-               return ERR_PTR(-EINVAL);
-
-       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
+               return -EINVAL;
 
        srq->dev = dev;
        hw_srq = &srq->hw_srq;
 
        DP_DEBUG(dev, QEDR_MSG_SRQ,
                 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
-       return &srq->ibsrq;
+       return 0;
 
 err2:
        destroy_in_params.srq_id = srq->srq_id;
        else
                qedr_free_srq_kernel_params(srq);
 err0:
-       kfree(srq);
-
-       return ERR_PTR(-EFAULT);
+       return -EFAULT;
 }
 
-int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
        struct qed_rdma_destroy_srq_in_params in_params = {};
        struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
        DP_DEBUG(dev, QEDR_MSG_SRQ,
                 "destroy srq: destroyed srq with srq_id=0x%0x\n",
                 srq->srq_id);
-       kfree(srq);
-
-       return 0;
 }
 
 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 
                  int qp_attr_mask, struct ib_qp_init_attr *);
 int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
 
-struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
-                              struct ib_srq_init_attr *attr,
-                              struct ib_udata *udata);
+int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *attr,
+                   struct ib_udata *udata);
 int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int qedr_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-int qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 int qedr_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                       const struct ib_recv_wr **bad_recv_wr);
 int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
 
        .destroy_srq = pvrdma_destroy_srq,
        .modify_srq = pvrdma_modify_srq,
        .query_srq = pvrdma_query_srq,
+
+       INIT_RDMA_OBJ_SIZE(ib_srq, pvrdma_srq, ibsrq),
 };
 
 static int pvrdma_register_device(struct pvrdma_dev *dev)
 
  * @init_attr: shared receive queue attributes
  * @udata: user data
  *
- * @return: the ib_srq pointer on success, otherwise returns an errno.
+ * @return: 0 on success, otherwise returns an errno.
  */
-struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
-                                struct ib_srq_init_attr *init_attr,
-                                struct ib_udata *udata)
+int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+                     struct ib_udata *udata)
 {
-       struct pvrdma_srq *srq = NULL;
-       struct pvrdma_dev *dev = to_vdev(pd->device);
+       struct pvrdma_srq *srq = to_vsrq(ibsrq);
+       struct pvrdma_dev *dev = to_vdev(ibsrq->device);
        union pvrdma_cmd_req req;
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
        struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
-       struct pvrdma_create_srq_resp srq_resp = {0};
+       struct pvrdma_create_srq_resp srq_resp = {};
        struct pvrdma_create_srq ucmd;
        unsigned long flags;
        int ret;
                /* No support for kernel clients. */
                dev_warn(&dev->pdev->dev,
                         "no shared receive queue support for kernel client\n");
-               return ERR_PTR(-EOPNOTSUPP);
+               return -EOPNOTSUPP;
        }
 
        if (init_attr->srq_type != IB_SRQT_BASIC) {
                dev_warn(&dev->pdev->dev,
                         "shared receive queue type %d not supported\n",
                         init_attr->srq_type);
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        }
 
        if (init_attr->attr.max_wr  > dev->dsr->caps.max_srq_wr ||
            init_attr->attr.max_sge > dev->dsr->caps.max_srq_sge) {
                dev_warn(&dev->pdev->dev,
                         "shared receive queue size invalid\n");
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        }
 
        if (!atomic_add_unless(&dev->num_srqs, 1, dev->dsr->caps.max_srq))
-               return ERR_PTR(-ENOMEM);
-
-       srq = kmalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq) {
-               ret = -ENOMEM;
-               goto err_srq;
-       }
+               return -ENOMEM;
 
        spin_lock_init(&srq->lock);
        refcount_set(&srq->refcnt, 1);
        cmd->hdr.cmd = PVRDMA_CMD_CREATE_SRQ;
        cmd->srq_type = init_attr->srq_type;
        cmd->nchunks = srq->npages;
-       cmd->pd_handle = to_vpd(pd)->pd_handle;
+       cmd->pd_handle = to_vpd(ibsrq->pd)->pd_handle;
        cmd->attrs.max_wr = init_attr->attr.max_wr;
        cmd->attrs.max_sge = init_attr->attr.max_sge;
        cmd->attrs.srq_limit = init_attr->attr.srq_limit;
        if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
                dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
                pvrdma_destroy_srq(&srq->ibsrq, udata);
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        }
 
-       return &srq->ibsrq;
+       return 0;
 
 err_page_dir:
        pvrdma_page_dir_cleanup(dev, &srq->pdir);
 err_umem:
        ib_umem_release(srq->umem);
 err_srq:
-       kfree(srq);
        atomic_dec(&dev->num_srqs);
 
-       return ERR_PTR(ret);
+       return ret;
 }
 
 static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
- *
- * @return: 0 for success.
  */
-int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
 {
        struct pvrdma_srq *vsrq = to_vsrq(srq);
        union pvrdma_cmd_req req;
                         ret);
 
        pvrdma_free_srq(dev, vsrq);
-
-       return 0;
 }
 
 /**
 
                     struct ib_udata *udata);
 void pvrdma_destroy_ah(struct ib_ah *ah, u32 flags);
 
-struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
-                                struct ib_srq_init_attr *init_attr,
-                                struct ib_udata *udata);
+int pvrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+                     struct ib_udata *udata);
 int pvrdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                      enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
 int pvrdma_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-int pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+void pvrdma_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
 
 struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                               struct ib_qp_init_attr *init_attr,
 
  * @srq_init_attr: the attributes of the SRQ
  * @udata: data from libibverbs when creating a user SRQ
  *
- * Return: Allocated srq object
+ * Return: 0 on success
  */
-struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
-                             struct ib_srq_init_attr *srq_init_attr,
-                             struct ib_udata *udata)
+int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
+                  struct ib_udata *udata)
 {
-       struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
-       struct rvt_srq *srq;
+       struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
+       struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        u32 sz;
-       struct ib_srq *ret;
+       int ret;
 
        if (srq_init_attr->srq_type != IB_SRQT_BASIC)
-               return ERR_PTR(-EOPNOTSUPP);
+               return -EOPNOTSUPP;
 
        if (srq_init_attr->attr.max_sge == 0 ||
            srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
            srq_init_attr->attr.max_wr == 0 ||
            srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
-               return ERR_PTR(-EINVAL);
-
-       srq = kzalloc_node(sizeof(*srq), GFP_KERNEL, dev->dparms.node);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
+               return -EINVAL;
 
        /*
         * Need to use vmalloc() if we want to support large #s of entries.
                vzalloc_node(sizeof(struct rvt_rwq) + srq->rq.size * sz,
                             dev->dparms.node);
        if (!srq->rq.wq) {
-               ret = ERR_PTR(-ENOMEM);
+               ret = -ENOMEM;
                goto bail_srq;
        }
 
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
-               int err;
                u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
 
                srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
                if (!srq->ip) {
-                       ret = ERR_PTR(-ENOMEM);
+                       ret = -ENOMEM;
                        goto bail_wq;
                }
 
-               err = ib_copy_to_udata(udata, &srq->ip->offset,
+               ret = ib_copy_to_udata(udata, &srq->ip->offset,
                                       sizeof(srq->ip->offset));
-               if (err) {
-                       ret = ERR_PTR(err);
+               if (ret)
                        goto bail_ip;
-               }
        }
 
        /*
        spin_lock(&dev->n_srqs_lock);
        if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
                spin_unlock(&dev->n_srqs_lock);
-               ret = ERR_PTR(-ENOMEM);
+               ret = -ENOMEM;
                goto bail_ip;
        }
 
                spin_unlock_irq(&dev->pending_lock);
        }
 
-       return &srq->ibsrq;
+       return 0;
 
 bail_ip:
        kfree(srq->ip);
 bail_wq:
        vfree(srq->rq.wq);
 bail_srq:
-       kfree(srq);
        return ret;
 }
 
  * rvt_destroy_srq - destory an srq
  * @ibsrq: srq object to destroy
  *
- * Return always 0
  */
-int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
                kref_put(&srq->ip->ref, rvt_release_mmap_info);
        else
                vfree(srq->rq.wq);
-       kfree(srq);
-
-       return 0;
 }
 
 
 #include <rdma/rdma_vt.h>
 void rvt_driver_srq_init(struct rvt_dev_info *rdi);
-struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
-                             struct ib_srq_init_attr *srq_init_attr,
-                             struct ib_udata *udata);
+int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
+                  struct ib_udata *udata);
 int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata);
 int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-int rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+void rvt_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
 
 #endif          /* DEF_RVTSRQ_H */
 
 
        INIT_RDMA_OBJ_SIZE(ib_ah, rvt_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_pd, rvt_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, rvt_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, rvt_ucontext, ibucontext),
 };
 
 
        [RXE_TYPE_SRQ] = {
                .name           = "rxe-srq",
                .size           = sizeof(struct rxe_srq),
-               .flags          = RXE_POOL_INDEX,
+               .flags          = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
                .min_index      = RXE_MIN_SRQ_INDEX,
                .max_index      = RXE_MAX_SRQ_INDEX,
        },
 
        return err;
 }
 
-static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
-                                    struct ib_srq_init_attr *init,
-                                    struct ib_udata *udata)
+static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
+                         struct ib_udata *udata)
 {
        int err;
-       struct rxe_dev *rxe = to_rdev(ibpd->device);
-       struct rxe_pd *pd = to_rpd(ibpd);
-       struct rxe_srq *srq;
+       struct rxe_dev *rxe = to_rdev(ibsrq->device);
+       struct rxe_pd *pd = to_rpd(ibsrq->pd);
+       struct rxe_srq *srq = to_rsrq(ibsrq);
        struct rxe_create_srq_resp __user *uresp = NULL;
 
        if (udata) {
                if (udata->outlen < sizeof(*uresp))
-                       return ERR_PTR(-EINVAL);
+                       return -EINVAL;
                uresp = udata->outbuf;
        }
 
        if (err)
                goto err1;
 
-       srq = rxe_alloc(&rxe->srq_pool);
-       if (!srq) {
-               err = -ENOMEM;
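+       /*
+        * srq is embedded in the core-allocated ib_srq; register it with
+        * the pool (RXE_POOL_NO_ALLOC) instead of allocating from it.
+        */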
+       err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
+       if (err)
                goto err1;
-       }
 
-       rxe_add_index(srq);
        rxe_add_ref(pd);
        srq->pd = pd;
 
        if (err)
                goto err2;
 
-       return &srq->ibsrq;
+       return 0;
 
 err2:
        rxe_drop_ref(pd);
-       rxe_drop_index(srq);
        rxe_drop_ref(srq);
 err1:
-       return ERR_PTR(err);
+       return err;
 }
 
 static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
        return 0;
 }
 
-static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
        struct rxe_srq *srq = to_rsrq(ibsrq);
 
                rxe_queue_cleanup(srq->rq.queue);
 
        rxe_drop_ref(srq->pd);
-       rxe_drop_index(srq);
        rxe_drop_ref(srq);
-
-       return 0;
 }
 
 static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
 
        INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
 };
 
 
 };
 
 struct rxe_srq {
-       struct rxe_pool_entry   pelem;
        struct ib_srq           ibsrq;
+       struct rxe_pool_entry   pelem;
        struct rxe_pd           *pd;
        struct rxe_rq           rq;
        u32                     srq_num;
 
        int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
        int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
        void (*destroy_ah)(struct ib_ah *ah, u32 flags);
-       struct ib_srq *(*create_srq)(struct ib_pd *pd,
-                                    struct ib_srq_init_attr *srq_init_attr,
-                                    struct ib_udata *udata);
+       int (*create_srq)(struct ib_srq *srq,
+                         struct ib_srq_init_attr *srq_init_attr,
+                         struct ib_udata *udata);
        int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
                          enum ib_srq_attr_mask srq_attr_mask,
                          struct ib_udata *udata);
        int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-       int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
+       void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
        struct ib_qp *(*create_qp)(struct ib_pd *pd,
                                   struct ib_qp_init_attr *qp_init_attr,
                                   struct ib_udata *udata);
 
        DECLARE_RDMA_OBJ_SIZE(ib_ah);
        DECLARE_RDMA_OBJ_SIZE(ib_pd);
+       DECLARE_RDMA_OBJ_SIZE(ib_srq);
        DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
 };