ah->qplib_ah.flow_label = grh->flow_label;
        ah->qplib_ah.hop_limit = grh->hop_limit;
        ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
-       if (ib_pd->uobject &&
+       if (udata &&
            !rdma_is_multicast_addr((struct in6_addr *)
                                    grh->dgid.raw) &&
            !rdma_link_local_addr((struct in6_addr *)
        }
 
        /* Write AVID to shared page. */
-       if (ib_pd->uobject) {
+       if (udata) {
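+               /* udata is only supplied by user space callers, so
+                * ib_pd->uobject is guaranteed to be valid here.
+                */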
                struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
                struct bnxt_re_ucontext *uctx;
                unsigned long flag;
 
         * Kernel users need more wq space for fastreg WRs which can take
         * 2 WR fragments.
         */
-       ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
+       ucontext = udata ? to_iwch_ucontext(pd->uobject->context) : NULL;
        if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
                wqsize = roundup_pow_of_two(rqsize +
                                roundup_pow_of_two(attrs->cap.max_send_wr * 2));
 
        if (sqsize < 8)
                sqsize = 8;
 
-       ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+       ucontext = udata ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
        qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
        if (!qhp)
        rqsize = attrs->attr.max_wr + 1;
        rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
 
-       ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+       ucontext = udata ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
        srq = kzalloc(sizeof(*srq), GFP_KERNEL);
        if (!srq)
 
        struct hns_roce_qp_work *qp_work;
        struct hns_roce_v1_priv *priv;
        struct hns_roce_cq *send_cq, *recv_cq;
-       int is_user = !!ibqp->pd->uobject;
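+       /* only QPs created through uverbs carry a uobject */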
+       bool is_user = ibqp->uobject;
        int is_timeout = 0;
        int ret;
 
 
 
 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
                                         struct hns_roce_qp *hr_qp,
-                                        int is_user)
+                                        bool is_user)
 {
        struct hns_roce_cq *send_cq, *recv_cq;
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        int ret;
 
-       ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, !!ibqp->pd->uobject);
+       ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, ibqp->uobject);
        if (ret) {
                dev_err(hr_dev->dev, "Destroy qp failed(%d)\n", ret);
                return ret;
 
 EXPORT_SYMBOL_GPL(hns_roce_release_range_qp);
 
 static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
-                               struct ib_qp_cap *cap, int is_user, int has_rq,
+                               struct ib_qp_cap *cap, bool is_user, int has_rq,
                                struct hns_roce_qp *hr_qp)
 {
        struct device *dev = hr_dev->dev;
        else
                hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);
 
-       ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
+       ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
                                   hns_roce_qp_has_rq(init_attr), hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_set_rq_size failed\n");
                                init_attr->cap.max_recv_sge];
        }
 
-       if (ib_pd->uobject) {
+       if (udata) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "ib_copy_from_udata error for create qp\n");
                        ret = -EFAULT;
        else
                hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
 
-       if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
+       if (udata && (udata->outlen >= sizeof(resp)) &&
                (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
 
                /* indicate kernel supports rq record db */
                hns_roce_release_range_qp(hr_dev, qpn, 1);
 
 err_wrid:
-       if (ib_pd->uobject) {
+       if (udata) {
                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_rq(init_attr))
        }
 
 err_sq_dbmap:
-       if (ib_pd->uobject)
+       if (udata)
                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
                    (udata->inlen >= sizeof(ucmd)) &&
                    (udata->outlen >= sizeof(resp)) &&
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
 
 err_buf:
-       if (ib_pd->uobject)
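+       /* a umem exists only when the buffer came from user space */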
+       if (hr_qp->umem)
                ib_umem_release(hr_qp->umem);
        else
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
 
 err_db:
-       if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
+       if (!udata && hns_roce_qp_has_rq(init_attr) &&
            (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
                hns_roce_free_db(hr_dev, &hr_qp->rdb);
 
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
-               if (pd->uobject) {
+               if (udata) {
                        dev_err(dev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }
 
        srq->event = hns_roce_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->srqn;
 
-       if (pd->uobject) {
+       if (udata) {
                if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
                        ret = -EFAULT;
                        goto err_wrid;
 
                        goto error;
                }
                iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
-               if (ibpd->uobject && ibpd->uobject->context) {
-                       iwqp->user_mode = 1;
-                       ucontext = to_ucontext(ibpd->uobject->context);
-
-                       if (req.user_wqe_buffers) {
-                               struct i40iw_pbl *iwpbl;
-
-                               spin_lock_irqsave(
-                                   &ucontext->qp_reg_mem_list_lock, flags);
-                               iwpbl = i40iw_get_pbl(
-                                   (unsigned long)req.user_wqe_buffers,
-                                   &ucontext->qp_reg_mem_list);
-                               spin_unlock_irqrestore(
-                                   &ucontext->qp_reg_mem_list_lock, flags);
-
-                               if (!iwpbl) {
-                                       err_code = -ENODATA;
-                                       i40iw_pr_err("no pbl info\n");
-                                       goto error;
-                               }
-                               memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
+               iwqp->user_mode = 1;
+               ucontext = to_ucontext(ibpd->uobject->context);
+
+               if (req.user_wqe_buffers) {
+                       struct i40iw_pbl *iwpbl;
+
+                       spin_lock_irqsave(
+                           &ucontext->qp_reg_mem_list_lock, flags);
+                       iwpbl = i40iw_get_pbl(
+                           (unsigned long)req.user_wqe_buffers,
+                           &ucontext->qp_reg_mem_list);
+                       spin_unlock_irqrestore(
+                           &ucontext->qp_reg_mem_list_lock, flags);
+
+                       if (!iwpbl) {
+                               err_code = -ENODATA;
+                               i40iw_pr_err("no pbl info\n");
+                               goto error;
                        }
+                       memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
                }
                err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
        } else {
        iwdev->qp_table[qp_num] = iwqp;
        i40iw_add_pdusecount(iwqp->iwpd);
        i40iw_add_devusecount(iwdev);
-       if (ibpd->uobject && udata) {
+       if (udata) {
                memset(&uresp, 0, sizeof(uresp));
                uresp.actual_sq_size = sq_size;
                uresp.actual_rq_size = rq_size;
                ib_umem_release(iwmr->region);
 
        if (iwmr->type != IW_MEMREG_TYPE_MEM) {
-               if (ibpd->uobject) {
+               /* region is released. only test for userness. */
+               if (iwmr->region) {
                        struct i40iw_ucontext *ucontext;
 
                        ucontext = to_ucontext(ibpd->uobject->context);
 
 }
 
 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-                      int is_user, int has_rq, struct mlx4_ib_qp *qp,
+                      bool is_user, int has_rq, struct mlx4_ib_qp *qp,
                       u32 inl_recv_sz)
 {
        /* Sanity check RQ size before proceeding */
                qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
 
 
-       if (pd->uobject) {
+       if (udata) {
                union {
                        struct mlx4_ib_create_qp qp;
                        struct mlx4_ib_create_wq wq;
                        qp->flags |= MLX4_IB_QP_SCATTER_FCS;
                }
 
-               err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+               err = set_rq_size(dev, &init_attr->cap, udata,
                                  qp_has_rq(init_attr), qp, qp->inl_recv_sz);
                if (err)
                        goto err;
                }
                qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
        } else {
-               err = set_rq_size(dev, &init_attr->cap, !!pd->uobject,
+               err = set_rq_size(dev, &init_attr->cap, udata,
                                  qp_has_rq(init_attr), qp, 0);
                if (err)
                        goto err;
        if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
                free_proxy_bufs(pd->device, qp);
 err_wrid:
-       if (pd->uobject) {
+       if (udata) {
                if (qp_has_rq(init_attr))
                        mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &qp->db);
        } else {
        mlx4_mtt_cleanup(dev->dev, &qp->mtt);
 
 err_buf:
-       if (pd->uobject)
+       if (qp->umem)
                ib_umem_release(qp->umem);
        else
                mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 
 err_db:
-       if (!pd->uobject && qp_has_rq(init_attr))
+       if (!udata && qp_has_rq(init_attr))
                mlx4_db_free(dev->dev, &qp->db);
 
 err:
 }
 
 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-                             enum mlx4_ib_source_type src, int is_user)
+                             enum mlx4_ib_source_type src, bool is_user)
 {
        struct mlx4_ib_cq *send_cq, *recv_cq;
        unsigned long flags;
-               struct mlx4_ib_pd *pd;
-
-               pd = get_pd(mqp);
-               destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, !!pd->ibpd.uobject);
+               destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, qp->uobject);
        }
 
        if (is_sqp(dev, mqp))
        struct mlx4_ib_create_wq ucmd;
        int err, required_cmd_sz;
 
-       if (!(udata && pd->uobject))
+       if (!udata)
                return ERR_PTR(-EINVAL);
 
        required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
 
 
        buf_size = srq->msrq.max * desc_size;
 
-       if (pd->uobject) {
+       if (udata) {
                struct mlx4_ib_create_srq ucmd;
 
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
        srq->msrq.event = mlx4_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
 
-       if (pd->uobject)
+       if (udata)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_wrid;
        return &srq->ibsrq;
 
 err_wrid:
-       if (pd->uobject)
+       if (udata)
                mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
        else
                kvfree(srq->wrid);
        mlx4_mtt_cleanup(dev->dev, &srq->mtt);
 
 err_buf:
-       if (pd->uobject)
+       if (srq->umem)
                ib_umem_release(srq->umem);
        else
                mlx4_buf_free(dev->dev, buf_size, &srq->buf);
 
 err_db:
-       if (!pd->uobject)
+       if (!udata)
                mlx4_db_free(dev->dev, &srq->db);
 
 err_srq:
 
                qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
        }
 
-       if (pd && pd->uobject) {
+       if (udata) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        mlx5_ib_dbg(dev, "copy failed\n");
                        return -EFAULT;
 
        qp->has_rq = qp_has_rq(init_attr);
        err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
-                         qp, (pd && pd->uobject) ? &ucmd : NULL);
+                         qp, udata ? &ucmd : NULL);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                return err;
        }
 
        if (pd) {
-               if (pd->uobject) {
+               if (udata) {
                        __u32 max_wqes =
                                1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                        mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
        if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
                configure_responder_scat_cqe(init_attr, qpc);
                configure_requester_scat_cqe(dev, init_attr,
-                                            (pd && pd->uobject) ? &ucmd : NULL,
+                                            udata ? &ucmd : NULL,
                                             qpc);
        }
 
                dev = to_mdev(pd->device);
 
                if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
-                       if (!pd->uobject) {
+                       if (!udata) {
                                mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
                                return ERR_PTR(-EINVAL);
                        } else if (!to_mucontext(pd->uobject->context)->cqe_version) {
 
        }
        in.type = init_attr->srq_type;
 
-       if (pd->uobject)
+       if (udata)
                err = create_srq_user(pd, srq, &in, udata, buf_size);
        else
                err = create_srq_kernel(dev, srq, &in, buf_size);
 
        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
-                            pd->uobject ? "user" : "kernel", err);
+                            udata ? "user" : "kernel", err);
                goto err_srq;
        }
 
        srq->msrq.event = mlx5_ib_srq_event;
        srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
 
-       if (pd->uobject)
+       if (udata)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
                        mlx5_ib_dbg(dev, "copy to user failed\n");
                        err = -EFAULT;
        mlx5_cmd_destroy_srq(dev, &srq->msrq);
 
 err_usr_kern_srq:
-       if (pd->uobject)
+       if (udata)
                destroy_srq_user(pd, srq);
        else
                destroy_srq_kernel(dev, srq);
 
 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
 
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
-                   struct ib_srq_attr *attr, struct mthca_srq *srq);
+                   struct ib_srq_attr *attr, struct mthca_srq *srq,
+                   struct ib_udata *udata);
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
 int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   struct ib_qp_cap *cap,
-                  struct mthca_qp *qp);
+                  struct mthca_qp *qp,
+                  struct ib_udata *udata);
 int mthca_alloc_sqp(struct mthca_dev *dev,
                    struct mthca_pd *pd,
                    struct mthca_cq *send_cq,
                    struct ib_qp_cap *cap,
                    int qpn,
                    int port,
-                   struct mthca_sqp *sqp);
+                   struct mthca_sqp *sqp,
+                   struct ib_udata *udata);
 void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp);
 int mthca_create_ah(struct mthca_dev *dev,
                    struct mthca_pd *pd,
 
        if (!srq)
                return ERR_PTR(-ENOMEM);
 
-       if (pd->uobject) {
+       if (udata) {
                context = to_mucontext(pd->uobject->context);
 
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
        }
 
        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
-                             &init_attr->attr, srq);
+                             &init_attr->attr, srq, udata);
 
-       if (err && pd->uobject)
+       if (err && udata)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);
 
                if (!qp)
                        return ERR_PTR(-ENOMEM);
 
-               if (pd->uobject) {
+               if (udata) {
                        context = to_mucontext(pd->uobject->context);
 
                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
-                                    &init_attr->cap, qp);
+                                    &init_attr->cap, qp, udata);
 
-               if (err && pd->uobject) {
+               if (err && udata) {
                        context = to_mucontext(pd->uobject->context);
 
                        mthca_unmap_user_db(to_mdev(pd->device),
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
-               if (pd->uobject)
+               if (udata)
                        return ERR_PTR(-EINVAL);
 
                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
-                                     to_msqp(qp));
+                                     to_msqp(qp), udata);
                break;
        }
        default:
 
  */
 static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_pd *pd,
-                              struct mthca_qp *qp)
+                              struct mthca_qp *qp,
+                              struct ib_udata *udata)
 {
        int size;
        int err = -ENOMEM;
         * allocate anything.  All we need is to calculate the WQE
         * sizes and the send_wqe_offset, so we're done now.
         */
-       if (pd->ibpd.uobject)
+       if (udata)
                return 0;
 
        size = PAGE_ALIGN(qp->send_wqe_offset +
                                 struct mthca_cq *send_cq,
                                 struct mthca_cq *recv_cq,
                                 enum ib_sig_type send_policy,
-                                struct mthca_qp *qp)
+                                struct mthca_qp *qp,
+                                struct ib_udata *udata)
 {
        int ret;
        int i;
        if (ret)
                return ret;
 
-       ret = mthca_alloc_wqe_buf(dev, pd, qp);
+       ret = mthca_alloc_wqe_buf(dev, pd, qp, udata);
        if (ret) {
                mthca_unmap_memfree(dev, qp);
                return ret;
         * will be allocated and buffers will be initialized in
         * userspace.
         */
-       if (pd->ibpd.uobject)
+       if (udata)
                return 0;
 
        ret = mthca_alloc_memfree(dev, qp);
                   enum ib_qp_type type,
                   enum ib_sig_type send_policy,
                   struct ib_qp_cap *cap,
-                  struct mthca_qp *qp)
+                  struct mthca_qp *qp,
+                  struct ib_udata *udata)
 {
        int err;
 
        qp->port = 0;
 
        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-                                   send_policy, qp);
+                                   send_policy, qp, udata);
        if (err) {
                mthca_free(&dev->qp_table.alloc, qp->qpn);
                return err;
                    struct ib_qp_cap *cap,
                    int qpn,
                    int port,
-                   struct mthca_sqp *sqp)
+                   struct mthca_sqp *sqp,
+                   struct ib_udata *udata)
 {
        u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
        int err;
        sqp->qp.transport = MLX;
 
        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
-                                   send_policy, &sqp->qp);
+                                   send_policy, &sqp->qp, udata);
        if (err)
                goto err_out_free;
 
 
 static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
-                                        struct mthca_tavor_srq_context *context)
+                                        struct mthca_tavor_srq_context *context,
+                                        bool is_user)
 {
        memset(context, 0, sizeof *context);
 
        context->state_pd    = cpu_to_be32(pd->pd_num);
        context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);
 
-       if (pd->ibpd.uobject)
+       if (is_user)
                context->uar =
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
 static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
-                                        struct mthca_arbel_srq_context *context)
+                                        struct mthca_arbel_srq_context *context,
+                                        bool is_user)
 {
        int logsize, max;
 
        context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
        context->db_index = cpu_to_be32(srq->db_index);
        context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
-       if (pd->ibpd.uobject)
+       if (is_user)
                context->logstride_usrpage |=
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
 }
 
 static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
-                              struct mthca_srq *srq)
+                              struct mthca_srq *srq, struct ib_udata *udata)
 {
        struct mthca_data_seg *scatter;
        void *wqe;
        int err;
        int i;
 
-       if (pd->ibpd.uobject)
+       if (udata)
                return 0;
 
        srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
 }
 
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
-                   struct ib_srq_attr *attr, struct mthca_srq *srq)
+                   struct ib_srq_attr *attr, struct mthca_srq *srq,
+                   struct ib_udata *udata)
 {
        struct mthca_mailbox *mailbox;
        int ds;
                if (err)
                        goto err_out;
 
-               if (!pd->ibpd.uobject) {
+               if (!udata) {
                        srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                                                       srq->srqn, &srq->db);
                        if (srq->db_index < 0) {
                goto err_out_db;
        }
 
-       err = mthca_alloc_srq_buf(dev, pd, srq);
+       err = mthca_alloc_srq_buf(dev, pd, srq, udata);
        if (err)
                goto err_out_mailbox;
 
        mutex_init(&srq->mutex);
 
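+       /* udata becomes the new 'bool is_user' argument: a non-NULL udata
+        * means the SRQ is being created for a user space consumer.
+        */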
        if (mthca_is_memfree(dev))
-               mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+               mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
        else
-               mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+               mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);
 
        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
 
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
 
 err_out_free_buf:
-       if (!pd->ibpd.uobject)
+       if (!udata)
                mthca_free_srq_buf(dev, srq);
 
 err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);
 
 err_out_db:
-       if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+       if (!udata && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
 
 err_out_icm:
 
                                }
                                if (req.user_qp_buffer)
                                        nesqp->nesuqp_addr = req.user_qp_buffer;
-                               if ((ibpd->uobject) && (ibpd->uobject->context)) {
+                               if (udata) {
                                        nesqp->user_mode = 1;
                                        nes_ucontext = to_nesucontext(ibpd->uobject->context);
                                        if (virt_wqs) {
 
                        nes_put_cqp_request(nesdev, cqp_request);
 
-                       if (ibpd->uobject) {
+                       if (udata) {
                                uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
                                uresp.mmap_rq_db_index = 0;
                                uresp.actual_sq_size = sq_size;
 
 }
 
 static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
-                                 struct ib_qp_init_attr *attrs)
+                                 struct ib_qp_init_attr *attrs,
+                                 struct ib_udata *udata)
 {
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->qp_type != IB_QPT_RC) &&
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
-       if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+       if (udata && attrs->qp_type == IB_QPT_GSI) {
                pr_err
                    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                     __func__, dev->id, attrs->qp_type);
        struct ocrdma_create_qp_ureq ureq;
        u16 dpp_credit_lmt, dpp_offset;
 
-       status = ocrdma_check_qp_params(ibpd, dev, attrs);
+       status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
        if (status)
                goto gen_err;
 
 
 }
 
 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
-                              struct ib_qp_init_attr *attrs)
+                              struct ib_qp_init_attr *attrs,
+                              struct ib_udata *udata)
 {
        struct qedr_device_attr *qattr = &dev->attr;
 
        }
 
        /* Unprivileged user space cannot create special QP */
-       if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+       if (udata && attrs->qp_type == IB_QPT_GSI) {
                DP_ERR(dev,
                       "create qp: userspace can't create special QPs of type=0x%x\n",
                       attrs->qp_type);
        in_params.srq_id = srq->srq_id;
        dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
 
-       if (ibsrq->pd->uobject)
+       if (ibsrq->uobject)
                qedr_free_srq_user_params(srq);
        else
                qedr_free_srq_kernel_params(srq);
        DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
                 udata ? "user library" : "kernel", pd);
 
-       rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+       rc = qedr_check_qp_attrs(ibpd, dev, attrs, udata);
        if (rc)
                return ERR_PTR(rc);
 
 
                init_completion(&qp->free);
 
                qp->state = IB_QPS_RESET;
-               qp->is_kernel = !(pd->uobject && udata);
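+               /* no udata means the QP was requested by a kernel ULP */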
+               qp->is_kernel = !udata;
 
                if (!qp->is_kernel) {
                        dev_dbg(&dev->pdev->dev,
 
        unsigned long flags;
        int ret;
 
-       if (!(pd->uobject && udata)) {
+       if (!udata) {
                /* No support for kernel clients. */
                dev_warn(&dev->pdev->dev,
                         "no shared receive queue support for kernel client\n");
 
 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
-                    struct ib_pd *ibpd);
+                    struct ib_pd *ibpd, struct ib_udata *udata);
 
 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
 
 
 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
-                    struct ib_pd *ibpd)
+                    struct ib_pd *ibpd,
+                    struct ib_udata *udata)
 {
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
-       struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;
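+       /* kernel consumers pass no udata and thus have no user context */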
+       struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
 
        rxe_add_ref(pd);
        rxe_add_ref(rcq);
 
 
        rxe_add_index(qp);
 
-       err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
+       err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
        if (err)
                goto err3;