 static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
                        u32 wqe_idx, u32 max_sge)
 {
-       struct hns_roce_rinl_sge *sge_list;
        void *wqe = NULL;
-       u32 i;
 
        wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
        fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
-
-       /* rq support inline data */
-       if (hr_qp->rq_inl_buf.wqe_cnt) {
-               sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
-               hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
-               for (i = 0; i < wr->num_sge; i++) {
-                       sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
-                       sge_list[i].len = wr->sg_list[i].length;
-               }
-       }
 }
 
 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
        return 0;
 }
 
-static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
-                                       struct hns_roce_qp *qp,
-                                       struct ib_wc *wc)
-{
-       struct hns_roce_rinl_sge *sge_list;
-       u32 wr_num, wr_cnt, sge_num;
-       u32 sge_cnt, data_len, size;
-       void *wqe_buf;
-
-       wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
-       wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
-
-       sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
-       sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
-       wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
-       data_len = wc->byte_len;
-
-       for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
-               size = min(sge_list[sge_cnt].len, data_len);
-               memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
-
-               data_len -= size;
-               wqe_buf += size;
-       }
-
-       if (unlikely(data_len)) {
-               wc->status = IB_WC_LOC_LEN_ERR;
-               return -EAGAIN;
-       }
-
-       return 0;
-}
-
 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
                   int num_entries, struct ib_wc *wc)
 {
                wc->opcode = ib_opcode;
 }
 
-static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
-                                    struct hns_roce_v2_cqe *cqe)
-{
-       return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
-              (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
-               hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
-               hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
-              hr_reg_read(cqe, CQE_RQ_INLINE);
-}
-
 static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
 {
-       struct hns_roce_qp *qp = to_hr_qp(wc->qp);
        u32 hr_opcode;
        int ib_opcode;
-       int ret;
 
        wc->byte_len = le32_to_cpu(cqe->byte_cnt);
 
        else
                wc->opcode = ib_opcode;
 
-       if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
-               ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
-               if (unlikely(ret))
-                       return ret;
-       }
-
        wc->sl = hr_reg_read(cqe, CQE_SL);
        wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
        wc->slid = 0;
        hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
                     upper_32_bits(hr_qp->rdb.dma));
 
-       if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
-               hr_reg_write_bool(context, QPC_RQIE,
-                            hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
-
        hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
 
        if (ibqp->srq) {
 
        if (!has_rq) {
                hr_qp->rq.wqe_cnt = 0;
                hr_qp->rq.max_gs = 0;
-               hr_qp->rq_inl_buf.wqe_cnt = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;
 
                                    hr_qp->rq.max_gs);
 
        hr_qp->rq.wqe_cnt = cnt;
-       if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
-           hr_qp->ibqp.qp_type != IB_QPT_UD &&
-           hr_qp->ibqp.qp_type != IB_QPT_GSI)
-               hr_qp->rq_inl_buf.wqe_cnt = cnt;
-       else
-               hr_qp->rq_inl_buf.wqe_cnt = 0;
 
        cap->max_recv_wr = cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
        return 1;
 }
 
-static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
-                              struct ib_qp_init_attr *init_attr)
-{
-       u32 max_recv_sge = init_attr->cap.max_recv_sge;
-       u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
-       struct hns_roce_rinl_wqe *wqe_list;
-       int i;
-
-       /* allocate recv inline buf */
-       wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
-                          GFP_KERNEL);
-       if (!wqe_list)
-               goto err;
-
-       /* Allocate a continuous buffer for all inline sge we need */
-       wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
-                                     sizeof(struct hns_roce_rinl_sge)),
-                                     GFP_KERNEL);
-       if (!wqe_list[0].sg_list)
-               goto err_wqe_list;
-
-       /* Assign buffers of sg_list to each inline wqe */
-       for (i = 1; i < wqe_cnt; i++)
-               wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
-
-       hr_qp->rq_inl_buf.wqe_list = wqe_list;
-
-       return 0;
-
-err_wqe_list:
-       kfree(wqe_list);
-
-err:
-       return -ENOMEM;
-}
-
-static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
-{
-       if (hr_qp->rq_inl_buf.wqe_list)
-               kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
-       kfree(hr_qp->rq_inl_buf.wqe_list);
-}
-
 static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
                        struct ib_qp_init_attr *init_attr,
                        struct ib_udata *udata, unsigned long addr)
        struct hns_roce_buf_attr buf_attr = {};
        int ret;
 
-       if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
-               ret = alloc_rq_inline_buf(hr_qp, init_attr);
-               if (ret) {
-                       ibdev_err(ibdev,
-                                 "failed to alloc inline buf, ret = %d.\n",
-                                 ret);
-                       return ret;
-               }
-       } else {
-               hr_qp->rq_inl_buf.wqe_list = NULL;
-       }
-
        ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
        if (ret) {
                ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
        return 0;
 
 err_inline:
-       free_rq_inline_buf(hr_qp);
 
        return ret;
 }
 static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
        hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
-       free_rq_inline_buf(hr_qp);
 }
 
 static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,