        if (type >= RDMA_NL_NUM_CLIENTS)
                return false;
 
-       return (op < max_num_ops[type]) ? true : false;
+       return op < max_num_ops[type];
 }
 
 static const struct rdma_nl_cbs *
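
For reference, a minimal standalone sketch, not part of the patch, of why the removed "cond ? true : false" form is redundant: in C99 and later, converting any scalar to bool/_Bool already yields 0 or 1. The names "caps" and "DEMO_FLAG" below are made up for illustration only.

#include <stdbool.h>
#include <assert.h>

/* DEMO_FLAG is a made-up capability bit, for illustration only. */
#define DEMO_FLAG 0x40u

int main(void)
{
        unsigned int caps = 0xc0u;

        /* Old style: the ternary normalizes the masked value by hand. */
        bool old_style = (caps & DEMO_FLAG) ? true : false;

        /* New style: conversion to _Bool yields 0 or 1 for any value. */
        bool new_style = caps & DEMO_FLAG;

        assert(old_style == new_style); /* both are true */
        return 0;
}

The same implicit conversion applies when a masked flag is passed to a bool parameter or returned from a function returning bool, which is why the direct forms in the hunks below are equivalent to the ternaries they replace.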
 
        qplqp->pd = &pd->qplib_pd;
        qplqp->qp_handle = (u64)qplqp;
        qplqp->max_inline_data = init_attr->cap.max_inline_data;
-       qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
-                           true : false);
+       qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
        qptype = bnxt_re_init_qp_type(rdev, init_attr);
        if (qptype < 0) {
                rc = qptype;
        }
 
        is_eth = true;
-       is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
+       is_vlan = vlan_id && (vlan_id < 0x1000);
 
        ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
                          ip_version, is_udp, 0, &qp->qp1_hdr);
 
        cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
 
        cctx->modes.dbr_pacing =
-               le32_to_cpu(resp.flags_ext2) & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ?
-               true : false;
+               le32_to_cpu(resp.flags_ext2) &
+               FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED;
        return 0;
 }
 
         */
        if ((prio_map == 0 && rdev->qplib_res.prio) ||
            (prio_map != 0 && !rdev->qplib_res.prio)) {
-               rdev->qplib_res.prio = prio_map ? true : false;
-
+               rdev->qplib_res.prio = prio_map;
                bnxt_re_update_gid(rdev);
        }
 
 
        qp->state = sb->en_sqd_async_notify_state &
                        CREQ_QUERY_QP_RESP_SB_STATE_MASK;
        qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
-                                 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
-                                 true : false;
+                                 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
        qp->access = sb->access;
        qp->pkey_index = le16_to_cpu(sb->pkey);
        qp->qkey = le32_to_cpu(sb->qkey);
 
                return false;
        }
 
-       return hop_num ? true : false;
+       return hop_num;
 }
 
 static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
 
        bool read_fence = false;
        u16 quanta;
 
-       info->push_wqe = qp->push_db ? true : false;
+       info->push_wqe = qp->push_db;
 
        op_info = &info->op.rdma_write;
        if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
        u16 quanta;
        u64 hdr;
 
-       info->push_wqe = qp->push_db ? true : false;
+       info->push_wqe = qp->push_db;
 
        op_info = &info->op.rdma_read;
        if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
        bool read_fence = false;
        u16 quanta;
 
-       info->push_wqe = qp->push_db ? true : false;
+       info->push_wqe = qp->push_db;
 
        op_info = &info->op.send;
        if (qp->max_sq_frag_cnt < op_info->num_sges)
        u32 i, total_size = 0;
        u16 quanta;
 
-       info->push_wqe = qp->push_db ? true : false;
+       info->push_wqe = qp->push_db;
        op_info = &info->op.rdma_write;
 
        if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
        u32 i, total_size = 0;
        u16 quanta;
 
-       info->push_wqe = qp->push_db ? true : false;
+       info->push_wqe = qp->push_db;
        op_info = &info->op.send;
 
        if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
        bool local_fence = false;
        struct ib_sge sge = {};
 
-       info->push_wqe = qp->push_db ? true : false;
+       info->push_wqe = qp->push_db;
        op_info = &info->op.inv_local_stag;
        local_fence = info->local_fence;
 
 
        refcount_set(&iwqp->refcnt, 1);
        spin_lock_init(&iwqp->lock);
        spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
-       iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+       iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
        rf->qp_table[qp_num] = iwqp;
 
        if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
                set_ib_wc_op_sq(cq_poll_info, entry);
        } else {
                set_ib_wc_op_rq(cq_poll_info, entry,
-                               qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
-                               true : false);
+                               qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
                if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
                    cq_poll_info->stag_invalid_set) {
                        entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
 
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        qp->state = OCRDMA_QPS_RST;
-       qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+       qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 }
 
 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
 
 
        qp->prev_wqe_size = 0;
 
-       qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+       qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        qp->dev = dev;
        if (qedr_qp_has_sq(qp)) {
                qedr_reset_qp_hwq_info(&qp->sq);