        case CHIP_NAVI10:
        case CHIP_NAVI14:
        case CHIP_NAVI12:
-#endif
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case CHIP_RENOIR:
 #endif
                return amdgpu_dc != 0;
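
For orientation: the hunk above is from the per-ASIC switch that decides whether
the new display core is used at all (in amdgpu this is likely
amdgpu_device_asic_has_dc_support()). With the DCN2_1 guard gone, Renoir is
gated only by the amdgpu_dc module parameter, like the Navi parts. A minimal
sketch of the resulting shape, assuming that function name:

    bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
    {
            switch (asic_type) {
            case CHIP_NAVI10:
            case CHIP_NAVI14:
            case CHIP_NAVI12:
            case CHIP_RENOIR:
                    /* "dc" module parameter: 1 = on, 0 = off, -1 = auto,
                     * so DC stays enabled by default on these parts.
                     */
                    return amdgpu_dc != 0;
            default:
                    return false;   /* other families elided from this sketch */
            }
    }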
 
 config DRM_AMD_DC_DCN1_0
        def_bool n
        help
-         RV and NV family support for display engine
-
-config DRM_AMD_DC_DCN2_1
-       bool "DCN 2.1 family"
-       depends on DRM_AMD_DC && X86
-       help
-         Choose this option if you want to have
-         Renoir support for display engine
-
-config DRM_AMD_DC_DSC_SUPPORT
-       bool "DSC support"
-       default y
-       depends on DRM_AMD_DC && X86
-       depends on DRM_AMD_DC_DCN1_0
-       help
-         Choose this option if you want to have
-         Dynamic Stream Compression support
+         Raven, Navi and Renoir family support for display engine
 
 config DRM_AMD_DC_HDCP
        bool "Enable HDCP support in DC"
 
        case CHIP_NAVI12:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case CHIP_RENOIR:
-#endif
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
                }
                break;

                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case CHIP_RENOIR:
                adev->mode_info.num_crtc = 4;
                adev->mode_info.num_hpd = 4;
                adev->mode_info.num_dig = 4;
                break;
-#endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
                return -EINVAL;

            adev->asic_type == CHIP_NAVI10 ||
            adev->asic_type == CHIP_NAVI14 ||
            adev->asic_type == CHIP_NAVI12 ||
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
            adev->asic_type == CHIP_RENOIR ||
-#endif
            adev->asic_type == CHIP_RAVEN) {
                /* Fill GFX9 params */
                tiling_info->gfx9.num_pipes =
 
        return PP_SMU_RESULT_FAIL;
 }
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 enum pp_smu_status pp_rn_get_dpm_clock_table(
                struct pp_smu *pp, struct dpm_clocks *clock_table)
 {
 
        return PP_SMU_RESULT_OK;
 }
-#endif
 
 void dm_pp_get_funcs(
                struct dc_context *ctx,
                funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
                break;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
        case DCN_VERSION_2_1:
                funcs->ctx.ver = PP_SMU_VER_RN;
                funcs->rn_funcs.pp_smu.dm = ctx;
                funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
                funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
                break;
-#endif
        default:
                DRM_ERROR("smu version is not supported !\n");
                break;
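
dm_pp_get_funcs() fills exactly one member of the per-version union and records
which one in ctx.ver, so a caller must check the version before touching the
matching function table. A minimal caller sketch for the Renoir path; the
wrapper name is hypothetical, the fields are the ones populated above:

    static enum pp_smu_status dm_get_rn_dpm_clocks(struct pp_smu_funcs *funcs,
                    struct dpm_clocks *table)
    {
            /* Only valid when dm_pp_get_funcs() selected the RN table. */
            if (funcs->ctx.ver != PP_SMU_VER_RN ||
                            !funcs->rn_funcs.get_dpm_clock_table)
                    return PP_SMU_RESULT_FAIL;

            return funcs->rn_funcs.get_dpm_clock_table(&funcs->rn_funcs.pp_smu,
                            table);
    }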
 
 DC_LIBS += dcn20
 DC_LIBS += dsc
 DC_LIBS += dcn10 dml
-endif
-
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 DC_LIBS += dcn21
 endif
 
 
        case DCN_VERSION_2_0:
                *h = dal_cmd_tbl_helper_dce112_get_table2();
                return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case DCN_VERSION_2_1:
                *h = dal_cmd_tbl_helper_dce112_get_table2();
                return true;
-#endif
        case DCE_VERSION_12_0:
        case DCE_VERSION_12_1:
                *h = dal_cmd_tbl_helper_dce112_get_table2();
 
 AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
-endif
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 ###############################################################################
 # DCN21
 ###############################################################################
 
 #include "dcn10/rv1_clk_mgr.h"
 #include "dcn10/rv2_clk_mgr.h"
 #include "dcn20/dcn20_clk_mgr.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/rn_clk_mgr.h"
-#endif
 
 
 int clk_mgr_helper_get_active_display_cnt(
 
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case FAMILY_RV:
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
                if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
                        rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
                        break;
                }
-#endif /* DCN2_1 */
                if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
                        rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
                        break;
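
Note the ordering in the FAMILY_RV branch above: Renoir revision IDs (0x91 and
up) also satisfy a plain greater-or-equal Raven2 range check, which is
presumably why ASICREV_IS_RENOIR() is tested before ASICREV_IS_RAVEN2(). A
reduced sketch of the dispatch, with rv1_clk_mgr_construct() (from the include
above) covering the remaining Raven 1 case:

    /* Most specific (newest) revision range first. */
    if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
            rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);  /* DCN 2.1 */
    else if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
            rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);       /* DCN 1.01 */
    else
            rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);       /* DCN 1.0 */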
 
        if (!dc->clk_mgr)
                goto fail;
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
        if (dc->res_pool->funcs->update_bw_bounding_box)
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
-#endif
 
        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
 
 #include "dcn10/dcn10_resource.h"
 #endif
 #include "dcn20/dcn20_resource.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/dcn21_resource.h"
-#endif
 #include "dce120/dce120_resource.h"
 
 #define DC_LOGGER_INIT(logger)
                dc_version = DCN_VERSION_1_0;
                if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
                        dc_version = DCN_VERSION_1_01;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
                if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
                        dc_version = DCN_VERSION_2_1;
-#endif
                break;
 #endif
 
        case DCN_VERSION_2_0:
                res_pool = dcn20_create_resource_pool(init_data, dc);
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case DCN_VERSION_2_1:
                res_pool = dcn21_create_resource_pool(init_data, dc);
                break;
-#endif
 #endif
 
        default:
 
        bool dmub_command_table; /* for testing only */
        struct dc_bw_validation_profile bw_val_profile;
        bool disable_fec;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
        bool disable_48mhz_pwrdwn;
-#endif
        /* This forces a hard min on the DCFCLK requested to SMU/PP
         * watermarks are not affected.
         */
 
                SRII(PIXEL_RATE_CNTL, OTG, 4),\
                SRII(PIXEL_RATE_CNTL, OTG, 5)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \
                SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
                SRII(PHASE, DP_DTO, 0),\
                SRII(PIXEL_RATE_CNTL, OTG, 1),\
                SRII(PIXEL_RATE_CNTL, OTG, 2),\
                SRII(PIXEL_RATE_CNTL, OTG, 3)
-#endif
 
 #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
        CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
 
        return status;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 static bool dcn21_dmcu_init(struct dmcu *dmcu)
 {
        struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
 
        return dcn10_dmcu_init(dmcu);
 }
-#endif
 
 static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
                unsigned int start_offset,
        .unlock_phy = dcn20_unlock_phy
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 static const struct dmcu_funcs dcn21_funcs = {
        .dmcu_init = dcn21_dmcu_init,
        .load_iram = dcn10_dmcu_load_iram,
        .unlock_phy = dcn20_unlock_phy
 };
 #endif
-#endif
 
 static void dce_dmcu_construct(
        struct dce_dmcu *dmcu_dce,
        return &dmcu_dce->base;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 struct dmcu *dcn21_dmcu_create(
        struct dc_context *ctx,
        const struct dce_dmcu_registers *regs,
        return &dmcu_dce->base;
 }
 #endif
-#endif
 
 void dce_dmcu_destroy(struct dmcu **dmcu)
 {
 
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 struct dmcu *dcn21_dmcu_create(
        struct dc_context *ctx,
        const struct dce_dmcu_registers *regs,
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask);
-#endif
 
 void dce_dmcu_destroy(struct dmcu **dmcu);
 
 
        SR(DC_IP_REQUEST_CNTL), \
        BL_REG_LIST()
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define HWSEQ_DCN21_REG_LIST()\
        HWSEQ_DCN_REG_LIST(), \
        HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
        SR(D6VGA_CONTROL), \
        SR(DC_IP_REQUEST_CNTL), \
        BL_REG_LIST()
-#endif
 
 struct dce_hwseq_registers {
 
        HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
        HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
        HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
        HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
        HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
        HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
        HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-#endif
 
 #define HWSEQ_REG_FIELD_LIST(type) \
        type DCFE_CLOCK_ENABLE; \
 
        uint32_t DCN_VM_AGP_BASE;
        uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
        uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;
        uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;
        uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;
        uint32_t DCHVM_CLK_CTRL;
        uint32_t DCHVM_RIOMMU_CTRL0;
        uint32_t DCHVM_RIOMMU_STAT0;
-#endif
 };
 
 /* set field name */
                type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
                type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define HUBBUB_HVM_REG_FIELD_LIST(type) \
                type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\
                type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\
                type HOSTVM_POWERSTATUS; \
                type RIOMMU_ACTIVE; \
                type HOSTVM_PREFETCH_DONE
-#endif
 
 struct dcn_hubbub_shift {
        DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
        HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        HUBBUB_HVM_REG_FIELD_LIST(uint8_t);
-#endif
 };
 
 struct dcn_hubbub_mask {
        DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
        HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        HUBBUB_HVM_REG_FIELD_LIST(uint32_t);
-#endif
 };
 
 struct dc;
 
        int i;
        bool allow_self_refresh_force_enable = true;
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
                return;
-#endif
        if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
                allow_self_refresh_force_enable =
                                dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
 
        uint32_t VMID_SETTINGS_0
 
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
        DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
        uint32_t FLIP_PARAMETERS_3;\
        uint32_t FLIP_PARAMETERS_6;\
        uint32_t VBLANK_PARAMETERS_5;\
        uint32_t VBLANK_PARAMETERS_6
-#endif
 
 #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
        DCN_HUBP_REG_FIELD_BASE_LIST(type); \
        type SURFACE_TRIPLE_BUFFER_ENABLE;\
        type VMID
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 #define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
        DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
        type REFCYC_PER_VM_GROUP_FLIP;\
        type REFCYC_PER_PTE_GROUP_FLIP_C; \
        type REFCYC_PER_META_CHUNK_FLIP_C; \
        type VM_GROUP_SIZE
-#endif
 
 
 struct dcn_hubp2_registers {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
-#else
-       DCN2_HUBP_REG_COMMON_VARIABLE_LIST;
-#endif
 };
 
 struct dcn_hubp2_shift {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#else
-       DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
-#endif
 };
 
 struct dcn_hubp2_mask {
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#else
-       DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
-#endif
 };
 
 struct dcn20_hubp {
 
        context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 
        if (vlevel < 2) {
                pipes[0].clks_cfg.voltage = 2;
        context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 
        if (vlevel < 3) {
                pipes[0].clks_cfg.voltage = 3;
        context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 
        pipes[0].clks_cfg.voltage = vlevel;
        pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
        context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-#endif
 }
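
All of the assignments above follow one pattern: the DML get_wm_* helpers report
times in microseconds, while the dcn_watermarks fields (note the _ns suffixes)
store nanoseconds, hence the uniform * 1000; the frac_urg_bw_* fractions are
likewise scaled by 1000 into fixed point. For example, a 4.5 us stutter-exit
figure from DML lands as 4500 ns:

    /* us -> ns: DML reports microseconds, dcn_watermarks stores nanoseconds. */
    context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
            get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;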
 
 void dcn20_calculate_dlg_params(
 
        wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
        wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
        wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
        wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
        wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
-#endif
        dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
 
 }
 
        PP_SMU_UNSUPPORTED,
        PP_SMU_VER_RV,
        PP_SMU_VER_NV,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        PP_SMU_VER_RN,
-#endif
 
        PP_SMU_VER_MAX
 };

        union {
                struct pp_smu_funcs_rv rv_funcs;
                struct pp_smu_funcs_nv nv_funcs;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
                struct pp_smu_funcs_rn rn_funcs;
-#endif
 
        };
 };
 
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
 endif
 ifdef CONFIG_DRM_AMD_DC_DCN1_0
 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
 DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
-endif
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
 endif
 
 
 #include "dcn20/display_rq_dlg_calc_20.h"
 #include "dcn20/display_mode_vba_20v2.h"
 #include "dcn20/display_rq_dlg_calc_20v2.h"
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 #include "dcn21/display_mode_vba_21.h"
 #include "dcn21/display_rq_dlg_calc_21.h"
-#endif
 
 const struct dml_funcs dml20_funcs = {
        .validate = dml20_ModeSupportAndSystemConfigurationFull,
        .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg
 };
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 const struct dml_funcs dml21_funcs = {
        .validate = dml21_ModeSupportAndSystemConfigurationFull,
        .recalculate = dml21_recalculate,
        .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
        .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
 };
-#endif
 
 void dml_init_instance(struct display_mode_lib *lib,
                const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
        case DML_PROJECT_NAVI10v2:
                lib->funcs = dml20v2_funcs;
                break;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
        case DML_PROJECT_DCN21:
                lib->funcs = dml21_funcs;
                break;
-#endif
 
        default:
                break;
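
dml_init_instance() binds the per-project DML entry points once, at resource
pool construction, so all later mode validation for Renoir flows through the
dml21_* functions selected above. A hedged call sketch; the soc/ip bounding-box
identifiers are placeholders for whatever dcn21_resource.c actually defines:

    /* Bind the DCN 2.1 DML entry points to this display_mode_lib instance. */
    dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);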
 
        DML_PROJECT_RAVEN1,
        DML_PROJECT_NAVI10,
        DML_PROJECT_NAVI10v2,
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
        DML_PROJECT_DCN21,
-#endif
 };
 
 struct display_mode_lib;
 
 AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20)
-endif
 
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
+###############################################################################
+# DCN 21
+###############################################################################
 GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o
 
 AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21))
 
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #ifndef __DAL_HW_FACTORY_DCN21_H__
 #define __DAL_HW_FACTORY_DCN21_H__
 
 void dal_hw_factory_dcn21_init(struct hw_factory *factory);
 
 #endif /* __DAL_HW_FACTORY_DCN21_H__ */
-#endif
 
  * Authors: AMD
  *
  */
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #ifndef __DAL_HW_TRANSLATE_DCN21_H__
 #define __DAL_HW_TRANSLATE_DCN21_H__
 
 void dal_hw_translate_dcn21_init(struct hw_translate *tr);
 
 #endif /* __DAL_HW_TRANSLATE_DCN21_H__ */
-#endif
 
 #include "dcn10/hw_factory_dcn10.h"
 #endif
 #include "dcn20/hw_factory_dcn20.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/hw_factory_dcn21.h"
-#endif
 
 #include "diagnostics/hw_factory_diag.h"
 
        case DCN_VERSION_2_0:
                dal_hw_factory_dcn20_init(factory);
                return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case DCN_VERSION_2_1:
                dal_hw_factory_dcn21_init(factory);
                return true;
-#endif
 #endif
 
        default:
 
 #include "dcn10/hw_translate_dcn10.h"
 #endif
 #include "dcn20/hw_translate_dcn20.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #include "dcn21/hw_translate_dcn21.h"
-#endif
 
 #include "diagnostics/hw_translate_diag.h"
 
        case DCN_VERSION_2_0:
                dal_hw_translate_dcn20_init(translate);
                return true;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        case DCN_VERSION_2_1:
                dal_hw_translate_dcn21_init(translate);
                return true;
-#endif
 #endif
 
        default:
 
 struct resource_pool;
 struct dc_state;
 struct resource_context;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 struct clk_bw_params;
-#endif
 
 struct resource_funcs {
        void (*destroy)(struct resource_pool **pool);
                        struct dc_state *context,
                        display_e2e_pipe_params_st *pipes,
                        int pipe_cnt);
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        void (*update_bw_bounding_box)(
                        struct dc *dc,
                        struct clk_bw_params *bw_params);
-#endif
 
 };
 
 
 #define DCN_MINIMUM_DISPCLK_Khz 100000
 #define DCN_MINIMUM_DPPCLK_Khz 100000
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 /* Constants */
 #define DDR4_DRAM_WIDTH   64
 #define WM_A 0
 #define WM_B 1
 #define WM_C 2
 #define WM_D 3
 #define WM_SET_COUNT 4
-#endif
 
 #define DCN_MINIMUM_DISPCLK_Khz 100000
 #define DCN_MINIMUM_DPPCLK_Khz 100000
 
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
 /* Will these bw structures be ASIC specific? */
 
 #define MAX_NUM_DPM_LVL                8
        struct clk_limit_table clk_table;
        struct wm_table wm_table;
 };
-#endif
 /* Public interfaces */
 
 struct clk_states {

        bool psr_allow_active_cache;
        int dprefclk_khz; // Used to program the pixel clock in clock source funcs; need to figure out where this goes
        int dentist_vco_freq_khz;
-#ifdef CONFIG_DRM_AMD_DC_DCN2_1
        struct clk_bw_params *bw_params;
-#endif
 };
 
 /* forward declarations */
 
 struct dcn_watermarks {
        uint32_t pte_meta_urgent_ns;
        uint32_t urgent_ns;
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        uint32_t frac_urg_bw_nom;
        uint32_t frac_urg_bw_flip;
        int32_t urgent_latency_ns;
-#endif
        struct cstate_pstate_watermarks_st cstate_pstate;
 };
 
 
                        enum dc_clock_type clock_type,
                        struct dc_clock_config *clock_cfg);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        bool (*s0i3_golden_init_wa)(struct dc *dc);
-#endif
 };
 
 void color_space_to_black_color(
 
 AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2)
-endif
 ###############################################################################
 # DCN 21
 ###############################################################################
-ifdef CONFIG_DRM_AMD_DC_DCN2_1
 IRQ_DCN21 = irq_service_dcn21.o
 
 AMD_DAL_IRQ_DCN21 = $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
 
 #define ASICREV_IS_NAVI10_P(eChipRev)        (eChipRev < NV_NAVI12_P_A0)
 #define ASICREV_IS_NAVI12_P(eChipRev)        ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
 #define ASICREV_IS_NAVI14_M(eChipRev)        ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
 #define RENOIR_A0 0x91
 #define DEVICE_ID_RENOIR_1636 0x1636   // Renoir
 #define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
-#endif
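
With RENOIR_A0 at 0x91, ASICREV_IS_RENOIR() accepts internal revisions 0x91
through 0xFE. A usage sketch matching the dc_version selection earlier in this
patch:

    if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))   /* 0x91 <= rev < 0xFF */
            dc_version = DCN_VERSION_2_1;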
 
 /*
  * ASIC chip ID
 
        DCN_VERSION_1_0,
        DCN_VERSION_1_01,
        DCN_VERSION_2_0,
-#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
        DCN_VERSION_2_1,
-#endif
        DCN_VERSION_MAX
 };