                     unsigned long npages, bool readonly, dma_addr_t *dma_addr,
                     struct amdgpu_device *bo_adev, struct dma_fence **fence)
 {
-       struct amdgpu_bo_va bo_va;
        bool table_freed = false;
        uint64_t pte_flags;
        unsigned long last_start;
        pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
                 last_start, last_start + npages - 1, readonly);
 
-       if (prange->svm_bo && prange->ttm_res)
-               bo_va.is_xgmi = amdgpu_xgmi_same_hive(adev, bo_adev);
-
        for (i = offset; i < offset + npages; i++) {
                last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
                dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
 
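The hunk above is apparently svm_range_map_to_gpu() in amdkfd's kfd_svm.c: the local bo_va is only ever written, with bo_va.is_xgmi assigned from amdgpu_xgmi_same_hive() and never read afterwards, so the declaration and the conditional assignment are dropped together. Below is a minimal standalone sketch of the same "set but never read" pattern, using illustrative names rather than the kernel's; a plain scalar dead store like this is the kind of thing GCC (and newer Clang) report under -Wunused-but-set-variable.

static int same_hive(int a, int b)      /* stand-in for amdgpu_xgmi_same_hive(); illustrative only */
{
        return a == b;
}

int map_range(int adev_id, int bo_adev_id)
{
        int is_xgmi;                    /* written below, never read again */

        is_xgmi = same_hive(adev_id, bo_adev_id);

        /* nothing else in the function looks at is_xgmi: a dead store */
        return 0;
}

The remaining hunks switch to a different file, apparently one of the DCN clock-manager update_clocks() paths.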
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
        struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
        struct dc *dc = clk_mgr_base->ctx->dc;
-       int display_count;
        bool update_dppclk = false;
        bool update_dispclk = false;
-       bool enter_display_off = false;
        bool dpp_clock_lowered = false;
        bool force_reset = false;
        bool p_state_change_support;
                dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
        }
 
-       display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
-       if (display_count == 0)
-               enter_display_off = true;
+       clk_mgr_helper_get_active_display_cnt(dc, context);
 
        if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
                clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
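The second file's hunks (apparently a dcn20-era clk_mgr update_clocks() implementation, given the dcn2_read_clocks_from_hw_dentist() call) follow the same pattern: display_count and enter_display_off were computed but nothing later in the function consumed them, so the locals and the display_count == 0 check are removed while the clk_mgr_helper_get_active_display_cnt() call itself is kept, as the + line shows. A before/after sketch with placeholder names, not the DC code:

static int active_display_cnt(int displays)     /* stand-in for clk_mgr_helper_get_active_display_cnt() */
{
        return displays;
}

void update_clocks_example(int displays)
{
        /*
         * Before the patch:
         *      int display_count = active_display_cnt(displays);
         *      bool enter_display_off = false;
         *
         *      if (display_count == 0)
         *              enter_display_off = true;
         *
         * Neither local was referenced again, so after the patch only the
         * bare call remains, exactly as in the diff above.
         */
        active_display_cnt(displays);

        /* the rest of the clock programming is unchanged */
}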