                /* The state page is after PPHWSP */
                lrc->ring_lcra =
-                       i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+                       guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 
-               lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
+               lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
                lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
                lrc->ring_next_free_location = lrc->ring_begin;
                lrc->ring_current_tail_pointer_value = 0;
         * The doorbell, process descriptor, and workqueue are all parts
         * of the client object, which the GuC will reference via the GGTT
         */
-       gfx_addr = i915_ggtt_offset(client->vma);
+       gfx_addr = guc_ggtt_offset(client->vma);
        desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
                                client->doorbell_offset;
        desc.db_trigger_cpu =
                (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
                (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
 
-       offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+       offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
        guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
        guc_policies_init(policies);
 
        ads->scheduler_policies =
-               i915_ggtt_offset(vma) + sizeof(struct guc_ads);
+               guc_ggtt_offset(vma) + sizeof(struct guc_ads);
 
        /* MMIO reg state */
        reg_state = (void *)policies + sizeof(struct guc_policies);
        /* any value greater than GUC_POWER_D0 */
        data[1] = GUC_POWER_D1;
        /* first page is shared data with GuC */
-       data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
 
        return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
        data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
        data[1] = GUC_POWER_D0;
        /* first page is shared data with GuC */
-       data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+       data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
 
        return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
 
                params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
 
        if (guc->ads_vma) {
-               u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+               u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
                params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
                params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
        }
 
        /* If GuC submission is enabled, set up additional parameters here */
        if (i915.enable_guc_submission) {
-               u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
+               u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
                u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
 
                pgs >>= PAGE_SHIFT;
        I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
        /* Set the source address for the new blob */
-       offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
+       offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
        I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
        I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
 
 
 #include "i915_guc_reg.h"
 #include "intel_ringbuffer.h"
 
+#include "i915_vma.h"
+
 struct drm_i915_gem_request;
 
 /*
 void i915_guc_unregister(struct drm_i915_private *dev_priv);
 int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
 
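+/*
+ * GuC does not accept GGTT addresses that overlap the WOPCM region,
+ * so any object handed to the firmware must be bound above
+ * GUC_WOPCM_TOP.  Assert that here so a bad binding is caught at the
+ * caller rather than as a fault inside the GuC.
+ */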
+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+       u32 offset = i915_ggtt_offset(vma);
+       GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+       return offset;
+}
+
 #endif
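
As a side note on how the new assertion stays satisfied in practice: objects that the GuC must see are pinned into the global GTT with an offset bias so they end up above the WOPCM region. A minimal sketch, assuming the existing i915_vma_pin() API with its PIN_GLOBAL/PIN_OFFSET_BIAS flags; the helper name below is illustrative only and not part of this patch:

static int bind_vma_for_guc(struct i915_vma *vma)
{
	/*
	 * Bias the GGTT binding above GUC_WOPCM_TOP so that
	 * guc_ggtt_offset() can never trip its GEM_BUG_ON() for
	 * this vma.
	 */
	return i915_vma_pin(vma, 0, PAGE_SIZE,
			    PIN_GLOBAL | PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
}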