        struct pci_dev *bridge_dev;
        u8 __iomem *registers;
        phys_addr_t gtt_bus_addr;
-       phys_addr_t gma_bus_addr;
        u32 PGETBL_save;
        u32 __iomem *gtt;               /* I915G */
        bool clear_fake_agp; /* on first access via agp, fill with scratch */

                pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
                                      &gma_addr);
 
-       intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+       intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
        if (!intel_enable_gtt())
                return -EIO;
 
        intel_private.clear_fake_agp = true;
-       agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+       agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
 
        return 0;
 }
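
Moving gma_bus_addr from the private bookkeeping struct into intel_private.base
is what makes the address reachable from drm/i915 at all: base is the exported
struct intel_gtt, and intel_gtt_get() hands out a pointer to it. A minimal
sketch of that producer side (essentially what intel-gtt.c already does):

        /* intel_private.base is the exported struct intel_gtt, so anything
         * stored in it (now including gma_bus_addr) becomes visible to
         * drm/i915 through this helper. */
        struct intel_gtt *intel_gtt_get(void)
        {
                return &intel_private.base;
        }
        EXPORT_SYMBOL(intel_gtt_get);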
 
 
        ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
-       dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
-                                                    4096);
+       dev_priv->dri1.gfx_hws_cpu_addr =
+               ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
        if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                                " G33 hw status page\n");
                return -ENOMEM;
        }
 
        aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+       dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
 
        dev_priv->mm.gtt_mapping =
-               io_mapping_create_wc(dev->agp->base, aperture_size);
+               io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+                                    aperture_size);
        if (dev_priv->mm.gtt_mapping == NULL) {
                ret = -EIO;
                goto out_rmmap;
        }
 
-       i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
+       i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+                       aperture_size);
 
        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. */

        destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
        if (dev_priv->mm.gtt_mtrr >= 0) {
-               mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
-                        dev->agp->agp_info.aper_size * 1024 * 1024);
+               mtrr_del(dev_priv->mm.gtt_mtrr,
+                        dev_priv->mm.gtt_base_addr,
+                        aperture_size);
                dev_priv->mm.gtt_mtrr = -1;
        }
        io_mapping_free(dev_priv->mm.gtt_mapping);
 
        io_mapping_free(dev_priv->mm.gtt_mapping);
        if (dev_priv->mm.gtt_mtrr >= 0) {
-               mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
-                        dev->agp->agp_info.aper_size * 1024 * 1024);
+               mtrr_del(dev_priv->mm.gtt_mtrr,
+                        dev_priv->mm.gtt_base_addr,
+                        dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
                dev_priv->mm.gtt_mtrr = -1;
        }
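
Note that both error paths now tear down the MTRR with exactly the base and
size pair used at setup time. mtrr_del() must be called with the same range
that was registered, so deriving both sides from the same GTT-provided values
(instead of mixing in dev->agp->agp_info.aper_size) keeps setup and teardown
consistent. A minimal sketch of the invariant, using hypothetical helper names
around the i915_mtrr_setup() wrapper seen above:

        /* Hypothetical helpers: setup and teardown must use the same
         * (base, size) pair, both derived from the GTT driver. */
        static void example_aperture_mtrr_add(struct drm_i915_private *dev_priv)
        {
                unsigned long size =
                        dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

                i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, size);
        }

        static void example_aperture_mtrr_del(struct drm_i915_private *dev_priv)
        {
                unsigned long size =
                        dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

                if (dev_priv->mm.gtt_mtrr >= 0) {
                        mtrr_del(dev_priv->mm.gtt_mtrr,
                                 dev_priv->mm.gtt_base_addr, size);
                        dev_priv->mm.gtt_mtrr = -1;
                }
        }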
 
 
                unsigned long gtt_end;
 
                struct io_mapping *gtt_mapping;
+               phys_addr_t gtt_base_addr;
                int gtt_mtrr;
 
                /** PPGTT used for aliasing the PPGTT with the GTT */
 
 
        obj->fault_mappable = true;
 
-       pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
+       pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
                page_offset;
 
        /* Finally, remap it using the new GTT offset */
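
The pfn arithmetic itself is unchanged; only the base moves from dev->agp->base
to the cached dev_priv->mm.gtt_base_addr. For concreteness, a worked example
with hypothetical values:

        /*
         * Hypothetical values, PAGE_SHIFT == 12: aperture base 0xd0000000,
         * object bound at GTT offset 0x100000, fault on page 3 of the
         * object (page_offset == 3):
         *
         *   pfn = ((0xd0000000 + 0x100000) >> 12) + 3
         *       = 0xd0100 + 3
         *       = 0xd0103
         *
         * i.e. the aperture page backing the faulting GTT page, which the
         * fault handler then maps with vm_insert_pfn().
         */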
 
                 __func__, obj, obj->gtt_offset, handle,
                 obj->size / 1024);
 
-       gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
+       gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+                             obj->base.size);
        if (gtt_mapping == NULL) {
                DRM_ERROR("failed to map GTT space\n");
                return;
 
                dev->mode_config.max_width = 8192;
                dev->mode_config.max_height = 8192;
        }
-       dev->mode_config.fb_base = dev->agp->base;
+       dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
 
        DRM_DEBUG_KMS("%d display pipe%s available.\n",
                      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
 
        info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
        info->fix.smem_len = size;
 
-       info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
+       info->screen_base =
+               ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+                          size);
        if (!info->screen_base) {
                ret = -ENOSPC;
                goto out_unpin;
 
                                  struct intel_ring_buffer *ring)
 {
        struct drm_i915_gem_object *obj;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        ring->dev = dev;

        if (ret)
                goto err_unref;
 
-       ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
-                                        ring->size);
+       ring->virtual_start =
+               ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+                          ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
 
        dma_addr_t scratch_page_dma;
        /* for ppgtt PDE access */
        u32 __iomem *gtt;
+       /* needed for ioremap in drm/i915 */
+       phys_addr_t gma_bus_addr;
 } *intel_gtt_get(void);
 
 void intel_gtt_chipset_flush(void);
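
With gma_bus_addr exported in struct intel_gtt, drm/i915 can feed every
ioremap from intel_gtt_get() (directly, or via the cached
dev_priv->mm.gtt_base_addr) and never touch dev->agp->base. A minimal
consumer sketch (hypothetical function, not part of the patch):

        #include <linux/io.h>
        #include <drm/intel-gtt.h>

        /* Hypothetical consumer: map a range of the GTT aperture
         * write-combined, starting from the base exported by the GTT
         * driver instead of dev->agp->base. */
        static void __iomem *example_map_aperture(unsigned long offset,
                                                  unsigned long size)
        {
                return ioremap_wc(intel_gtt_get()->gma_bus_addr + offset,
                                  size);
        }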