char __user *data,
                int length)
  {
-       char __iomem *vaddr;
-       int unwritten;
+       char *vaddr;
+       int ret;
  
 -      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 +      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-       if (vaddr == NULL)
-               return -ENOMEM;
-       unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+       ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
 -      kunmap_atomic(vaddr, KM_USER0);
 +      kunmap_atomic(vaddr);
  
-       if (unwritten)
-               return -EFAULT;
- 
-       return 0;
+       return ret;
  }
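
For reference, a minimal sketch of the kmap_atomic() calling convention this diff converts to (the helper name below is hypothetical, not part of the driver): the per-use KM_* slot argument is dropped and the mapping slot is tracked on a per-CPU stack, so kunmap_atomic() only needs the address returned by kmap_atomic().

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper, for illustration only: copy 'len' bytes out of a
 * (possibly highmem) page without sleeping.  Note: no KM_USER0 argument.
 */
static void copy_from_page(struct page *page, unsigned int offset,
                           void *dst, size_t len)
{
        char *vaddr;

        vaddr = kmap_atomic(page);      /* old API: kmap_atomic(page, KM_USER0) */
        memcpy(dst, vaddr + offset, len);
        kunmap_atomic(vaddr);           /* old API: kunmap_atomic(vaddr, KM_USER0) */
}
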
  
  static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
        char *vaddr_atomic;
        unsigned long unwritten;
  
 -      vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
 +      vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
        unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
                                                      user_data, length);
 -      io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
 +      io_mapping_unmap_atomic(vaddr_atomic);
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       return unwritten;
  }
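
A note on the return convention used by fast_user_write() above (and by fast_shmem_read()/fast_shmem_write()): the __copy_*_inatomic helpers return the number of bytes they could not copy, so a non-zero return from the atomic fast path means the caller has to retry via a path that is allowed to sleep and fault the user page in. Roughly, with slow_user_write() as a hypothetical stand-in for the driver's sleeping fallback:

/* Caller-side sketch only; slow_user_write() is a hypothetical stand-in
 * for the sleeping fallback path, not a function in this driver.
 */
static int pwrite_via_gtt(struct io_mapping *mapping, loff_t page_base,
                          int page_offset, char __user *user_data, int length)
{
        /* Fast path: atomic WC mapping, no faults allowed. */
        if (fast_user_write(mapping, page_base, page_offset,
                            user_data, length) == 0)
                return 0;

        /* Some bytes were left uncopied: retry where faulting is allowed. */
        return slow_user_write(mapping, page_base, page_offset,
                               user_data, length);
}
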
  
  /* Here's the write path which can sleep for
                 char __user *data,
                 int length)
  {
-       char __iomem *vaddr;
-       unsigned long unwritten;
+       char *vaddr;
+       int ret;
  
 -      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
 +      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-       if (vaddr == NULL)
-               return -ENOMEM;
-       unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+       ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
 -      kunmap_atomic(vaddr, KM_USER0);
 +      kunmap_atomic(vaddr);
  
-       if (unwritten)
-               return -EFAULT;
-       return 0;
+       return ret;
  }
  
  /**
                }
  
                /* and points to somewhere within the target object. */
-               if (reloc->delta >= target_obj->size) {
+               if (reloc.delta >= target_obj->size) {
                        DRM_ERROR("Relocation beyond target object bounds: "
                                  "obj %p target %d delta %d size %d.\n",
-                                 obj, reloc->target_handle,
-                                 (int) reloc->delta, (int) target_obj->size);
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
-               }
- 
-               ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-               if (ret != 0) {
-                       drm_gem_object_unreference(target_obj);
-                       i915_gem_object_unpin(obj);
-                       return -EINVAL;
+                                 obj, reloc.target_handle,
+                                 (int) reloc.delta, (int) target_obj->size);
+                       ret = -EINVAL;
+                       break;
                }
  
-               /* Map the page containing the relocation we're going to
-                * perform.
-                */
-               reloc_offset = obj_priv->gtt_offset + reloc->offset;
-               reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                                     (reloc_offset &
-                                                      ~(PAGE_SIZE - 1)));
-               reloc_entry = (uint32_t __iomem *)(reloc_page +
-                                                  (reloc_offset & (PAGE_SIZE - 1)));
-               reloc_val = target_obj_priv->gtt_offset + reloc->delta;
- 
- #if WATCH_BUF
-               DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
-                         obj, (unsigned int) reloc->offset,
-                         readl(reloc_entry), reloc_val);
- #endif
-               writel(reloc_val, reloc_entry);
-               io_mapping_unmap_atomic(reloc_page);
- 
-               /* The updated presumed offset for this entry will be
-                * copied back out to the user.
-                */
-               reloc->presumed_offset = target_obj_priv->gtt_offset;
- 
-               drm_gem_object_unreference(target_obj);
-       }
- 
- #if WATCH_BUF
-       if (0)
-               i915_gem_dump_object(obj, 128, __func__, ~0);
- #endif
-       return 0;
- }
- 
- /* Throttle our rendering by waiting until the ring has completed our requests
-  * emitted over 20 msec ago.
-  *
-  * Note that if we were to use the current jiffies each time around the loop,
-  * we wouldn't escape the function with any frames outstanding if the time to
-  * render a frame was over 20ms.
-  *
-  * This should get us reasonable parallelism between CPU and GPU but also
-  * relatively low latency when blocking on a particular request to finish.
-  */
- static int
- i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
- {
-       struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
-       int ret = 0;
-       unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
+               reloc.delta += target_offset;
+               if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+                       uint32_t page_offset = reloc.offset & ~PAGE_MASK;
+                       char *vaddr;
  
-       mutex_lock(&dev->struct_mutex);
-       while (!list_empty(&i915_file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
 -                      vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0);
++                      vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
+                       *(uint32_t *)(vaddr + page_offset) = reloc.delta;
 -                      kunmap_atomic(vaddr, KM_USER0);
++                      kunmap_atomic(vaddr);
+               } else {
+                       uint32_t __iomem *reloc_entry;
+                       void __iomem *reloc_page;
  
-               request = list_first_entry(&i915_file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
+                       ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+                       if (ret)
+                               break;
  
-               if (time_after_eq(request->emitted_jiffies, recent_enough))
-                       break;
+                       /* Map the page containing the relocation we're going to perform.  */
+                       reloc.offset += obj->gtt_offset;
+                       reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 -                                                            reloc.offset & PAGE_MASK,
 -                                                            KM_USER0);
++                                                            reloc.offset & PAGE_MASK);
+                       reloc_entry = (uint32_t __iomem *)
+                               (reloc_page + (reloc.offset & ~PAGE_MASK));
+                       iowrite32(reloc.delta, reloc_entry);
 -                      io_mapping_unmap_atomic(reloc_page, KM_USER0);
++                      io_mapping_unmap_atomic(reloc_page);
+               }
  
-               ret = i915_wait_request(dev, request->seqno, request->ring);
-               if (ret != 0)
-                       break;
+               /* and update the user's relocation entry */
+               reloc.presumed_offset = target_offset;
+               if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+                                             &reloc.presumed_offset,
+                                             sizeof(reloc.presumed_offset))) {
+                   ret = -EFAULT;
+                   break;
+               }
        }
-       mutex_unlock(&dev->struct_mutex);
  
+       drm_gem_object_unreference(target_obj);
        return ret;
  }
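
The removal of i915_gem_ring_throttle() above is interleaved with the new relocation loop, which makes the old function hard to read as a unit. Re-assembled from the '-' lines of that hunk (readability sketch only, not new code), the version being replaced implemented the 20 msec throttle described in the comment above roughly as:

static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
        int ret = 0;
        unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

        mutex_lock(&dev->struct_mutex);
        while (!list_empty(&i915_file_priv->mm.request_list)) {
                struct drm_i915_gem_request *request;

                request = list_first_entry(&i915_file_priv->mm.request_list,
                                           struct drm_i915_gem_request,
                                           client_list);

                /* Stop at the first request emitted within the last 20 msec. */
                if (time_after_eq(request->emitted_jiffies, recent_enough))
                        break;

                ret = i915_wait_request(dev, request->seqno, request->ring);
                if (ret != 0)
                        break;
        }
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
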
  
 
        u32 isr;
  };
  
 -intel_overlay_map_regs_atomic(struct intel_overlay *overlay,
 -                            int slot)
+ static struct overlay_registers *
 -        drm_i915_private_t *dev_priv = overlay->dev->dev_private;
++intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+ {
 -                                              overlay->reg_bo->gtt_offset,
 -                                              slot);
++      drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+       struct overlay_registers *regs;
+ 
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+               regs = overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 -                                          int slot,
++                                              overlay->reg_bo->gtt_offset);
+ 
+       return regs;
+ }
+ 
+ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
 -              io_mapping_unmap_atomic(regs, slot);
+                                           struct overlay_registers *regs)
+ {
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
++              io_mapping_unmap_atomic(regs);
+ }
+ 
+ 
  struct intel_overlay_error_state *
  intel_overlay_capture_error_state(struct drm_device *dev)
  {
  
        error->dovsta = I915_READ(DOVSTA);
        error->isr = I915_READ(ISR);
-       if (OVERLAY_NONPHYSICAL(overlay->dev))
-               error->base = (long) overlay->reg_bo->gtt_offset;
-       else
+       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
                error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
+       else
+               error->base = (long) overlay->reg_bo->gtt_offset;
  
 -      regs = intel_overlay_map_regs_atomic(overlay, KM_IRQ0);
 +      regs = intel_overlay_map_regs_atomic(overlay);
        if (!regs)
                goto err;
  
        memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-       intel_overlay_unmap_regs_atomic(overlay);
 -      intel_overlay_unmap_regs_atomic(overlay, KM_IRQ0, regs);
++      intel_overlay_unmap_regs_atomic(overlay, regs);
  
        return error;