return r;
 }
 
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
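+       /*
+        * A lockup was reported while processing the command stream: try a
+        * GPU reset and, if it succeeds, return -EAGAIN so userspace knows
+        * to resubmit the command stream.
+        */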
+       if (r == -EDEADLK) {
+               r = radeon_gpu_reset(rdev);
+               if (!r)
+                       r = -EAGAIN;
+       }
+       return r;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct radeon_device *rdev = dev->dev_private;
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r);
+               r = radeon_cs_handle_lockup(rdev, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
                radeon_cs_parser_fini(&parser, r);
+               r = radeon_cs_handle_lockup(rdev, r);
                radeon_mutex_unlock(&rdev->cs_mutex);
                return r;
        }
        }
 out:
        radeon_cs_parser_fini(&parser, r);
+       r = radeon_cs_handle_lockup(rdev, r);
        radeon_mutex_unlock(&rdev->cs_mutex);
        return r;
 }
 
        int r;
        int resched;
 
-       /* Prevent CS ioctl from interfering */
-       radeon_mutex_lock(&rdev->cs_mutex);
-
        radeon_save_bios_scratch_regs(rdev);
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
                ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
        }
 
-       radeon_mutex_unlock(&rdev->cs_mutex);
-
        if (r) {
                /* bad news, how to tell it to userspace ? */
                dev_info(rdev->dev, "GPU reset failed\n");
 
                        /* change sequence value on all rings, so nobody else thinks there is a lockup */
                        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                                rdev->fence_drv[i].last_seq -= 0x10000;
+
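+                       /* mark new activity so other waiters restart their timeout */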
+                       rdev->fence_drv[fence->ring].last_activity = jiffies;
                        write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 
                        if (radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {
 
                                /* mark the ring as not ready any more */
                                rdev->ring[fence->ring].ready = false;
-                               r = radeon_gpu_reset(rdev);
-                               if (r)
-                                       return r;
-
-                               write_lock_irqsave(&rdev->fence_lock, irq_flags);
-                               rdev->fence_drv[fence->ring].last_activity = jiffies;
-                               write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
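+                               /* report the lockup; the ioctl path will do
+                                * the actual GPU reset
+                                */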
+                               return -EDEADLK;
                        }
                }
        }
 
        radeon_bo_unreserve(rbo);
 }
 
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
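+       /*
+        * Same as radeon_cs_handle_lockup(), but the GEM ioctls do not hold
+        * the CS mutex, so take it here to serialize the GPU reset against
+        * the CS ioctl.
+        */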
+       if (r == -EDEADLK) {
+               radeon_mutex_lock(&rdev->cs_mutex);
+               r = radeon_gpu_reset(rdev);
+               if (!r)
+                       r = -EAGAIN;
+               radeon_mutex_unlock(&rdev->cs_mutex);
+       }
+       return r;
+}
 
 /*
  * GEM ioctls.
                                        args->initial_domain, false,
                                        false, &gobj);
        if (r) {
+               r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
+               r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
        drm_gem_object_unreference_unlocked(gobj);
+       r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
 }
 
                break;
        }
        drm_gem_object_unreference_unlocked(gobj);
+       r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
 }
 
        if (robj->rdev->asic->ioctl_wait_idle)
                robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
        drm_gem_object_unreference_unlocked(gobj);
+       r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
 }