int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 
        struct drm_crtc *crtc;
        struct drm_connector *connector;
        int i, r;
-       bool force_completion = false;
 
        if (dev == NULL || dev->dev_private == NULL) {
                return -ENODEV;
                r = radeon_fence_wait_empty(rdev, i);
                if (r) {
                        /* delay GPU reset to resume */
-                       force_completion = true;
+                       radeon_fence_driver_force_completion(rdev, i);
                }
        }
-       if (force_completion) {
-               radeon_fence_driver_force_completion(rdev);
-       }
 
        radeon_save_bios_scratch_regs(rdev);
 
                        }
                }
        } else {
-               radeon_fence_driver_force_completion(rdev);
                for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+                       radeon_fence_driver_force_completion(rdev, i);
                        kfree(ring_data[i]);
                }
        }
 
                r = radeon_fence_wait_empty(rdev, ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
-                       radeon_fence_driver_force_completion(rdev);
+                       radeon_fence_driver_force_completion(rdev, ring);
                }
                wake_up_all(&rdev->fence_queue);
                radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
  * radeon_fence_driver_force_completion - force all fence waiters to complete
  *
  * @rdev: radeon device pointer
+ * @ring: the ring to complete
  *
  * In case of GPU reset failure make sure no process keeps waiting on a fence
  * that will never complete.
  */
-void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
 {
-       int ring;
-
-       for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
-               if (!rdev->fence_drv[ring].initialized)
-                       continue;
+       if (rdev->fence_drv[ring].initialized)
                radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
-       }
 }
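
For reference, a minimal caller sketch (not part of the patch): since the function now completes a single ring, a caller that previously relied on it walking every ring loops over the rings itself, exactly as the GPU reset failure path above does. It assumes the radeon driver's usual kernel definitions (struct radeon_device, RADEON_NUM_RINGS); the helper name below is hypothetical.

/* Sketch only: rebuild the old all-rings behaviour on top of the new
 * per-ring interface. The "is this ring initialized" check now lives
 * inside radeon_fence_driver_force_completion() itself, so the loop
 * stays a plain iteration over every ring index.
 */
static void radeon_force_completion_all_rings(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		radeon_fence_driver_force_completion(rdev, i);
}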
 
                r = radeon_ib_test(rdev, i, ring);
                if (r) {
+                       radeon_fence_driver_force_completion(rdev, i);
                        ring->ready = false;
                        rdev->needs_reset = false;