 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
 int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
 int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
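
For orientation, here is a minimal sketch of how a caller might combine the declarations above. The helper name example_sync_fence is hypothetical; only the functions listed in this hunk are assumed:

/* Poll a fence without blocking, fall back to an interruptible wait,
 * then drop the caller's reference. radeon_fence_wait() returns 0 on
 * success or a negative errno (e.g. -ERESTARTSYS on a signal). */
static int example_sync_fence(struct radeon_fence *fence)
{
        int r = 0;

        if (!radeon_fence_signaled(fence))
                r = radeon_fence_wait(fence, true /* interruptible */);

        radeon_fence_unref(&fence);
        return r;
}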
 
        radeon_bo_evict_vram(rdev);
        /* wait for gpu to finish processing current batch */
        for (i = 0; i < RADEON_NUM_RINGS; i++)
-               radeon_fence_wait_last(rdev, i);
+               radeon_fence_wait_empty(rdev, i);
 
        radeon_save_bios_scratch_regs(rdev);
 
 
        return r;
 }
 
-int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
        unsigned long irq_flags;
        struct radeon_fence *fence;

        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
                if (!rdev->fence_drv[ring].initialized)
                        continue;
-               radeon_fence_wait_last(rdev, ring);
+               radeon_fence_wait_empty(rdev, ring);
                wake_up_all(&rdev->fence_drv[ring].queue);
                write_lock_irqsave(&rdev->fence_lock, irq_flags);
                radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
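
Note that the loop in this last hunk iterates over every ring and calls the renamed function, so it belongs to the fence teardown path (radeon_fence_driver_fini) rather than to radeon_fence_wait_empty itself. For reference, a sketch of what radeon_fence_wait_empty amounts to under the locking scheme of this era; the per-ring emitted list is an assumption inferred from the locals and fields visible in the hunks above:

int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
        unsigned long irq_flags;
        struct radeon_fence *fence;
        int r;

        /* Grab a reference to the last fence emitted on this ring, if
         * any (the per-ring emitted list is an assumption). */
        write_lock_irqsave(&rdev->fence_lock, irq_flags);
        if (!rdev->fence_drv[ring].initialized ||
            list_empty(&rdev->fence_drv[ring].emitted)) {
                write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
                return 0;       /* nothing in flight, ring already empty */
        }
        fence = list_entry(rdev->fence_drv[ring].emitted.prev,
                           struct radeon_fence, list);
        radeon_fence_ref(fence);
        write_unlock_irqrestore(&rdev->fence_lock, irq_flags);

        /* Fences on a ring signal in order, so waiting on the last
         * emitted fence drains everything queued before it. */
        r = radeon_fence_wait(fence, false);
        radeon_fence_unref(&fence);
        return r;
}

This is presumably also what motivates the rename: the function waits until the ring's list of emitted fences is empty, not for any particular "last" fence.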