#include "radeon_trace.h"
 
 /*
- * Fences
- * Fences mark an event in the GPUs pipeline and are used
+ * Fences mark an event in the GPU's pipeline and are used
  * for GPU/CPU synchronization.  When the fence is written,
  * it is expected that all buffers associated with that fence
 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               if (drv->cpu_addr) {
+               if (drv->cpu_addr)
                        *drv->cpu_addr = cpu_to_le32(seq);
-               }
        } else {
                WREG32(drv->scratch_reg, seq);
        }
        u32 seq = 0;
 
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               if (drv->cpu_addr) {
+               if (drv->cpu_addr)
                        seq = le32_to_cpu(*drv->cpu_addr);
-               } else {
+               else
                        seq = lower_32_bits(atomic64_read(&drv->last_seq));
-               }
        } else {
                seq = RREG32(drv->scratch_reg);
        }
 
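For context on the two hunks above: radeon_fence_write() and radeon_fence_read() pick between a CPU-visible write-back slot and an MMIO scratch register, preferring write-back so that polling a fence is a coherent memory read rather than a register read. A minimal userspace model of that selection logic follows; every name in it (fake_fence_driver, fake_mmio, wb_enabled) is an illustrative stand-in, not driver API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; none of these are real driver structures. */
struct fake_fence_driver {
        uint32_t *cpu_addr;   /* write-back slot, may be NULL */
        uint32_t scratch_reg; /* fake register index, 0 if unused */
};

static uint32_t fake_mmio[16]; /* pretend MMIO register file */
static bool wb_enabled = true; /* mirrors rdev->wb.enabled */

static void fence_write(struct fake_fence_driver *drv, uint32_t seq)
{
        if (wb_enabled || !drv->scratch_reg) {
                if (drv->cpu_addr)
                        *drv->cpu_addr = seq; /* kernel swaps with cpu_to_le32() */
        } else {
                fake_mmio[drv->scratch_reg] = seq; /* WREG32() stand-in */
        }
}

static uint32_t fence_read(struct fake_fence_driver *drv, uint32_t last_seq)
{
        uint32_t seq;

        if (wb_enabled || !drv->scratch_reg) {
                if (drv->cpu_addr)
                        seq = *drv->cpu_addr; /* le32_to_cpu() in the kernel */
                else
                        seq = last_seq; /* no slot yet: report last known value */
        } else {
                seq = fake_mmio[drv->scratch_reg]; /* RREG32() stand-in */
        }
        return seq;
}

int main(void)
{
        uint32_t slot = 0;
        struct fake_fence_driver drv = { .cpu_addr = &slot, .scratch_reg = 2 };

        fence_write(&drv, 42);
        printf("%u\n", fence_read(&drv, 0)); /* prints 42 */
        return 0;
}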
        /* we are protected by the ring emission mutex */
        *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
-       if ((*fence) == NULL) {
+       if ((*fence) == NULL)
                return -ENOMEM;
-       }
+
        (*fence)->rdev = rdev;
        (*fence)->seq = seq = ++rdev->fence_drv[ring].sync_seq[ring];
        (*fence)->ring = ring;
  * for the fence locking itself, so unlocked variants are used for
  * fence_signal, and remove_wait_queue.
  */
-static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
+static int radeon_fence_check_signaled(wait_queue_entry_t *wait,
+                                      unsigned int mode, int flags, void *key)
 {
        struct radeon_fence *fence;
        u64 seq;
 static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
 {
        uint64_t seq, last_seq, last_emitted;
-       unsigned count_loop = 0;
+       unsigned int count_loop = 0;
        bool wake = false;
 
        /* Note there is a scenario here for an infinite loop but it's
                        seq |= last_emitted & 0xffffffff00000000LL;
                }
 
-               if (seq <= last_seq || seq > last_emitted) {
+               if (seq <= last_seq || seq > last_emitted)
                        break;
-               }
+
                /* If we loop over we don't want to return without
                 * checking if a fence is signaled as it means that the
-                * seq we just read is different from the previous on.
+                * seq we just read is different from the previous one.
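The hunk above sits inside the wrap handling of radeon_fence_activity(): the ring reports only the low 32 bits of the sequence, the driver splices in the upper 32 bits from the last processed 64-bit value, and steps up to last_emitted's upper half when the low word has wrapped; the check (seq <= last_seq || seq > last_emitted) then breaks out when nothing new signaled or the value is out of range. A self-contained sketch of that arithmetic, with an illustrative function name and made-up values:

#include <stdint.h>
#include <stdio.h>

/* Model of the sequence extension in radeon_fence_activity(). */
static uint64_t extend_seq(uint32_t hw_seq, uint64_t last_seq,
                           uint64_t last_emitted)
{
        uint64_t seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

        if (seq < last_seq) {
                /* the low 32 bits wrapped since last_seq was sampled */
                seq &= 0xffffffffULL;
                seq |= last_emitted & 0xffffffff00000000ULL;
        }
        return seq;
}

int main(void)
{
        uint64_t last_seq     = 0x00000001fffffffeULL; /* just below a wrap */
        uint64_t last_emitted = 0x0000000200000005ULL;
        uint32_t hw_seq       = 0x00000003;            /* low word, post-wrap */

        printf("0x%016llx\n",
               (unsigned long long)extend_seq(hw_seq, last_seq, last_emitted));
        /* prints 0x0000000200000003 */
        return 0;
}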
        else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
 
                /* good news we believe it's a lockup */
-               dev_warn(rdev->dev, "GPU lockup (current fence id "
-                        "0x%016llx last fence id 0x%016llx on ring %d)\n",
+               dev_warn(rdev->dev, "GPU lockup (current fence id 0x%016llx last fence id 0x%016llx on ring %d)\n",
                         (uint64_t)atomic64_read(&fence_drv->last_seq),
                         fence_drv->sync_seq[ring], ring);
 
  * radeon_fence_signaled().
  */
 static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
-                                     u64 seq, unsigned ring)
+                                     u64 seq, unsigned int ring)
 {
-       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;
-       }
+
        /* poll new last sequence at least once */
        radeon_fence_process(rdev, ring);
-       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;
-       }
+
        return false;
 }
 
 {
        struct radeon_fence *fence = to_radeon_fence(f);
        struct radeon_device *rdev = fence->rdev;
-       unsigned ring = fence->ring;
+       unsigned int ring = fence->ring;
        u64 seq = fence->seq;
 
-       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+       if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                return true;
-       }
 
        if (down_read_trylock(&rdev->exclusive_lock)) {
                radeon_fence_process(rdev, ring);
                up_read(&rdev->exclusive_lock);
 
-               if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+               if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
                        return true;
-               }
        }
        return false;
 }
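On the locking in the dma_fence callback above: it first trusts the cached last_seq, then re-polls only opportunistically, using a trylock so the query never blocks while the exclusive lock is held (the kernel uses down_read_trylock() on rdev->exclusive_lock). A userspace sketch of the same check/trylock/re-check pattern with POSIX primitives; all names here are illustrative stand-ins:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t exclusive_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_uint_fast64_t last_seq;

/* Stand-in for radeon_fence_process(): pretend the GPU advanced. */
static void poll_hardware(void)
{
        atomic_store(&last_seq, atomic_load(&last_seq) + 1);
}

static bool fence_signaled(uint64_t seq)
{
        if (atomic_load(&last_seq) >= seq)
                return true;

        /* never block: only re-poll if the lock is free right now */
        if (pthread_rwlock_tryrdlock(&exclusive_lock) == 0) {
                poll_hardware();
                pthread_rwlock_unlock(&exclusive_lock);
                if (atomic_load(&last_seq) >= seq)
                        return true;
        }
        return false;
}

int main(void)
{
        atomic_store(&last_seq, 41);
        printf("%d\n", fence_signaled(42)); /* 1: the poll advances to 42 */
        return 0;
}

(Build with -lpthread.)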
  */
 static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 {
-       unsigned i;
+       unsigned int i;
 
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
 
        seq[fence->ring] = fence->seq;
        r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout);
-       if (r <= 0) {
+       if (r <= 0)
                return r;
-       }
 
        dma_fence_signal(&fence->base);
        return r;
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
        long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-       if (r > 0) {
+
+       if (r > 0)
                return 0;
-       } else {
-               return r;
-       }
+
+       return r;
 }
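The fold above leans on the schedule_timeout-style convention of radeon_fence_wait_timeout(): a negative value is an error (e.g. -ERESTARTSYS when interrupted, -EDEADLK on a detected lockup), zero means the timeout elapsed, and a positive value means the fence signaled with jiffies to spare. A compact restatement using a hypothetical helper, for illustration only:

#include <stdio.h>

static int fold_wait_result(long r)
{
        if (r > 0)
                return 0;  /* signaled before the timeout ran out */
        return (int)r;     /* 0 == timed out, < 0 == -errno */
}

int main(void)
{
        /* -512 is -ERESTARTSYS */
        printf("%d %d %d\n", fold_wait_result(100),
               fold_wait_result(0), fold_wait_result(-512));
        /* prints: 0 0 -512 */
        return 0;
}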
 
 /**
                          bool intr)
 {
        uint64_t seq[RADEON_NUM_RINGS];
-       unsigned i, num_rings = 0;
+       unsigned int i, num_rings = 0;
        long r;
 
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                seq[i] = 0;
 
-               if (!fences[i]) {
+               if (!fences[i])
                        continue;
-               }
 
                seq[i] = fences[i]->seq;
                ++num_rings;
                return -ENOENT;
 
        r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-       if (r < 0) {
+       if (r < 0)
                return r;
-       }
+
        return 0;
 }
 
 
        seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
        if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
-               /* nothing to wait for, last_seq is
-                  already the last emited fence */
+               /* nothing to wait for, last_seq is already
+                * the last emitted fence
+                */
                return -ENOENT;
        }
+
        r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;
+
        return 0;
 }
 
        struct radeon_fence *tmp = *fence;
 
        *fence = NULL;
-       if (tmp) {
+       if (tmp)
                dma_fence_put(&tmp->base);
-       }
 }
 
 /**
  * Returns the number of emitted fences on the ring.  Used by the
- * dynpm code to ring track activity.
+ * dynpm code to track ring activity.
  */
-unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+unsigned int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
 {
        uint64_t emitted;
 
        emitted = rdev->fence_drv[ring].sync_seq[ring]
                - atomic64_read(&rdev->fence_drv[ring].last_seq);
-       /* to avoid 32bits warp around */
+       /* to avoid 32-bit wrap around */
-       if (emitted > 0x10000000) {
+       if (emitted > 0x10000000)
                emitted = 0x10000000;
-       }
-       return (unsigned)emitted;
+
+       return (unsigned int)emitted;
 }
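On the clamp above: the delta between the last emitted and the last signaled sequence is computed in 64 bits and capped at 0x10000000 before the narrowing cast, so a transiently huge difference cannot wrap the unsigned int result. A worked model with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Model of radeon_fence_count_emitted()'s clamp-then-narrow. */
static unsigned int count_emitted(uint64_t sync_seq, uint64_t last_seq)
{
        uint64_t emitted = sync_seq - last_seq;

        if (emitted > 0x10000000)
                emitted = 0x10000000;
        return (unsigned int)emitted;
}

int main(void)
{
        printf("%u\n", count_emitted(105, 100));          /* 5 */
        printf("%u\n", count_emitted(0x200000000ULL, 1)); /* clamped: 268435456 */
        return 0;
}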
 
 /**
 {
        struct radeon_fence_driver *fdrv;
 
-       if (!fence) {
+       if (!fence)
                return false;
-       }
 
-       if (fence->ring == dst_ring) {
+       if (fence->ring == dst_ring)
                return false;
-       }
 
        /* we are protected by the ring mutex */
        fdrv = &fence->rdev->fence_drv[dst_ring];
-       if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+       if (fence->seq <= fdrv->sync_seq[fence->ring])
                return false;
-       }
 
        return true;
 }
 void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
 {
        struct radeon_fence_driver *dst, *src;
-       unsigned i;
+       unsigned int i;
 
-       if (!fence) {
+       if (!fence)
                return;
-       }
 
-       if (fence->ring == dst_ring) {
+       if (fence->ring == dst_ring)
                return;
-       }
 
        /* we are protected by the ring mutex */
        src = &fence->rdev->fence_drv[fence->ring];
        dst = &fence->rdev->fence_drv[dst_ring];
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-               if (i == dst_ring) {
+               if (i == dst_ring)
                        continue;
-               }
+
                dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
        }
 }
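The loop above is an element-wise max merge: once dst_ring has synced to a fence from the source ring, it also inherits every sequence the source ring was already synchronized against, which is what lets radeon_fence_need_sync() skip redundant semaphore waits later. A minimal model with a hypothetical ring count:

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 3 /* stand-in for RADEON_NUM_RINGS */

/* Element-wise max merge as in radeon_fence_note_sync(). */
static void note_sync(uint64_t dst[NUM_RINGS],
                      const uint64_t src[NUM_RINGS], int dst_ring)
{
        int i;

        for (i = 0; i < NUM_RINGS; ++i) {
                if (i == dst_ring)
                        continue;

                if (src[i] > dst[i])
                        dst[i] = src[i];
        }
}

int main(void)
{
        uint64_t dst[NUM_RINGS] = { 10, 4, 7 };
        uint64_t src[NUM_RINGS] = {  3, 9, 2 };

        note_sync(dst, src, 0); /* ring 0 is the destination */
        printf("%llu %llu %llu\n",
               (unsigned long long)dst[0], (unsigned long long)dst[1],
               (unsigned long long)dst[2]); /* 10 9 7 */
        return 0;
}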
        int ring;
 
        init_waitqueue_head(&rdev->fence_queue);
-       for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+       for (ring = 0; ring < RADEON_NUM_RINGS; ring++)
                radeon_fence_driver_init_ring(rdev, ring);
-       }
 
        radeon_debugfs_fence_init(rdev);
 }
 static const char *radeon_fence_get_timeline_name(struct dma_fence *f)
 {
        struct radeon_fence *fence = to_radeon_fence(f);
+
        switch (fence->ring) {
        case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
        case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
        case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
        case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
        case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
-       default: WARN_ON_ONCE(1); return "radeon.unk";
+       default:
+               WARN_ON_ONCE(1);
+               return "radeon.unk";
        }
 }