#include "amdgpu.h"
 
 #define AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT (HZ / 2)
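+/* preempt the low priority ring if one of its fences stays unsignaled longer than this (us) */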
+#define AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US 10000
 
 static const struct ring_info {
        unsigned int hw_pio;
        { AMDGPU_RING_PRIO_2, "gfx_high"},
 };
 
+static struct kmem_cache *amdgpu_mux_chunk_slab;
+
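+/* Return the mux entry registered for a sw ring, or NULL if its entry_index is out of range. */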
+static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
+                                                               struct amdgpu_ring *ring)
+{
+       return ring->entry_index < mux->ring_entry_size ?
+                       &mux->ring_entry[ring->entry_index] : NULL;
+}
+
+/* Copy packets from the sw ring in the range [s_start, s_end) to the real ring */
+static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
+                                                 struct amdgpu_ring *ring,
+                                                 u64 s_start, u64 s_end)
+{
+       u64 start, end;
+       struct amdgpu_ring *real_ring = mux->real_ring;
+
+       start = s_start & ring->buf_mask;
+       end = s_end & ring->buf_mask;
+
+       if (start == end) {
+               DRM_ERROR("no data to copy from sw ring\n");
+               return;
+       }
+       if (start > end) {
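+               /* the range wraps past the end of the sw ring buffer; copy in two pieces */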
+               amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
+               amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
+                                          (ring->ring_size >> 2) - start);
+               amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
+       } else {
+               amdgpu_ring_alloc(real_ring, end - start);
+               amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);
+       }
+}
+
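+/*
+ * Copy the preempted low priority chunks back onto the real ring. Only chunks
+ * with a fence in (last_seq, seqno_to_resubmit] are resubmitted; the fallback
+ * timer is stopped once the resubmission is done.
+ */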
+static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
+{
+       struct amdgpu_mux_entry *e = NULL;
+       struct amdgpu_mux_chunk *chunk;
+       uint32_t seq, last_seq;
+       int i;
+
+       if (!mux->s_resubmit)
+               return;
+
+       /* find the low priority ring entry */
+       for (i = 0; i < mux->num_ring_entries; i++) {
+               if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
+                       e = &mux->ring_entry[i];
+                       break;
+               }
+       }
+
+       if (!e) {
+               DRM_ERROR("%s no low priority ring found\n", __func__);
+               return;
+       }
+
+       last_seq = atomic_read(&e->ring->fence_drv.last_seq);
+       seq = mux->seqno_to_resubmit;
+       if (last_seq < seq) {
+               /* resubmit all the fences between (last_seq, seq] */
+               list_for_each_entry(chunk, &e->list, entry) {
+                       if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
+                               amdgpu_fence_update_start_timestamp(e->ring,
+                                                                   chunk->sync_seq,
+                                                                   ktime_get());
+                               amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
+                                                                     chunk->start,
+                                                                     chunk->end);
+                               mux->wptr_resubmit = chunk->end;
+                               amdgpu_ring_commit(mux->real_ring);
+                       }
+               }
+       }
+
+       del_timer(&mux->resubmit_timer);
+       mux->s_resubmit = false;
+}
+
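+/* Arm the fallback timer that resubmits pending chunks after a preemption. */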
+static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
+{
+       mod_timer(&mux->resubmit_timer, jiffies + AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT);
+}
+
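+/* Timer callback: resubmit pending chunks, or reschedule if the mux lock is contended. */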
+static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
+{
+       struct amdgpu_ring_mux *mux = from_timer(mux, t, resubmit_timer);
+
+       if (!spin_trylock(&mux->lock)) {
+               amdgpu_ring_mux_schedule_resubmit(mux);
+               DRM_ERROR("reschedule resubmit\n");
+               return;
+       }
+       amdgpu_mux_resubmit_chunks(mux);
+       spin_unlock(&mux->lock);
+}
+
 int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
                         unsigned int entry_size)
 {
        mux->real_ring = ring;
        mux->num_ring_entries = 0;
+
        mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry), GFP_KERNEL);
        if (!mux->ring_entry)
                return -ENOMEM;
 
        mux->ring_entry_size = entry_size;
+       mux->s_resubmit = false;
+
+       amdgpu_mux_chunk_slab = kmem_cache_create("amdgpu_mux_chunk",
+                                                 sizeof(struct amdgpu_mux_chunk), 0,
+                                                 SLAB_HWCACHE_ALIGN, NULL);
+       if (!amdgpu_mux_chunk_slab) {
+               DRM_ERROR("create amdgpu_mux_chunk cache failed\n");
+               kfree(mux->ring_entry);
+               mux->ring_entry = NULL;
+               return -ENOMEM;
+       }
+
        spin_lock_init(&mux->lock);
+       timer_setup(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, 0);
 
        return 0;
 }
 
 void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux)
 {
+       struct amdgpu_mux_entry *e;
+       struct amdgpu_mux_chunk *chunk, *chunk2;
+       int i;
+
+       for (i = 0; i < mux->num_ring_entries; i++) {
+               e = &mux->ring_entry[i];
+               list_for_each_entry_safe(chunk, chunk2, &e->list, entry) {
+                       list_del(&chunk->entry);
+                       kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
+               }
+       }
+       kmem_cache_destroy(amdgpu_mux_chunk_slab);
        kfree(mux->ring_entry);
        mux->ring_entry = NULL;
        mux->num_ring_entries = 0;
        ring->entry_index = mux->num_ring_entries;
        e->ring = ring;
 
+       INIT_LIST_HEAD(&e->list);
        mux->num_ring_entries += 1;
        return 0;
 }
 
-static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
-                                                               struct amdgpu_ring *ring)
-{
-       return ring->entry_index < mux->ring_entry_size ?
-                       &mux->ring_entry[ring->entry_index] : NULL;
-}
-
-/* copy packages on sw ring range[begin, end) */
-static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
-                                                 struct amdgpu_ring *ring,
-                                                 u64 s_start, u64 s_end)
-{
-       u64 start, end;
-       struct amdgpu_ring *real_ring = mux->real_ring;
-
-       start = s_start & ring->buf_mask;
-       end = s_end & ring->buf_mask;
-
-       if (start == end) {
-               DRM_ERROR("no more data copied from sw ring\n");
-               return;
-       }
-       if (start > end) {
-               amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
-               amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
-                                          (ring->ring_size >> 2) - start);
-               amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
-       } else {
-               amdgpu_ring_alloc(real_ring, end - start);
-               amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);
-       }
-}
-
 void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
 {
        struct amdgpu_mux_entry *e;
 
+       spin_lock(&mux->lock);
+
+       if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
+               amdgpu_mux_resubmit_chunks(mux);
+
        e = amdgpu_ring_mux_sw_entry(mux, ring);
        if (!e) {
                DRM_ERROR("cannot find entry for sw ring\n");
+               spin_unlock(&mux->lock);
+               return;
+       }
+
+       /* Skip updating the wptr while a preemption is in progress. */
+       if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) {
+               spin_unlock(&mux->lock);
                return;
        }
 
-       spin_lock(&mux->lock);
        e->sw_cptr = e->sw_wptr;
+       /* Advance cptr past packets already copied by the resubmit path */
+       if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
+               e->sw_cptr = mux->wptr_resubmit;
        e->sw_wptr = wptr;
        e->start_ptr_in_hw_ring = mux->real_ring->wptr;
 
-       amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
-       e->end_ptr_in_hw_ring = mux->real_ring->wptr;
-       amdgpu_ring_commit(mux->real_ring);
-
+       /* Skip copying packets that have already been resubmitted. */
+       if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
+               amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
+               e->end_ptr_in_hw_ring = mux->real_ring->wptr;
+               amdgpu_ring_commit(mux->real_ring);
+       } else {
+               e->end_ptr_in_hw_ring = mux->real_ring->wptr;
+       }
        spin_unlock(&mux->lock);
 }
 
        return idx < ARRAY_SIZE(sw_ring_info) ?
                sw_ring_info[idx].hw_pio : AMDGPU_RING_PRIO_DEFAULT;
 }
+
+/*
+ * Decide whether to preempt: a low priority ring has kept a fence unsignaled
+ * beyond the threshold while no high priority ring has fences pending.
+ */
+int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux)
+{
+       struct amdgpu_ring *ring;
+       int i, need_preempt;
+
+       need_preempt = 0;
+       for (i = 0; i < mux->num_ring_entries; i++) {
+               ring = mux->ring_entry[i].ring;
+               if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
+                   amdgpu_fence_count_emitted(ring) > 0)
+                       return 0;
+               if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
+                   amdgpu_fence_last_unsignaled_time_us(ring) >
+                   AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US)
+                       need_preempt = 1;
+       }
+       return need_preempt && !mux->s_resubmit;
+}
+
+/*
+ * Trigger Mid-Command Buffer Preemption (MCBP); whether a resubmit is needed
+ * is decided once the trailing fence signals.
+ */
+int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux)
+{
+       int r;
+
+       spin_lock(&mux->lock);
+       mux->pending_trailing_fence_signaled = true;
+       r = amdgpu_ring_preempt_ib(mux->real_ring);
+       spin_unlock(&mux->lock);
+       return r;
+}
+
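+/*
+ * Called at the start of an IB submission on a sw ring. High priority rings
+ * check whether preemption should be triggered; low priority rings open a new
+ * chunk for the submission.
+ */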
+void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+
+       WARN_ON(!ring->is_sw_ring);
+       if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
+               if (amdgpu_mcbp_scan(mux) > 0)
+                       amdgpu_mcbp_trigger_preempt(mux);
+               return;
+       }
+
+       amdgpu_ring_mux_start_ib(mux, ring);
+}
+
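+/* Called at the end of an IB submission on a sw ring; closes the chunk on low priority rings. */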
+void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+
+       WARN_ON(!ring->is_sw_ring);
+       if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+               return;
+       amdgpu_ring_mux_end_ib(mux, ring);
+}
+
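+/* Resubmit any pending chunks, then open a new chunk recording the current wptr as its start. */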
+void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+       struct amdgpu_mux_entry *e;
+       struct amdgpu_mux_chunk *chunk;
+
+       spin_lock(&mux->lock);
+       amdgpu_mux_resubmit_chunks(mux);
+       spin_unlock(&mux->lock);
+
+       e = amdgpu_ring_mux_sw_entry(mux, ring);
+       if (!e) {
+               DRM_ERROR("cannot find entry!\n");
+               return;
+       }
+
+       chunk = kmem_cache_alloc(amdgpu_mux_chunk_slab, GFP_KERNEL);
+       if (!chunk) {
+               DRM_ERROR("alloc amdgpu_mux_chunk failed\n");
+               return;
+       }
+
+       chunk->start = ring->wptr;
+       list_add_tail(&chunk->entry, &e->list);
+}
+
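+/* Drop the bookkeeping for chunks whose fences have already signaled. */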
+static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+       uint32_t last_seq;
+       struct amdgpu_mux_entry *e;
+       struct amdgpu_mux_chunk *chunk, *tmp;
+
+       e = amdgpu_ring_mux_sw_entry(mux, ring);
+       if (!e) {
+               DRM_ERROR("cannot find entry!\n");
+               return;
+       }
+
+       last_seq = atomic_read(&ring->fence_drv.last_seq);
+
+       list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
+               if (chunk->sync_seq <= last_seq) {
+                       list_del(&chunk->entry);
+                       kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
+               }
+       }
+}
+
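+/* Close the current chunk with the end wptr and fence sequence, then reap signaled chunks. */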
+void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+       struct amdgpu_mux_entry *e;
+       struct amdgpu_mux_chunk *chunk;
+
+       e = amdgpu_ring_mux_sw_entry(mux, ring);
+       if (!e) {
+               DRM_ERROR("cannot find entry!\n");
+               return;
+       }
+
+       /* list_last_entry() never returns NULL, so check for an empty list instead */
+       if (list_empty(&e->list)) {
+               DRM_ERROR("cannot find chunk!\n");
+               return;
+       }
+
+       chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
+
+       chunk->end = ring->wptr;
+       chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
+
+       scan_and_remove_signaled_chunk(mux, ring);
+}
+
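+/*
+ * Handle the trailing fence written for a preemption request. Once it has
+ * signaled, schedule resubmission of any chunks whose fences are still
+ * unsignaled on the low priority ring.
+ */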
+bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux)
+{
+       struct amdgpu_mux_entry *e;
+       struct amdgpu_ring *ring = NULL;
+       int i;
+
+       if (!mux->pending_trailing_fence_signaled)
+               return false;
+
+       if (mux->real_ring->trail_seq != le32_to_cpu(*mux->real_ring->trail_fence_cpu_addr))
+               return false;
+
+       for (i = 0; i < mux->num_ring_entries; i++) {
+               e = &mux->ring_entry[i];
+               if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
+                       ring = e->ring;
+                       break;
+               }
+       }
+
+       if (!ring) {
+               DRM_ERROR("cannot find low priority ring\n");
+               return false;
+       }
+
+       amdgpu_fence_process(ring);
+       if (amdgpu_fence_count_emitted(ring) > 0) {
+               mux->s_resubmit = true;
+               mux->seqno_to_resubmit = ring->fence_drv.sync_seq;
+               amdgpu_ring_mux_schedule_resubmit(mux);
+       }
+
+       mux->pending_trailing_fence_signaled = false;
+       return true;
+}