#include <linux/uaccess.h>
 
 #include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
 #include <drm/virtgpu_drm.h>
 
 #include "virtgpu_drv.h"
 
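+/*
+ * Describes one out-syncobj to be signalled when the job completes:
+ * either a binary syncobj (point == 0) or a timeline point attached
+ * through a pre-allocated dma_fence_chain.
+ */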
+struct virtio_gpu_submit_post_dep {
+       struct drm_syncobj *syncobj;
+       struct dma_fence_chain *chain;
+       u64 point;
+};
+
 struct virtio_gpu_submit {
+       struct virtio_gpu_submit_post_dep *post_deps;
+       unsigned int num_out_syncobjs;
+
+       struct drm_syncobj **in_syncobjs;
+       unsigned int num_in_syncobjs;
+
        struct virtio_gpu_object_array *buflist;
        struct drm_virtgpu_execbuffer *exbuf;
        struct virtio_gpu_fence *out_fence;
        return 0;
 }
 
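+/*
+ * Drop the syncobj references taken by virtio_gpu_parse_deps() and
+ * free the array itself.
+ */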
+static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
+                                    u32 nr_syncobjs)
+{
+       u32 i = nr_syncobjs;
+
+       while (i--) {
+               if (syncobjs[i])
+                       drm_syncobj_put(syncobjs[i]);
+       }
+
+       kvfree(syncobjs);
+}
+
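+/*
+ * Resolve the in-syncobjs supplied by userspace: look up each fence,
+ * wait on it via virtio_gpu_dma_fence_wait() and, when
+ * VIRTGPU_EXECBUF_SYNCOBJ_RESET is set, keep a reference to the syncobj
+ * so its fence can be removed in the cleanup path.
+ */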
+static int
+virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
+{
+       struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+       struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+       size_t syncobj_stride = exbuf->syncobj_stride;
+       u32 num_in_syncobjs = exbuf->num_in_syncobjs;
+       struct drm_syncobj **syncobjs;
+       int ret = 0, i;
+
+       if (!num_in_syncobjs)
+               return 0;
+
+       /*
+        * kvcalloc() first tries to allocate memory using kmalloc() and
+        * falls back to vmalloc() only on failure. It also uses __GFP_NOWARN
+        * internally for allocations larger than a page size, preventing a
+        * storm of kmsg warnings.
+        */
+       syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
+       if (!syncobjs)
+               return -ENOMEM;
+
+       for (i = 0; i < num_in_syncobjs; i++) {
+               u64 address = exbuf->in_syncobjs + i * syncobj_stride;
+               struct dma_fence *fence;
+
+               memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+               if (copy_from_user(&syncobj_desc,
+                                  u64_to_user_ptr(address),
+                                  min(syncobj_stride, sizeof(syncobj_desc)))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
+                                            syncobj_desc.point, 0, &fence);
+               if (ret)
+                       break;
+
+               ret = virtio_gpu_dma_fence_wait(submit, fence);
+
+               dma_fence_put(fence);
+               if (ret)
+                       break;
+
+               if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
+                       syncobjs[i] = drm_syncobj_find(submit->file,
+                                                      syncobj_desc.handle);
+                       if (!syncobjs[i]) {
+                               ret = -EINVAL;
+                               break;
+                       }
+               }
+       }
+
+       if (ret) {
+               virtio_gpu_free_syncobjs(syncobjs, i);
+               return ret;
+       }
+
+       submit->num_in_syncobjs = num_in_syncobjs;
+       submit->in_syncobjs = syncobjs;
+
+       return ret;
+}
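+/*
+ * For example, a userspace submission that waits on one binary syncobj
+ * and consumes its fence would pass num_in_syncobjs = 1, syncobj_stride =
+ * sizeof(struct drm_virtgpu_execbuffer_syncobj) and in_syncobjs pointing
+ * at a descriptor with .handle set, .point = 0 and
+ * .flags = VIRTGPU_EXECBUF_SYNCOBJ_RESET.
+ */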
+
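+/* Remove the fence from the in-syncobjs that userspace flagged for reset. */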
+static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
+                                     u32 nr_syncobjs)
+{
+       u32 i;
+
+       for (i = 0; i < nr_syncobjs; i++) {
+               if (syncobjs[i])
+                       drm_syncobj_replace_fence(syncobjs[i], NULL);
+       }
+}
+
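+/*
+ * Release out-syncobj state: free the fence chains that were never
+ * attached to a syncobj and drop the syncobj references.
+ */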
+static void
+virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
+                         u32 nr_syncobjs)
+{
+       u32 i = nr_syncobjs;
+
+       while (i--) {
+               kfree(post_deps[i].chain);
+               drm_syncobj_put(post_deps[i].syncobj);
+       }
+
+       kvfree(post_deps);
+}
+
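+/*
+ * Collect the out-syncobjs to be signalled on completion: take a
+ * reference to each syncobj and pre-allocate a dma_fence_chain for
+ * timeline points (point != 0) so that signalling cannot fail later.
+ */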
+static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
+{
+       struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+       struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+       struct virtio_gpu_submit_post_dep *post_deps;
+       u32 num_out_syncobjs = exbuf->num_out_syncobjs;
+       size_t syncobj_stride = exbuf->syncobj_stride;
+       int ret = 0, i;
+
+       if (!num_out_syncobjs)
+               return 0;
+
+       post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
+       if (!post_deps)
+               return -ENOMEM;
+
+       for (i = 0; i < num_out_syncobjs; i++) {
+               u64 address = exbuf->out_syncobjs + i * syncobj_stride;
+
+               memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+               if (copy_from_user(&syncobj_desc,
+                                  u64_to_user_ptr(address),
+                                  min(syncobj_stride, sizeof(syncobj_desc)))) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               post_deps[i].point = syncobj_desc.point;
+
+               if (syncobj_desc.flags) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (syncobj_desc.point) {
+                       post_deps[i].chain = dma_fence_chain_alloc();
+                       if (!post_deps[i].chain) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+               }
+
+               post_deps[i].syncobj = drm_syncobj_find(submit->file,
+                                                       syncobj_desc.handle);
+               if (!post_deps[i].syncobj) {
+                       kfree(post_deps[i].chain);
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+
+       if (ret) {
+               virtio_gpu_free_post_deps(post_deps, i);
+               return ret;
+       }
+
+       submit->num_out_syncobjs = num_out_syncobjs;
+       submit->post_deps = post_deps;
+
+       return 0;
+}
+
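+/*
+ * Attach the job's out-fence to each out-syncobj: as a new timeline
+ * point through the pre-allocated fence chain, or as a replacement
+ * fence for binary syncobjs.
+ */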
+static void
+virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
+{
+       struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
+
+       if (post_deps) {
+               struct dma_fence *fence = &submit->out_fence->f;
+               u32 i;
+
+               for (i = 0; i < submit->num_out_syncobjs; i++) {
+                       if (post_deps[i].chain) {
+                               drm_syncobj_add_point(post_deps[i].syncobj,
+                                                     post_deps[i].chain,
+                                                     fence, post_deps[i].point);
+                               post_deps[i].chain = NULL;
+                       } else {
+                               drm_syncobj_replace_fence(post_deps[i].syncobj,
+                                                         fence);
+                       }
+               }
+       }
+}
+
 static int virtio_gpu_fence_event_create(struct drm_device *dev,
                                         struct drm_file *file,
                                         struct virtio_gpu_fence *fence,
 
 static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
 {
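+       /*
+        * In-syncobjs flagged for reset are consumed here, on both the
+        * success and the error path.
+        */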
+       virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+       virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+       virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
+
        if (!IS_ERR(submit->buf))
                kvfree(submit->buf);
 
                drm_fence_event = false;
 
        if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+           exbuf->num_out_syncobjs ||
            exbuf->num_bo_handles ||
            drm_fence_event)
                out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
        if (ret)
                goto cleanup;
 
+       ret = virtio_gpu_parse_post_deps(&submit);
+       if (ret)
+               goto cleanup;
+
+       ret = virtio_gpu_parse_deps(&submit);
+       if (ret)
+               goto cleanup;
+
        /*
         * Await in-fences at the end of the job submission path so that
         * the code can proceed directly to the submission afterwards,
         * keeping the job submission path short.
         */
        virtio_gpu_install_out_fence_fd(&submit);
+       virtio_gpu_process_post_deps(&submit);
        virtio_gpu_complete_submit(&submit);
 cleanup:
        virtio_gpu_cleanup_submit(&submit);