* this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/dma-fence-array.h>
 #include <linux/reservation.h>
+#include <linux/sync_file.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
                bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+               bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
 
-               ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+               ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
+                                                explicit);
                if (ret)
                        break;
        }
        struct etnaviv_gem_submit *submit;
        struct etnaviv_cmdbuf *cmdbuf;
        struct etnaviv_gpu *gpu;
+       struct dma_fence *in_fence = NULL;
        void *stream;
        int ret;
 
                return -EINVAL;
        }
 
+       if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+               DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+               return -EINVAL;
+       }
+
        /*
         * Copy the command submission and bo array to kernel space in
         * one go, and do this outside of any locks.
                goto err_submit_cmds;
        }
 
+       submit->flags = args->flags;
+
        ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
        if (ret)
                goto err_submit_objects;
                goto err_submit_objects;
        }
 
+       if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+               in_fence = sync_file_get_fence(args->fence_fd);
+               if (!in_fence) {
+                       ret = -EINVAL;
+                       goto err_submit_objects;
+               }
+
+               /*
+                * Wait if the fence is from a foreign context, or if the fence
+                * array contains any fence from a foreign context.
+                */
+               if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
+                       ret = dma_fence_wait(in_fence, true);
+                       if (ret)
+                               goto err_submit_objects;
+               }
+       }
+
        ret = submit_fence_sync(submit);
        if (ret)
                goto err_submit_objects;
                flush_workqueue(priv->wq);
 
 err_submit_objects:
+       if (in_fence)
+               dma_fence_put(in_fence);
        submit_cleanup(submit);
 
 err_submit_cmds:
 
 }
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-       unsigned int context, bool exclusive)
+       unsigned int context, bool exclusive, bool explicit)
 {
        struct reservation_object *robj = etnaviv_obj->resv;
        struct reservation_object_list *fobj;
                        return ret;
        }
 
+       if (explicit)
+               return 0;
+
        /*
         * If we have any shared fences, then the exclusive fence
         * should be ignored as it will already have been signalled.
 
 #endif
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-       unsigned int context, bool exclusive);
+       unsigned int context, bool exclusive, bool explicit);
 
 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
+#define ETNA_SUBMIT_NO_IMPLICIT         0x0001
+#define ETNA_SUBMIT_FENCE_FD_IN         0x0002
+#define ETNA_SUBMIT_FLAGS              (ETNA_SUBMIT_NO_IMPLICIT | \
+                                        ETNA_SUBMIT_FENCE_FD_IN)
 #define ETNA_PIPE_3D      0x00
 #define ETNA_PIPE_2D      0x01
 #define ETNA_PIPE_VG      0x02
        __u64 bos;            /* in, ptr to array of submit_bo's */
        __u64 relocs;         /* in, ptr to array of submit_reloc's */
        __u64 stream;         /* in, ptr to cmdstream */
+       __u32 flags;          /* in, mask of ETNA_SUBMIT_x */
+       __s32 fence_fd;       /* in, fence fd (see ETNA_SUBMIT_FENCE_FD_IN) */
 };
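
A minimal sketch (not part of this patch) of how userspace might use the new
fields; the helper name, the include lines and the libdrm drmIoctl() wrapper
are assumptions for illustration only:

	#include <xf86drm.h>		/* drmIoctl(), from libdrm (assumed available) */
	#include "etnaviv_drm.h"	/* this UAPI header */

	/* Hypothetical helper: queue an already-prepared submit, gated on a sync_file fd. */
	static int submit_with_in_fence(int drm_fd,
					struct drm_etnaviv_gem_submit *req,
					int in_fence_fd)
	{
		/* Skip implicit reservation-object sync and wait on fence_fd instead. */
		req->flags = ETNA_SUBMIT_NO_IMPLICIT | ETNA_SUBMIT_FENCE_FD_IN;
		req->fence_fd = in_fence_fd;

		return drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, req);
	}
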
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on