};
 
 struct dma_fence *
-msm_fence_alloc(struct msm_fence_context *fctx)
+msm_fence_alloc(void)
 {
        struct msm_fence *f;
 
        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (!f)
                return ERR_PTR(-ENOMEM);
 
+       return &f->base;
+}
+
+void
+msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx)
+{
+       struct msm_fence *f = to_msm_fence(fence);
+
        f->fctx = fctx;
 
        dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
                       fctx->context, ++fctx->last_fence);
-
-       return &f->base;
 }
 
 bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence);
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
 
-struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
+struct dma_fence * msm_fence_alloc(void);
+void msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx);
 
 static inline bool
 fence_before(uint32_t a, uint32_t b)
 
        if (!submit)
                return ERR_PTR(-ENOMEM);
 
+       submit->hw_fence = msm_fence_alloc();
+       if (IS_ERR(submit->hw_fence)) {
+               ret = PTR_ERR(submit->hw_fence);
+               kfree(submit);
+               return ERR_PTR(ret);
+       }
+
        ret = drm_sched_job_init(&submit->base, queue->entity, queue);
        if (ret) {
+               kfree(submit->hw_fence);
                kfree(submit);
                return ERR_PTR(ret);
        }
 
        struct msm_gpu *gpu = submit->gpu;
        int i;
 
-       submit->hw_fence = msm_fence_alloc(fctx);
+       msm_fence_init(submit->hw_fence, fctx);
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct drm_gem_object *obj = &submit->bos[i].obj->base;