return "unbound";
 }
 
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+       struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+       dma_fence_signal(&array->base);
+       dma_fence_put(&array->base);
+}
+
 static void dma_fence_array_cb_func(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
 {
@@ ... @@ static void dma_fence_array_cb_func(struct dma_fence *f,
        struct dma_fence_array *array = array_cb->array;
 
        if (atomic_dec_and_test(&array->num_pending))
-               dma_fence_signal(&array->base);
-       dma_fence_put(&array->base);
+               irq_work_queue(&array->work);
+       else
+               dma_fence_put(&array->base);
 }
 
 static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
@@ ... @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
        spin_lock_init(&array->lock);
        dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
                       context, seqno);
+       init_irq_work(&array->work, irq_dma_fence_array_work);
 
        array->num_fences = num_fences;
        atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
 
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ ... @@
 #define __LINUX_DMA_FENCE_ARRAY_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 
 /**
  * struct dma_fence_array_cb - callback helper for fence array
@@ ... @@ struct dma_fence_array {
        unsigned num_fences;
        atomic_t num_pending;
        struct dma_fence **fences;
+
+       struct irq_work work;
 };
 
 extern const struct dma_fence_ops dma_fence_array_ops;
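For context, here is a minimal caller-side sketch (not part of the patch; the helper name, fence arguments and error handling are illustrative) of the dma_fence_array API this change touches: it merges two existing fences into an array fence and waits for both. With the hunks above, the array's own dma_fence_signal() now runs from irq_work once the final component fence signals, instead of being called directly from that fence's callback.

/*
 * Illustrative sketch only, not from the patch: merge two fences and wait
 * for both. example_wait_on_both(), f0 and f1 are hypothetical names.
 */
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

static long example_wait_on_both(struct dma_fence *f0, struct dma_fence *f1)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;
	u64 ctx = dma_fence_context_alloc(1);	/* fresh fence context for the array */
	long ret;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* On success, the array takes over these references and the fences[] allocation. */
	fences[0] = dma_fence_get(f0);
	fences[1] = dma_fence_get(f1);

	/* signal_on_any = false: the array signals only once *all* fences have signalled. */
	array = dma_fence_array_create(2, fences, ctx, 1, false);
	if (!array) {
		dma_fence_put(fences[0]);
		dma_fence_put(fences[1]);
		kfree(fences);
		return -ENOMEM;
	}

	/*
	 * When the last component fence signals, dma_fence_array_cb_func()
	 * queues array->work; irq_dma_fence_array_work() then signals the
	 * array fence and drops its reference from irq_work context.
	 */
	ret = dma_fence_wait_timeout(&array->base, true, HZ);
	dma_fence_put(&array->base);
	return ret;	/* <0 on error, 0 on timeout, remaining jiffies otherwise */
}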