__le16  status;
 } __packed;
 
+/*
+ * A BO is "queued" while it sits on a DBC's xfer_list, i.e. after its slices
+ * have been handed to the device for DMA and before the transfer completes
+ * (or the list is drained). xfer_list is initialized empty at BO init and
+ * emptied with list_del_init(), so list membership alone encodes the state —
+ * replacing the old bo->queued bool. Callers hold dbc->xfer_lock when the
+ * answer must be stable.
+ */
+static inline bool bo_queued(struct qaic_bo *bo)
+{
+       return !list_empty(&bo->xfer_list);
+}
+
 inline int get_dbc_req_elem_size(void)
 {
        return sizeof(struct dbc_req);
        }
        complete_all(&bo->xfer_done);
        INIT_LIST_HEAD(&bo->slices);
+       INIT_LIST_HEAD(&bo->xfer_list);
 }
 
 static struct qaic_bo *qaic_alloc_init_bo(void)
        struct bo_slice *slice;
        unsigned long flags;
        struct qaic_bo *bo;
-       bool queued;
        int i, j;
        int ret;
 
                }
 
                spin_lock_irqsave(&dbc->xfer_lock, flags);
-               queued = bo->queued;
-               bo->queued = true;
-               if (queued) {
+               if (bo_queued(bo)) {
                        spin_unlock_irqrestore(&dbc->xfer_lock, flags);
                        ret = -EINVAL;
                        goto unlock_bo;
                        else
                                ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
                        if (ret) {
-                               bo->queued = false;
                                spin_unlock_irqrestore(&dbc->xfer_lock, flags);
                                goto unlock_bo;
                        }
                spin_lock_irqsave(&dbc->xfer_lock, flags);
                bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
                obj = &bo->base;
-               bo->queued = false;
-               list_del(&bo->xfer_list);
+               list_del_init(&bo->xfer_list);
                spin_unlock_irqrestore(&dbc->xfer_lock, flags);
                dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
                drm_gem_object_put(obj);
                         */
                        dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
                        bo->nr_slice_xfer_done = 0;
-                       bo->queued = false;
-                       list_del(&bo->xfer_list);
+                       list_del_init(&bo->xfer_list);
                        bo->perf_stats.req_processed_ts = ktime_get_ns();
                        complete_all(&bo->xfer_done);
                        drm_gem_object_put(&bo->base);
 
        /* Check if BO is committed to H/W for DMA */
        spin_lock_irqsave(&dbc->xfer_lock, flags);
-       if (bo->queued) {
+       if (bo_queued(bo)) {
                spin_unlock_irqrestore(&dbc->xfer_lock, flags);
                ret = -EBUSY;
                goto unlock_ch_srcu;
        spin_lock_irqsave(&dbc->xfer_lock, flags);
        while (!list_empty(&dbc->xfer_list)) {
                bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
-               bo->queued = false;
-               list_del(&bo->xfer_list);
+               list_del_init(&bo->xfer_list);
                spin_unlock_irqrestore(&dbc->xfer_lock, flags);
                bo->nr_slice_xfer_done = 0;
                bo->req_id = 0;