        }
 }
 
-static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
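+/*
+ * Move the BO off the LRU and onto the device's pinned list. If the driver
+ * provides a del_from_lru_notify callback, tell it the BO left the LRU.
+ */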
+static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
+{
+       struct ttm_device *bdev = bo->bdev;
+
+       list_move_tail(&bo->lru, &bdev->pinned);
+
+       if (bdev->funcs->del_from_lru_notify)
+               bdev->funcs->del_from_lru_notify(bo);
+}
+
+static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
        struct ttm_device *bdev = bo->bdev;
 
                dma_resv_assert_held(bo->base.resv);
 
        if (bo->pin_count) {
-               ttm_bo_del_from_lru(bo);
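+               /* Pinned BOs are parked on bdev->pinned instead of an LRU */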
+               ttm_bo_move_to_pinned(bo);
                return;
        }
 
                return ret;
        }
 
-       ttm_bo_del_from_lru(bo);
+       ttm_bo_move_to_pinned(bo);
        list_del_init(&bo->ddestroy);
        spin_unlock(&bo->bdev->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);
                return 0;
        }
 
-       ttm_bo_del_from_lru(bo);
+       ttm_bo_move_to_pinned(bo);
        /* TODO: Cleanup the locking */
        spin_unlock(&bo->bdev->lru_lock);
 
 
        INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
        spin_lock_init(&bdev->lru_lock);
        INIT_LIST_HEAD(&bdev->ddestroy);
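+       /* BOs removed from the LRUs are kept on this per-device list */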
+       INIT_LIST_HEAD(&bdev->pinned);
        bdev->dev_mapping = mapping;
        mutex_lock(&ttm_global_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);