        unsigned fail_count;            /* Cumulative failure count */
 
        struct dm_path path;
-       struct work_struct deactivate_path;
        struct work_struct activate_path;
 };
 
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
-static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
 
        if (pgpath) {
                pgpath->is_active = 1;
-               INIT_WORK(&pgpath->deactivate_path, deactivate_path);
                INIT_WORK(&pgpath->activate_path, activate_path);
        }
 
        kfree(pgpath);
 }
 
-static void deactivate_path(struct work_struct *work)
-{
-       struct pgpath *pgpath =
-               container_of(work, struct pgpath, deactivate_path);
-
-       blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
-}
-
 static struct priority_group *alloc_priority_group(void)
 {
        struct priority_group *pg;
                      pgpath->path.dev->name, m->nr_valid_paths);
 
        schedule_work(&m->trigger_event);
-       queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
        spin_unlock_irqrestore(&m->lock, flags);