 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev);
-static void transport_direct_request_timeout(struct se_cmd *cmd);
 static void transport_free_dev_tasks(struct se_cmd *cmd);
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 }
 EXPORT_SYMBOL(transport_complete_sync_cache);
 
-static void target_complete_timeout_work(struct work_struct *work)
-{
-       struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-       unsigned long flags;
-
-       /*
-        * Reset cmd->t_se_count to allow transport_put_cmd()
-        * to allow last call to free memory resources.
-        */
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (atomic_read(&cmd->t_transport_timeout) > 1) {
-               int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
-
-               atomic_sub(tmp, &cmd->t_se_count);
-       }
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-       transport_put_cmd(cmd);
-}
-
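
With target_complete_timeout_work() gone, target_complete_ok_work() and
target_complete_failure_work() are the only completion paths left. Both
rely on the standard deferred-completion idiom: embed a work_struct in the
command, recover the command via container_of() in the handler, and hand it
off to a workqueue. A minimal sketch of that pattern (the demo_* names are
illustrative, not from this file):

#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_cmd {
	struct work_struct work;
	int status;
};

static void demo_complete_work(struct work_struct *work)
{
	/* Recover the containing command from the embedded work item. */
	struct demo_cmd *cmd = container_of(work, struct demo_cmd, work);

	pr_info("completing cmd, status %d\n", cmd->status);
}

static void demo_queue_completion(struct demo_cmd *cmd,
				  struct workqueue_struct *wq)
{
	/* Bind the handler, then defer the completion to process context. */
	INIT_WORK(&cmd->work, demo_complete_work);
	queue_work(wq, &cmd->work);
}
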
 static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        if (dev)
                atomic_inc(&dev->depth_left);
 
-       del_timer(&task->task_timer);
-
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags &= ~TF_ACTIVE;
 
         /*
          * See if we are waiting for outstanding struct se_task
          * to complete for an exception condition
          */
        if (task->task_flags & TF_REQUEST_STOP) {
-               /*
-                * Decrement cmd->t_se_count if this task had
-                * previously thrown its timeout exception handler.
-                */
-               if (task->task_flags & TF_TIMEOUT) {
-                       atomic_dec(&cmd->t_se_count);
-                       task->task_flags &= ~TF_TIMEOUT;
-               }
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
                complete(&task->task_stop_comp);
                return;
        }
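
The surviving TF_REQUEST_STOP branch is a plain completion handshake: the
stopping context sets the flag under t_state_lock and sleeps in
wait_for_completion(), and the completing context acknowledges with
complete() instead of finishing the task normally. Schematically (all
demo_* names are illustrative):

#include <linux/completion.h>
#include <linux/spinlock.h>

struct demo_task {
	spinlock_t		lock;
	unsigned int		flags;
#define DEMO_TF_REQUEST_STOP	0x01
	struct completion	stop_comp;
};

/* Stopper side: request the stop, then wait for the ack. */
static void demo_stop_task(struct demo_task *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	t->flags |= DEMO_TF_REQUEST_STOP;
	spin_unlock_irqrestore(&t->lock, flags);

	wait_for_completion(&t->stop_comp);
}

/* Completion side: acknowledge the stop instead of completing normally. */
static void demo_complete_task(struct demo_task *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	if (t->flags & DEMO_TF_REQUEST_STOP) {
		spin_unlock_irqrestore(&t->lock, flags);
		complete(&t->stop_comp);
		return;
	}
	spin_unlock_irqrestore(&t->lock, flags);
}
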
-       /*
-        * If the task's timeout handler has fired, use the t_task_cdbs_timeout
-        * left counter to determine when the struct se_cmd is ready to be queued to
-        * the processing thread.
-        */
-       if (task->task_flags & TF_TIMEOUT) {
-               if (!atomic_dec_and_test(&cmd->t_task_cdbs_timeout_left)) {
-                       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-                       return;
-               }
-               INIT_WORK(&cmd->work, target_complete_timeout_work);
-               goto out_queue;
-       }
-       atomic_dec(&cmd->t_task_cdbs_timeout_left);
-
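
With the TF_TIMEOUT branch removed, completion is gated purely by
t_task_cdbs_left: every finishing task decrements it, and only the task
that takes it to zero queues the command for completion. A runnable
userspace model of that atomic_dec_and_test() gating (illustrative; C11
atomics standing in for the kernel's atomic_t):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_TASKS 4

static atomic_int cdbs_left = NR_TASKS;

static void *task_done(void *arg)
{
	/* fetch_sub returns the old value; 1 means we were the last task. */
	if (atomic_fetch_sub(&cdbs_left, 1) == 1)
		printf("last task: queueing command completion\n");
	return NULL;
}

int main(void)
{
	pthread_t th[NR_TASKS];

	for (int i = 0; i < NR_TASKS; i++)
		pthread_create(&th[i], NULL, task_done, NULL);
	for (int i = 0; i < NR_TASKS; i++)
		pthread_join(th[i], NULL);
	return 0;
}
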
        /*
         * Decrement the outstanding t_task_cdbs_left count.  The last
         * struct se_task from struct se_cmd will complete itself into the
                INIT_WORK(&cmd->work, target_complete_ok_work);
        }
 
-out_queue:
        cmd->t_state = TRANSPORT_COMPLETE;
        atomic_set(&cmd->t_transport_active, 1);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
        INIT_LIST_HEAD(&task->t_list);
        INIT_LIST_HEAD(&task->t_execute_list);
        INIT_LIST_HEAD(&task->t_state_list);
-       init_timer(&task->task_timer);
        init_completion(&task->task_stop_comp);
        task->task_se_cmd = cmd;
        task->task_data_direction = data_direction;
                spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 
                pr_debug("Task %p waiting to complete\n", task);
-               del_timer_sync(&task->task_timer);
                wait_for_completion(&task->task_stop_comp);
                pr_debug("Task %p stopped successfully\n", task);
 
                transport_complete_task_attr(cmd);
 
        if (complete) {
-               transport_direct_request_timeout(cmd);
                cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
        }
 
        transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
-static void transport_direct_request_timeout(struct se_cmd *cmd)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (!atomic_read(&cmd->t_transport_timeout)) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               return;
-       }
-       if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               return;
-       }
-
-       atomic_sub(atomic_read(&cmd->t_transport_timeout),
-                  &cmd->t_se_count);
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
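
This helper was the rebalancing half of the timeout bookkeeping: each timer
firing took an extra reference on the command via t_se_count, and the
accumulated surplus had to be subtracted again before the final put could
free anything. A runnable userspace model of that take-N/drop-N refcount
pattern (names illustrative):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cmd {
	atomic_int se_count;
	atomic_int transport_timeout;
};

static void demo_put_cmd(struct demo_cmd *cmd)
{
	/* Last reference out frees the command. */
	if (atomic_fetch_sub(&cmd->se_count, 1) == 1) {
		printf("freeing cmd\n");
		free(cmd);
	}
}

int main(void)
{
	struct demo_cmd *cmd = malloc(sizeof(*cmd));

	if (!cmd)
		return 1;
	atomic_init(&cmd->se_count, 3);		/* base ref + 2 timeout refs */
	atomic_init(&cmd->transport_timeout, 2);

	/* Rebalance: drop the references the timeout handlers took. */
	atomic_fetch_sub(&cmd->se_count,
			 atomic_load(&cmd->transport_timeout));

	demo_put_cmd(cmd);			/* now actually frees */
	return 0;
}
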
 static inline u32 transport_lba_21(unsigned char *cdb)
 {
        return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-/*
- * Called from interrupt context.
- */
-static void transport_task_timeout_handler(unsigned long data)
-{
-       struct se_task *task = (struct se_task *)data;
-       struct se_cmd *cmd = task->task_se_cmd;
-       unsigned long flags;
-
-       pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
-
-       spin_lock_irqsave(&cmd->t_state_lock, flags);
-
-       /*
-        * Determine if transport_complete_task() has already been called.
-        */
-       if (!(task->task_flags & TF_ACTIVE)) {
-               pr_debug("transport task: %p cmd: %p timeout !TF_ACTIVE\n",
-                        task, cmd);
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               return;
-       }
-
-       atomic_inc(&cmd->t_se_count);
-       atomic_inc(&cmd->t_transport_timeout);
-       cmd->t_tasks_failed = 1;
-
-       task->task_flags |= TF_TIMEOUT;
-       task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
-       task->task_scsi_status = 1;
-
-       if (task->task_flags & TF_REQUEST_STOP) {
-               pr_debug("transport task: %p cmd: %p timeout TF_REQUEST_STOP"
-                               " == 1\n", task, cmd);
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               complete(&task->task_stop_comp);
-               return;
-       }
-
-       if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
-               pr_debug("transport task: %p cmd: %p timeout non zero"
-                               " t_task_cdbs_left\n", task, cmd);
-               spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               return;
-       }
-       pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
-                       task, cmd);
-
-       INIT_WORK(&cmd->work, target_complete_failure_work);
-       cmd->t_state = TRANSPORT_COMPLETE;
-       atomic_set(&cmd->t_transport_active, 1);
-       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-       queue_work(target_completion_wq, &cmd->work);
-}
-
-static void transport_start_task_timer(struct se_task *task)
-{
-       struct se_device *dev = task->task_se_cmd->se_dev;
-       int timeout;
-
-       /*
-        * If the task_timeout is disabled, exit now.
-        */
-       timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
-       if (!timeout)
-               return;
-
-       task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
-       task->task_timer.data = (unsigned long) task;
-       task->task_timer.function = transport_task_timeout_handler;
-       add_timer(&task->task_timer);
-}
-
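
This is the heart of the patch: the per-task timer used the legacy
init_timer()/add_timer() interface, whose handler runs in timer (softirq)
context and receives its argument as an unsigned long, so every completion,
stop, and teardown path had to pair it with del_timer() or del_timer_sync().
For reference, the arming pattern being deleted looks like this in isolation
(demo_* names are illustrative; modern kernels would use
timer_setup()/from_timer() instead):

#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/timer.h>

struct demo_task {
	struct timer_list timer;
};

/* Legacy (pre-4.15) handler signature: context passed as unsigned long. */
static void demo_timeout(unsigned long data)
{
	struct demo_task *t = (struct demo_task *)data;

	pr_debug("task %p timed out\n", t);
}

static void demo_start_timer(struct demo_task *t, unsigned int secs)
{
	init_timer(&t->timer);
	t->timer.expires = jiffies + secs * HZ;
	t->timer.data = (unsigned long)t;
	t->timer.function = demo_timeout;
	add_timer(&t->timer);
}

Note the asymmetry this creates: del_timer() only deactivates a pending
timer, while del_timer_sync() additionally waits for a handler that is
already executing, which is why the dispose path below still needed the
sync variant even after an earlier plain del_timer().
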
 static inline int transport_tcq_window_closed(struct se_device *dev)
 {
        if (dev->dev_tcq_window_closed++ <
            cmd->t_task_list_num)
                atomic_set(&cmd->t_transport_sent, 1);
 
-       transport_start_task_timer(task);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
        /*
         * The struct se_cmd->transport_emulate_cdb() function pointer is used
                        spin_lock_irqsave(&cmd->t_state_lock, flags);
                        task->task_flags &= ~TF_ACTIVE;
                        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-                       del_timer_sync(&task->task_timer);
                        atomic_set(&cmd->t_transport_sent, 0);
                        transport_stop_tasks_for_cmd(cmd);
                        atomic_inc(&dev->depth_left);
                        spin_lock_irqsave(&cmd->t_state_lock, flags);
                        task->task_flags &= ~TF_ACTIVE;
                        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-                       del_timer_sync(&task->task_timer);
                        atomic_set(&cmd->t_transport_sent, 0);
                        transport_stop_tasks_for_cmd(cmd);
                        atomic_inc(&dev->depth_left);
        while (!list_empty(&dispose_list)) {
                task = list_first_entry(&dispose_list, struct se_task, t_list);
 
-               /*
-                * We already cancelled all pending timers in
-                * transport_complete_task, but that was just a pure del_timer,
-                * so do a full del_timer_sync here to make sure any handler
-                * that was running at that point has finished execution.
-                */
-               del_timer_sync(&task->task_timer);
-
                if (task->task_sg != cmd->t_data_sg &&
                    task->task_sg != cmd->t_bidi_data_sg)
                        kfree(task->task_sg);
        cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
        atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
        atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
-       atomic_set(&cmd->t_task_cdbs_timeout_left, cmd->t_task_list_num);
 
        /*
         * For WRITEs, let the fabric know its buffer is ready..