return 1;
        }
 
+       spin_lock_bh(&oct->cmd_resp_wqlock);
+       oct->cmd_resp_state = OCT_DRV_OFFLINE;
+       spin_unlock_bh(&oct->cmd_resp_wqlock);
+
        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < lio->linfo.num_rxpciq; j++)
 
 
        struct cavium_wq dma_comp_wq;
 
+       /** Lock for dma response list */
+       spinlock_t cmd_resp_wqlock;
+       u32 cmd_resp_state;
+
        struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
 
        struct cavium_wk nic_poll_work;
        void *priv;
 };
 
+/* Driver command-response state: while OCT_DRV_OFFLINE, only the RX_CTL
+ * control command is accepted (see the cmd_resp_state check in the
+ * control-packet send path); all other commands are rejected.
+ */
+#define  OCT_DRV_ONLINE 1
+#define  OCT_DRV_OFFLINE 2
 #define  OCTEON_CN6XXX(oct)           ((oct->chip_id == OCTEON_CN66XX) || \
                                       (oct->chip_id == OCTEON_CN68XX))
 #define CHIP_FIELD(oct, TYPE, field)             \
 
        int retval;
        struct octeon_soft_command *sc = NULL;
 
+       spin_lock_bh(&oct->cmd_resp_wqlock);
+       /* Allow only rx ctrl command to stop traffic on the chip
+        * during offline operations
+        */
+       if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
+           (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
+               spin_unlock_bh(&oct->cmd_resp_wqlock);
+               dev_err(&oct->pci_dev->dev,
+                       "%s cmd:%d not processed since driver offline\n",
+                       __func__, nctrl->ncmd.s.cmd);
+               return -1;
+       }
+
        sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
        if (!sc) {
                dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
                        __func__);
+               spin_unlock_bh(&oct->cmd_resp_wqlock);
                return -1;
        }
 
        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                octeon_free_soft_command(oct, sc);
-               dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
-                       __func__, retval);
+               dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
+                       __func__, nctrl->ncmd.s.cmd, retval);
+               spin_unlock_bh(&oct->cmd_resp_wqlock);
                return -1;
        }
 
+       spin_unlock_bh(&oct->cmd_resp_wqlock);
        return retval;
 }
 
                spin_lock_init(&oct->response_list[i].lock);
                atomic_set(&oct->response_list[i].pending_req_count, 0);
        }
+       spin_lock_init(&oct->cmd_resp_wqlock);
 
        oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
        if (!oct->dma_comp_wq.wq) {
        cwq = &oct->dma_comp_wq;
        INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
        cwq->wk.ctxptr = oct;
+       oct->cmd_resp_state = OCT_DRV_ONLINE;
        queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
 
        return ret;