        qla24xx_vport_disable(fc_vport, disable);
 
-       if (ql2xmultique_tag) {
+       if (ha->flags.cpu_affinity_enabled) {
                req = ha->req_q_map[1];
                goto vport_queue;
        } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
                goto vport_queue;

                    vha->host_no, vha->vp_idx, vha));
         }
 
-       if (vha->req->id && !ql2xmultique_tag) {
+       if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
                if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
                        qla_printk(KERN_WARNING, ha,
                                "Queue delete failed.\n");
 
                                    ha->phy_version);
                                if (rval != QLA_SUCCESS)
                                        goto failed;
-
                                ha->flags.npiv_supported = 0;
                                if (IS_QLA2XXX_MIDTYPE(ha) &&
                                         (ha->fw_attributes & BIT_2)) {

        struct req_que *req;
        struct rsp_que *rsp;
 
-       if (ql2xmultique_tag)
+       if (vha->hw->flags.cpu_affinity_enabled)
                req = vha->hw->req_q_map[0];
        else
                req = vha->req;

                return -EINVAL;
 
        rval = qla2x00_fw_ready(base_vha);
-       if (ql2xmultique_tag)
+       if (ha->flags.cpu_affinity_enabled)
                req = ha->req_q_map[0];
        else
                req = vha->req;
 
        int ques, req, ret;
        struct qla_hw_data *ha = vha->hw;
 
+       if (!(ha->fw_attributes & BIT_6)) {
+               qla_printk(KERN_INFO, ha,
+                       "Firmware is not multi-queue capable\n");
+               goto fail;
+       }
        if (ql2xmultique_tag) {
-               /* CPU affinity mode */
-               ha->wq = create_workqueue("qla2xxx_wq");
                /* create a request queue for IO */
                options |= BIT_7;
                req = qla25xx_create_req_que(ha, options, 0, 0, -1,
                        QLA_DEFAULT_QUE_QOS);
                if (!req) {
                        qla_printk(KERN_WARNING, ha,
                                "Can't create request queue\n");
                        goto fail;
                }
+               ha->wq = create_workqueue("qla2xxx_wq");
                vha->req = ha->req_q_map[req];
                options |= BIT_1;
                for (ques = 1; ques < ha->max_rsp_queues; ques++) {
                        ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
                        if (!ret) {
                                qla_printk(KERN_WARNING, ha,
                                        "Response Queue create failed\n");
                                goto fail2;
                        }
                }
+               ha->flags.cpu_affinity_enabled = 1;
+
                DEBUG2(qla_printk(KERN_INFO, ha,
                        "CPU affinity mode enabled, no. of response"
                        " queues:%d, no. of request queues:%d\n",
        return 0;
 fail2:
        qla25xx_delete_queues(vha);
+       destroy_workqueue(ha->wq);
+       ha->wq = NULL;
 fail:
        ha->mqenable = 0;
+       kfree(ha->req_q_map);
+       kfree(ha->rsp_q_map);
+       ha->max_req_queues = ha->max_rsp_queues = 1;
        return 1;
 }
 
        if (ret)
                goto probe_init_failed;
        /* Alloc arrays of request and response ring ptrs */
+que_init:
        if (!qla2x00_alloc_queues(ha)) {
                qla_printk(KERN_WARNING, ha,
                "[ERROR] Failed to allocate memory for queue"
                goto probe_failed;
        }
 
-       if (ha->mqenable)
-               if (qla25xx_setup_mode(base_vha))
+       if (ha->mqenable) {
+               if (qla25xx_setup_mode(base_vha)) {
                        qla_printk(KERN_WARNING, ha,
                                "Can't create queues, falling back to single"
                                " queue mode\n");
+                       goto que_init;
+               }
+       }
 
        if (ha->flags.running_gold_fw)
                goto skip_dpc;