  **/
 static void ibmvfc_free_event(struct ibmvfc_event *evt)
 {
-       struct ibmvfc_host *vhost = evt->vhost;
-       struct ibmvfc_event_pool *pool = &vhost->pool;
+       struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
 
        BUG_ON(!ibmvfc_valid_event(pool, evt));
        BUG_ON(atomic_inc_return(&evt->free) != 1);
-       list_add_tail(&evt->queue, &vhost->free);
+       list_add_tail(&evt->queue_list, &evt->queue->free);
 }
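
Note: these hunks depend on queue members that are presumably introduced in the ibmvfc.h portion of the patch, which is not included in this excerpt. The sketch below is only an approximation of those assumed header additions, reconstructed from the member accesses in the diff (evt_pool, sent, free, queue, queue_list); field order and any members not referenced here are guesses. struct ibmvfc_host is likewise assumed to gain an embedded struct ibmvfc_queue named crq in place of its old pool, sent and free members.

struct ibmvfc_queue {
	struct ibmvfc_event_pool evt_pool;	/* event pool now lives per queue (was vhost->pool) */
	struct list_head sent;			/* outstanding events (was vhost->sent) */
	struct list_head free;			/* available events (was vhost->free) */
	/* existing CRQ message/DMA members omitted */
};

struct ibmvfc_event {
	struct list_head queue_list;		/* list linkage, renamed from 'queue' */
	struct ibmvfc_queue *queue;		/* owning queue, set in ibmvfc_init_event_pool() */
	/* remaining members unchanged */
};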
 
 /**
        } else
                evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
 
-       list_del(&evt->queue);
+       list_del(&evt->queue_list);
        del_timer(&evt->timer);
        ibmvfc_trc_end(evt);
        evt->done(evt);
        struct ibmvfc_event *evt, *pos;
 
        ibmvfc_dbg(vhost, "Purging all requests\n");
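+       /* all outstanding events are tracked on the primary CRQ's sent list */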
-       list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+       list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
                ibmvfc_fail_request(evt, error_code);
 }
 
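+ * @queue:     ibmvfc queue struct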
  *
  * Returns zero on success.
  **/
-static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
+                                 struct ibmvfc_queue *queue)
 {
        int i;
-       struct ibmvfc_event_pool *pool = &vhost->pool;
+       struct ibmvfc_event_pool *pool = &queue->evt_pool;
 
        ENTER;
        pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
                return -ENOMEM;
        }
 
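+       /* the sent/free event lists are now per queue (were vhost->sent/free) */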
+       INIT_LIST_HEAD(&queue->sent);
+       INIT_LIST_HEAD(&queue->free);
+
        for (i = 0; i < pool->size; ++i) {
                struct ibmvfc_event *evt = &pool->events[i];
                atomic_set(&evt->free, 1);
                evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
                evt->xfer_iu = pool->iu_storage + i;
                evt->vhost = vhost;
+               evt->queue = queue;
                evt->ext_list = NULL;
-               list_add_tail(&evt->queue, &vhost->free);
+               list_add_tail(&evt->queue_list, &queue->free);
        }
 
        LEAVE;
  * @vhost:     ibmvfc host who owns the event pool
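+ * @queue:     ibmvfc queue struct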
  *
  **/
-static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
+                                  struct ibmvfc_queue *queue)
 {
        int i;
-       struct ibmvfc_event_pool *pool = &vhost->pool;
+       struct ibmvfc_event_pool *pool = &queue->evt_pool;
 
        ENTER;
        for (i = 0; i < pool->size; ++i) {
-               list_del(&pool->events[i].queue);
+               list_del(&pool->events[i].queue_list);
                BUG_ON(atomic_read(&pool->events[i].free) != 1);
                if (pool->events[i].ext_list)
                        dma_pool_free(vhost->sg_pool,
  *
  * Returns a free event from the pool.
  **/
-static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
 {
        struct ibmvfc_event *evt;
 
-       BUG_ON(list_empty(&vhost->free));
-       evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
+       BUG_ON(list_empty(&queue->free));
+       evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
        atomic_set(&evt->free, 0);
-       list_del(&evt->queue);
+       list_del(&evt->queue_list);
        return evt;
 }
 
        else
                BUG();
 
-       list_add_tail(&evt->queue, &vhost->sent);
+       list_add_tail(&evt->queue_list, &evt->queue->sent);
        timer_setup(&evt->timer, ibmvfc_timeout, 0);
 
        if (timeout) {
 
        if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
                                  be64_to_cpu(crq_as_u64[1])))) {
-               list_del(&evt->queue);
+               list_del(&evt->queue_list);
                del_timer(&evt->timer);
 
                /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
        }
 
        cmnd->result = (DID_OK << 16);
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
        evt->cmnd = cmnd;
        cmnd->scsi_done = done;
        }
 
        vhost->aborting_passthru = 1;
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
 
        tmf = &evt->iu.tmf;
        if (unlikely((rc = ibmvfc_host_chkready(vhost))))
                goto unlock_out;
 
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
        plogi = &evt->iu.plogi;
        memset(plogi, 0, sizeof(*plogi));
                goto out;
        }
 
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
        mad = &evt->iu.passthru;
 
 
        spin_lock_irqsave(vhost->host->host_lock, flags);
        if (vhost->state == IBMVFC_ACTIVE) {
-               evt = ibmvfc_get_event(vhost);
+               evt = ibmvfc_get_event(&vhost->crq);
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
                tmf = ibmvfc_init_vfc_cmd(evt, sdev);
                iu = ibmvfc_get_fcp_iu(vhost, tmf);
        do {
                wait = 0;
                spin_lock_irqsave(vhost->host->host_lock, flags);
-               list_for_each_entry(evt, &vhost->sent, queue) {
+               list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
                        if (match(evt, device)) {
                                evt->eh_comp = &comp;
                                wait++;
                        if (!timeout) {
                                wait = 0;
                                spin_lock_irqsave(vhost->host->host_lock, flags);
-                               list_for_each_entry(evt, &vhost->sent, queue) {
+                               list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
                                        if (match(evt, device)) {
                                                evt->eh_comp = NULL;
                                                wait++;
        ENTER;
        spin_lock_irqsave(vhost->host->host_lock, flags);
        found_evt = NULL;
-       list_for_each_entry(evt, &vhost->sent, queue) {
+       list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
                if (evt->cmnd && evt->cmnd->device == sdev) {
                        found_evt = evt;
                        break;
        }
 
        if (vhost->logged_in) {
-               evt = ibmvfc_get_event(vhost);
+               evt = ibmvfc_get_event(&vhost->crq);
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 
                tmf = &evt->iu.tmf;
 
        spin_lock_irqsave(vhost->host->host_lock, flags);
        found_evt = NULL;
-       list_for_each_entry(evt, &vhost->sent, queue) {
+       list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
                if (evt->cmnd && evt->cmnd->device == sdev) {
                        found_evt = evt;
                        break;
        }
 
        if (vhost->state == IBMVFC_ACTIVE) {
-               evt = ibmvfc_get_event(vhost);
+               evt = ibmvfc_get_event(&vhost->crq);
                ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
                tmf = ibmvfc_init_vfc_cmd(evt, sdev);
                iu = ibmvfc_get_fcp_iu(vhost, tmf);
         * things we send. Make sure this response is to something we
         * actually sent
         */
-       if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+       if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
                dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
                        crq->ioba);
                return;
        }
 
        del_timer(&evt->timer);
-       list_del(&evt->queue);
+       list_del(&evt->queue_list);
        ibmvfc_trc_end(evt);
        evt->done(evt);
 }
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        vhost->discovery_threads++;
        ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
        evt->tgt = tgt;
 
        kref_get(&tgt->kref);
        tgt->logo_rcvd = 0;
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        vhost->discovery_threads++;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
        ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
        struct ibmvfc_event *evt;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
        evt->tgt = tgt;
        mad = &evt->iu.implicit_logout;
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        vhost->discovery_threads++;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
        ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
 
        vhost->abort_threads++;
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
 
        evt->tgt = tgt;
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        vhost->discovery_threads++;
        ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
        evt->tgt = tgt;
                return;
 
        kref_get(&tgt->kref);
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        vhost->discovery_threads++;
        evt->tgt = tgt;
        ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
 {
        struct ibmvfc_discover_targets *mad;
-       struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+       struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
 
        ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
        mad = &evt->iu.discover_targets;
 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
 {
        struct ibmvfc_npiv_login_mad *mad;
-       struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+       struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
 
        ibmvfc_gather_partition_info(vhost);
        ibmvfc_set_login_info(vhost);
 
        switch (mad_status) {
        case IBMVFC_MAD_SUCCESS:
-               if (list_empty(&vhost->sent) &&
+               if (list_empty(&vhost->crq.sent) &&
                    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
                        ibmvfc_init_host(vhost);
                        return;
        struct ibmvfc_npiv_logout_mad *mad;
        struct ibmvfc_event *evt;
 
-       evt = ibmvfc_get_event(vhost);
+       evt = ibmvfc_get_event(&vhost->crq);
        ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
 
        mad = &evt->iu.npiv_logout;
        shost->unique_id = shost->host_no;
 
        vhost = shost_priv(shost);
-       INIT_LIST_HEAD(&vhost->sent);
-       INIT_LIST_HEAD(&vhost->free);
        INIT_LIST_HEAD(&vhost->targets);
        sprintf(vhost->name, IBMVFC_NAME);
        vhost->host = shost;
                goto kill_kthread;
        }
 
-       if ((rc = ibmvfc_init_event_pool(vhost))) {
+       if ((rc = ibmvfc_init_event_pool(vhost, &vhost->crq))) {
                dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
                goto release_crq;
        }
 remove_shost:
        scsi_remove_host(shost);
 release_event_pool:
-       ibmvfc_free_event_pool(vhost);
+       ibmvfc_free_event_pool(vhost, &vhost->crq);
 release_crq:
        ibmvfc_release_crq_queue(vhost);
 kill_kthread:
        spin_lock_irqsave(vhost->host->host_lock, flags);
        ibmvfc_purge_requests(vhost, DID_ERROR);
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
-       ibmvfc_free_event_pool(vhost);
+       ibmvfc_free_event_pool(vhost, &vhost->crq);
 
        ibmvfc_free_mem(vhost);
        spin_lock(&ibmvfc_driver_lock);