        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
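+       /*
+        * Two mmc_queue_req slots (current and previous) so each request
+        * has its own buffers and scatterlists, allowing one request to be
+        * prepared while the other is still being transferred.
+        */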
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+       struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = *mmc_dev(host)->dma_mask;
                return -ENOMEM;
 
        memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+       memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
        mq->mqrq_cur = mqrq_cur;
+       mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;
 
        blk_queue_prep_rq(mq->queue, mmc_prep_request);
                                        "allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        }
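+                       /*
+                        * Second bounce buffer for the prev slot.  If it
+                        * cannot be allocated, drop the cur buffer as well
+                        * and fall back to the non-bounced path below.
+                        */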
+                       mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+                       if (!mqrq_prev->bounce_buf) {
+                               printk(KERN_WARNING "%s: unable to "
+                                       "allocate bounce prev buffer\n",
+                                       mmc_card_name(card));
+                               kfree(mqrq_cur->bounce_buf);
+                               mqrq_cur->bounce_buf = NULL;
+                       }
                }
 
-               if (mqrq_cur->bounce_buf) {
+               if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        if (ret)
                                goto cleanup_queue;
 
+                       mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+                       if (ret)
+                               goto cleanup_queue;
+
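+                       /* Bounce scatterlist for the prev slot, sized like
+                        * the cur one (one entry per 512-byte sector). */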
+                       mqrq_prev->bounce_sg =
+                               mmc_alloc_sg(bouncesz / 512, &ret);
+                       if (ret)
+                               goto cleanup_queue;
                }
        }
 #endif
 
-       if (!mqrq_cur->bounce_buf) {
+       if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                if (ret)
                        goto cleanup_queue;
 
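+               /* Non-bounced path: the prev slot gets its own full-size
+                * scatterlist, matching the cur slot above. */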
+               mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+               if (ret)
+                       goto cleanup_queue;
        }
 
        sema_init(&mq->thread_sem, 1);
  free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
+       kfree(mqrq_prev->bounce_sg);
+       mqrq_prev->bounce_sg = NULL;
 
  cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;
 
+       kfree(mqrq_prev->sg);
+       mqrq_prev->sg = NULL;
+       kfree(mqrq_prev->bounce_buf);
+       mqrq_prev->bounce_buf = NULL;
+
        blk_cleanup_queue(mq->queue);
        return ret;
 }
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+       struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;
 
+       kfree(mqrq_prev->bounce_sg);
+       mqrq_prev->bounce_sg = NULL;
+
+       kfree(mqrq_prev->sg);
+       mqrq_prev->sg = NULL;
+
+       kfree(mqrq_prev->bounce_buf);
+       mqrq_prev->bounce_buf = NULL;
+
        mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);