        int ret = 1, disable_multi = 0, retry = 0, type;
        enum mmc_blk_status status;
        struct mmc_queue_req *mq_rq;
-       struct request *req;
+       struct request *req = rqc;
        struct mmc_async_req *areq;
 
        if (!rqc && !mq->mqrq_prev->req)
                return 0;
 
        do {
                if (rqc) {
+                       /*
+                        * When the 4KB native sector size is enabled, only
+                        * reads and writes that are a multiple of 8 blocks
+                        * are allowed.
+                        */
+                       if ((brq->data.blocks & 0x07) &&
+                           (card->ext_csd.data_sector_size == 4096)) {
+                               pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+                                       req->rq_disk->disk_name);
+                               goto cmd_abort;
+                       }
                        mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
                        areq = &mq->mqrq_cur->mmc_active;
                } else
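For illustration only, here is a minimal userspace sketch (not driver code) of the alignment rule the new check enforces: with a 4096-byte native sector size, the 512-byte block count of a request must be a multiple of 8, which is what the (blocks & 0x07) test verifies. The function name and the sample values below are assumptions made up for this example.

#include <stdbool.h>
#include <stdio.h>

/* Returns true when a transfer of "blocks_512" 512-byte blocks is allowed
 * for a card whose native sector size is "sector_size" bytes. Mirrors the
 * check added above: for 4096-byte sectors the count must be a multiple
 * of 8, and (blocks_512 & 0x07) is a cheap test for blocks_512 % 8 != 0. */
static bool transfer_aligned(unsigned int blocks_512, unsigned int sector_size)
{
	if (sector_size == 4096 && (blocks_512 & 0x07))
		return false;
	return true;
}

int main(void)
{
	/* 8 blocks (4 KB) is allowed, 3 blocks (1.5 KB) is rejected. */
	printf("%d %d\n", transfer_aligned(8, 4096), transfer_aligned(3, 4096));
	return 0;
}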
        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%d%s", md->name_idx, subname ? subname : "");
 
-       blk_queue_logical_block_size(md->queue.queue, 512);
+       if (mmc_card_mmc(card))
+               blk_queue_logical_block_size(md->queue.queue,
+                                            card->ext_csd.data_sector_size);
+       else
+               blk_queue_logical_block_size(md->queue.queue, 512);
+
        set_capacity(md->disk, size);
 
        if (mmc_host_cmd23(card->host)) {
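After this change, an eMMC card that reports a 4096-byte data sector size in ext_csd exposes that value as the block device's logical block size, while other cards keep the 512-byte default. As a hedged usage sketch, userspace can observe the exported size with the BLKSSZGET ioctl; the device path below is only an assumed example.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	/* /dev/mmcblk0 is an assumed device node used only for illustration. */
	int fd = open("/dev/mmcblk0", O_RDONLY);
	int lbs = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* BLKSSZGET reports the logical block size configured with
	 * blk_queue_logical_block_size() in the hunk above. */
	if (ioctl(fd, BLKSSZGET, &lbs) == 0)
		printf("logical block size: %d bytes\n", lbs);
	close(fd);
	return 0;
}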