u32 seed)
 {
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       unsigned int align = q->dma_pad_mask | queue_dma_alignment(q);
+       unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
        struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
        struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
        unsigned int direction, nr_bvecs;
 
                        const struct iov_iter *iter, gfp_t gfp_mask)
 {
        bool copy = false, map_bvec = false;
-       unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
+       unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;
 
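Both hunks above build the same gating mask: the DMA alignment and the pad mask are OR-ed together, and any user address or length with bits inside that mask pushes the request onto the bounce-copy path instead of being mapped directly. A standalone sketch of that check, using made-up limit values (dma_alignment = 511, dma_pad_mask = 3) that are illustrative only, not taken from the patch:

#include <stdbool.h>
#include <stdio.h>

/* toy stand-in for the combined mask the hunks above compute */
static bool needs_copy(unsigned long addr, unsigned long len,
                       unsigned int dma_alignment, unsigned int dma_pad_mask)
{
        unsigned long align = dma_alignment | dma_pad_mask;

        /* a misaligned start address or length forces a bounce buffer */
        return (addr & align) || (len & align);
}

int main(void)
{
        printf("%d\n", needs_copy(0x1000, 4096, 511, 3));      /* 0: map directly */
        printf("%d\n", needs_copy(0x1001, 4096, 511, 3));      /* 1: copy */
        printf("%d\n", needs_copy(0x1000, 4097, 511, 3));      /* 1: copy */
        return 0;
}
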
 }
 EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
 
-/**
- * blk_queue_update_dma_pad - update pad mask
- * @q:     the request queue for the device
- * @mask:  pad mask
- *
- * Update dma pad mask.
- *
- * Appending pad buffer to a request modifies the last entry of a
- * scatter list such that it includes the pad buffer.
- **/
-void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
-{
-       if (mask > q->dma_pad_mask)
-               q->dma_pad_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_update_dma_pad);
-
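With blk_queue_update_dma_pad() removed, the driver hunks below assign lim->dma_pad_mask directly while the limits are being built in their device-configure paths. A driver that wanted the old helper's "raise but never lower" behaviour on an already-initialized queue would presumably go through the atomic queue-limits update pair instead; a hypothetical sketch, not part of this patch:

/* hypothetical helper, not in this patch: raise the pad mask on a live queue */
static int example_raise_dma_pad(struct request_queue *q, unsigned int mask)
{
        struct queue_limits lim = queue_limits_start_update(q);

        /* preserve the removed helper's never-shrink semantics */
        if (mask > lim.dma_pad_mask)
                lim.dma_pad_mask = mask;
        return queue_limits_commit_update(q, &lim);
}
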
 /**
  * blk_set_queue_depth - tell the block layer about the device queue depth
  * @q:         the request queue for the device
 
 int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
                struct ata_device *dev)
 {
-       struct request_queue *q = sdev->request_queue;
        int depth = 1;
 
        if (!ata_id_has_unload(dev->id))
                sdev->sector_size = ATA_SECT_SIZE;
 
                /* set DMA padding */
-               blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
+               lim->dma_pad_mask = ATA_DMA_PAD_SZ - 1;
 
                /* make room for appending the drain */
                lim->max_segments--;
 
        /* OHare has issues with non cache aligned DMA on some chipsets */
        if (priv->kind == controller_ohare) {
                lim->dma_alignment = 31;
-               blk_queue_update_dma_pad(sdev->request_queue, 31);
+               lim->dma_pad_mask = 31;
 
                /* Tell the world about it */
                ata_dev_info(dev, "OHare alignment limits applied\n");
        if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
                /* Allright these are bad, apply restrictions */
                lim->dma_alignment = 15;
-               blk_queue_update_dma_pad(sdev->request_queue, 15);
+               lim->dma_pad_mask = 15;
 
                /* We enable MWI and hack cache line size directly here, this
                 * is specific to this chipset and not normal values, we happen
 
         */
        count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
 
-       if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+       if (blk_rq_bytes(rq) & rq->q->limits.dma_pad_mask) {
                unsigned int pad_len =
-                       (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+                       (rq->q->limits.dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
                last_sg->length += pad_len;
                cmd->extra_len += pad_len;
 
 }
 
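The padding arithmetic above relies on the identity that (mask & ~nbytes) + 1 is the number of bytes needed to round nbytes up to the next (mask + 1) boundary; the surrounding if skips it entirely when nbytes is already aligned. A quick userspace check of that identity with mask = 3 (ATA_DMA_PAD_SZ is 4, so its pad mask is 3):

#include <assert.h>

/* bytes needed to round nbytes up to the next (mask + 1) boundary */
static unsigned int pad_len(unsigned int nbytes, unsigned int mask)
{
        return (mask & ~nbytes) + 1;
}

int main(void)
{
        assert(pad_len(7, 3) == 1);     /* 7   -> 8   */
        assert(pad_len(510, 3) == 2);   /* 510 -> 512 */
        /* already-aligned lengths never reach this path: the caller
         * tests (nbytes & mask) first, as the hunk above shows. */
        return 0;
}
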
 /**
- * ufshcd_slave_configure - adjust SCSI device configurations
+ * ufshcd_device_configure - adjust SCSI device configurations
  * @sdev: pointer to SCSI device
+ * @lim: queue limits
  *
  * Return: 0 (success).
  */
-static int ufshcd_slave_configure(struct scsi_device *sdev)
+static int ufshcd_device_configure(struct scsi_device *sdev,
+               struct queue_limits *lim)
 {
        struct ufs_hba *hba = shost_priv(sdev->host);
        struct request_queue *q = sdev->request_queue;
 
-       blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+       lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1;
 
        /*
         * Block runtime-pm until all consumers are added.
        .queuecommand           = ufshcd_queuecommand,
        .mq_poll                = ufshcd_poll,
        .slave_alloc            = ufshcd_slave_alloc,
-       .slave_configure        = ufshcd_slave_configure,
+       .device_configure       = ufshcd_device_configure,
        .slave_destroy          = ufshcd_slave_destroy,
        .change_queue_depth     = ufshcd_change_queue_depth,
        .eh_abort_handler       = ufshcd_abort,
 
         * due to possible offsets.
         */
        unsigned int            dma_alignment;
+       unsigned int            dma_pad_mask;
 
        struct blk_integrity    integrity;
 };
         */
        int                     id;
 
-       unsigned int            dma_pad_mask;
-
        /*
         * queue settings
         */
                            sector_t offset);
 void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                sector_t offset, const char *pfx);
-extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 
 struct blk_independent_access_ranges *
                                   bdev_logical_block_size(bdev) - 1);
 }
 
+static inline unsigned int blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
+{
+       return lim->dma_alignment | lim->dma_pad_mask;
+}
+
 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                 unsigned int len)
 {
-       unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+       unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
+
        return !(addr & alignment) && !(len & alignment);
 }