}
 EXPORT_SYMBOL(blk_queue_alignment_offset);
 
+void blk_queue_update_readahead(struct request_queue *q)
+{
+       /*
+        * For read-ahead of large files to be effective, we need to read ahead
+        * at least twice the optimal I/O size.
+        */
+       q->backing_dev_info->ra_pages =
+               max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
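+
+       /*
+        * Also allow large explicit reads to exceed the window above, up to
+        * the queue's max_sectors limit.
+        */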
+       q->backing_dev_info->io_pages =
+               queue_max_sectors(q) >> (PAGE_SHIFT - 9);
+}
+EXPORT_SYMBOL_GPL(blk_queue_update_readahead);
+
 /**
  * blk_limits_io_min - set minimum request size for a device
  * @limits: the queue limits
 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
 {
        blk_limits_io_opt(&q->limits, opt);
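+       /* keep the default readahead window in sync with the new io_opt */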
+       q->backing_dev_info->ra_pages =
+               max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
 }
 EXPORT_SYMBOL(blk_queue_io_opt);
 
                       top, bottom);
        }
 
-       t->backing_dev_info->io_pages =
-               t->limits.max_sectors >> (PAGE_SHIFT - 9);
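+       /* re-derive ra_pages and io_pages from the freshly stacked limits */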
+       blk_queue_update_readahead(disk->queue);
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
 
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }
 
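+       /* set up the default readahead window from the queue limits */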
+       blk_queue_update_readahead(q);
+
        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;
 
        WARN_ON(d->gd);
        WARN_ON(d->flags & DEVFL_UP);
        blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-       q->backing_dev_info->ra_pages = SZ_2M / PAGE_SIZE;
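+       /* ra_pages is now derived from the optimal I/O size set below */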
        blk_queue_io_opt(q, SZ_2M);
        d->bufpool = mp;
        d->blkq = gd->queue = q;
 
 
        if (b) {
                blk_stack_limits(&q->limits, &b->limits, 0);
-
-               if (q->backing_dev_info->ra_pages !=
-                   b->backing_dev_info->ra_pages) {
-                       drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-                                q->backing_dev_info->ra_pages,
-                                b->backing_dev_info->ra_pages);
-                       q->backing_dev_info->ra_pages =
-                                               b->backing_dev_info->ra_pages;
-               }
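+               /* re-derive readahead from the limits stacked above */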
+               blk_queue_update_readahead(q);
        }
        fixup_discard_if_not_supported(q);
        fixup_write_zeroes(device, q);
 
        if (ret)
                return ret;
 
-       dc->disk.disk->queue->backing_dev_info->ra_pages =
-               max(dc->disk.disk->queue->backing_dev_info->ra_pages,
-                   q->backing_dev_info->ra_pages);
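+       /* the readahead window now follows the stacked optimal I/O size */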
        blk_queue_io_opt(dc->disk.disk->queue,
                max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
 
 
        }
 #endif
 
-       /* Allow reads to exceed readahead limits */
-       q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
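+       /* this also still lets reads exceed the readahead window via io_pages */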
+       blk_queue_update_readahead(q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
 
                 mdname(mddev),
                 (unsigned long long)mddev->array_sectors);
 
-       if (mddev->queue) {
-               /* calculate the max read-ahead size.
-                * For read-ahead of large files to be effective, we need to
-                * readahead at least twice a whole stripe. i.e. number of devices
-                * multiplied by chunk size times 2.
-                * If an individual device has an ra_pages greater than the
-                * chunk size, then we will not drive that device as hard as it
-                * wants.  We consider this a configuration error: a larger
-                * chunksize should be used in that case.
-                */
-               int stripe = mddev->raid_disks *
-                       (mddev->chunk_sectors << 9) / PAGE_SIZE;
-               if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
-                       mddev->queue->backing_dev_info->ra_pages = 2* stripe;
-       }
-
        dump_zones(mddev);
 
        ret = md_integrity_register(mddev);
 
        mddev->resync_max_sectors = size;
        set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
-       if (mddev->queue) {
-               int stripe = conf->geo.raid_disks *
-                       ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-
-               /* Calculate max read-ahead size.
-                * We need to readahead at least twice a whole stripe....
-                * maybe...
-                */
-               stripe /= conf->geo.near_copies;
-               if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-                       mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
-       }
-
        if (md_integrity_register(mddev))
                goto out_free_conf;
 
        conf->reshape_safe = MaxSector;
        spin_unlock_irq(&conf->device_lock);
 
-       /* read-ahead size must cover two whole stripes, which is
-        * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
-        */
-       if (conf->mddev->queue) {
-               int stripe = conf->geo.raid_disks *
-                       ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
-               stripe /= conf->geo.near_copies;
-               if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-                       conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+       if (conf->mddev->queue)
                raid10_set_io_opt(conf);
-       }
        conf->fullsync = 0;
 }
 
 
                int data_disks = conf->previous_raid_disks - conf->max_degraded;
                int stripe = data_disks *
                        ((mddev->chunk_sectors << 9) / PAGE_SIZE);
-               if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-                       mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 
                chunk_size = mddev->chunk_sectors << 9;
                blk_queue_io_min(mddev->queue, chunk_size);
                spin_unlock_irq(&conf->device_lock);
                wake_up(&conf->wait_for_overlap);
 
-               /* read-ahead size must cover two whole stripes, which is
-                * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
-                */
-               if (conf->mddev->queue) {
-                       int data_disks = conf->raid_disks - conf->max_degraded;
-                       int stripe = data_disks * ((conf->chunk_sectors << 9)
-                                                  / PAGE_SIZE);
-                       if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-                               conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
+               if (conf->mddev->queue)
                        raid5_set_io_opt(conf);
-               }
        }
 }
 
 
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_stack_limits(&ns->head->disk->queue->limits,
                                 &ns->queue->limits, 0);
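+               /* refresh readahead on the multipath node after restacking limits */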
+               blk_queue_update_readahead(ns->head->disk->queue);
                nvme_update_bdev_size(ns->head->disk);
        }
 #endif
 
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                       unsigned int alignment);
+void blk_queue_update_readahead(struct request_queue *q);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);