                return 0;
 
        fbio = bio;
-       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+       cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
-       if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+       if (!blk_queue_cluster(q))
                return 0;
 
        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
        int nsegs, cluster;
 
        nsegs = 0;
-       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+       cluster = blk_queue_cluster(q);
 
        /*
         * for each bio in rq
 
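The three hunks above, from block/blk-merge.c, are the consumer side of
the change: the segment-counting paths stop peeking at QUEUE_FLAG_CLUSTER
in q->queue_flags and call the new blk_queue_cluster() helper instead,
which reads the setting out of the queue limits. A minimal user-space
model of the new shape (stand-in types, illustrative names only):

    #include <stdio.h>

    /* Toy stand-ins for the kernel structures. */
    struct queue_limits {
            unsigned char cluster;          /* replaces no_cluster */
    };

    struct request_queue {
            unsigned long queue_flags;      /* old home of the setting */
            struct queue_limits limits;     /* new home of the setting */
    };

    /* Mirrors the inline helper this patch adds to blkdev.h. */
    static inline unsigned int blk_queue_cluster(struct request_queue *q)
    {
            return q->limits.cluster;
    }

    int main(void)
    {
            struct request_queue q = { .limits = { .cluster = 1 } };
            printf("clustering enabled: %u\n", blk_queue_cluster(&q));
            return 0;
    }

The next hunks, from block/blk-settings.c, move the default and the
stacking logic over to the new field.
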
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
-       lim->no_cluster = 0;
+       lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
        blk_stack_limits(&t->limits, &b->limits, 0);
-
-       if (!t->queue_lock)
-               WARN_ON_ONCE(1);
-       else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-               unsigned long flags;
-               spin_lock_irqsave(t->queue_lock, flags);
-               queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-               spin_unlock_irqrestore(t->queue_lock, flags);
-       }
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm(t->io_opt, b->io_opt);
 
-       t->no_cluster |= b->no_cluster;
+       t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;
 
        /* Physical block size a multiple of the logical block size? */
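
This is the heart of the patch: now that the setting lives in struct
queue_limits, blk_stack_limits() can combine it like any other limit.
Flipping the polarity from no_cluster to cluster turns the old |= into
an &=, so a stacked device keeps clustering only if every device below
it supports it. A small user-space sketch of that semantics
(illustrative names):

    #include <stdio.h>

    struct limits { unsigned char cluster; };

    /* Mirrors the t->cluster &= b->cluster line in blk_stack_limits(). */
    static void stack_cluster(struct limits *t, const struct limits *b)
    {
            t->cluster &= b->cluster;
    }

    int main(void)
    {
            struct limits top = { .cluster = 1 };   /* the new default */
            struct limits dev_a = { .cluster = 1 };
            struct limits dev_b = { .cluster = 0 }; /* one non-clustering device */

            stack_cluster(&top, &dev_a);
            stack_cluster(&top, &dev_b);
            printf("stacked cluster = %u\n", top.cluster);  /* prints 0 */
            return 0;
    }
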
                       sector_t offset)
 {
        struct request_queue *t = disk->queue;
-       struct request_queue *b = bdev_get_queue(bdev);
 
        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
-
-       if (!t->queue_lock)
-               WARN_ON_ONCE(1);
-       else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(t->queue_lock, flags);
-               if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-                       queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-               spin_unlock_irqrestore(t->queue_lock, flags);
-       }
 }
 EXPORT_SYMBOL(disk_stack_limits);
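
With the combining done inside blk_stack_limits(), both stacking entry
points shed the same boilerplate: the WARN_ON_ONCE() for top-level
queues that may not have a queue_lock yet, and the lock/test/clear
dance for QUEUE_FLAG_CLUSTER. bdev_stack_limits() and
blk_stack_limits() already fold the bottom device's limits, cluster
included, into the top queue's limits.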
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-       if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+       if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));
 
        return queue_var_show(PAGE_CACHE_SIZE, (page));
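
The sysfs max_segment_size attribute (block/blk-sysfs.c) keeps its
user-visible behaviour: with clustering disabled no segment can span
more than one page, so the file reports the page size rather than the
configured limit. In effect (user-space sketch, with PAGE_SIZE_BYTES
standing in for PAGE_CACHE_SIZE):

    #include <stdio.h>

    #define PAGE_SIZE_BYTES 4096u   /* stand-in for PAGE_CACHE_SIZE */

    /* What queue_max_segment_size_show() reports, in effect. */
    static unsigned int reported_seg_size(unsigned int cluster,
                                          unsigned int max_segment_size)
    {
            return cluster ? max_segment_size : PAGE_SIZE_BYTES;
    }

    int main(void)
    {
            printf("%u\n", reported_seg_size(1, 65536));    /* 65536 */
            printf("%u\n", reported_seg_size(0, 65536));    /* 4096 */
            return 0;
    }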
 
         */
        q->limits = *limits;
 
-       if (limits->no_cluster)
-               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-       else
-               queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
        if (!dm_table_supports_discards(t))
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
        else
 
                goto abort;
        mddev->queue->queuedata = mddev;
 
-       /* Can be unlocked because the queue is new: no concurrency */
-       queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
        blk_queue_make_request(mddev->queue, md_make_request);
 
        disk = alloc_disk(1 << shift);
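
The two stacking drivers lose their hand-rolled flag management as
well: device-mapper (drivers/md/dm-table.c) no longer translates
no_cluster back into a queue flag when it applies a table's limits, and
md (drivers/md/md.c) no longer sets QUEUE_FLAG_CLUSTER on a freshly
allocated queue, because blk_set_default_limits() now starts every
queue with cluster = 1 and blk_stack_limits() clears it whenever a
component device requires that.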
 
 
        blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-       /* New queue, no concurrency on queue_flags */
        if (!shost->use_clustering)
-               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+               q->limits.cluster = 0;
 
        /*
         * set a reasonable default alignment on word boundaries: the
 
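At the bottom of the stack (drivers/scsi/scsi_lib.c) the conversion is
just as direct: a SCSI host that does not want adjacent pages merged
into one segment (use_clustering == 0) used to clear the queue flag on
its freshly allocated queue; now it writes the limit field. The queue
is not yet visible to anyone else at this point, which is why neither
version needs locking. The driver-side pattern, as a user-space sketch
(same stand-in types as above):

    #include <stdio.h>

    struct queue_limits { unsigned char cluster; };
    struct request_queue { struct queue_limits limits; };

    /* Mirrors the scsi_lib.c hunk: host capability -> limit field. */
    static void apply_clustering(struct request_queue *q, int use_clustering)
    {
            if (!use_clustering)
                    q->limits.cluster = 0;
    }

    int main(void)
    {
            struct request_queue q = { .limits = { .cluster = 1 } };
            apply_clustering(&q, 0);                /* non-clustering host */
            printf("cluster = %u\n", q.limits.cluster);     /* prints 0 */
            return 0;
    }

The hunks below, from include/linux/blkdev.h, finish the job: the
queue_limits field changes polarity, the queue flag is retired, and the
new helper is added.
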
 
        unsigned char           misaligned;
        unsigned char           discard_misaligned;
-       unsigned char           no_cluster;
+       unsigned char           cluster;
        signed char             discard_zeroes_data;
 };
 
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER     0       /* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED     2       /* queue is stopped */
 #define        QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_SECDISCARD  19      /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
-                                (1 << QUEUE_FLAG_CLUSTER) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
                                 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 #define rq_data_dir(rq)                ((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+       return q->limits.cluster;
+}
+
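
Note that the hot paths in blk-merge.c (the first and third hunks of
this patch) still read the value once into a local cluster variable
before walking the bio list; blk_queue_cluster() is a plain load from
q->limits, so this keeps the old code shape at no extra cost.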
 /*
  * We regard a request as sync, if either a read or a sync write
  */