return q->nr_congestion_off;
  }
  
- #if defined(CONFIG_BLK_DEV_INTEGRITY)
- 
- #define rq_for_each_integrity_segment(bvl, _rq, _iter)                \
-       __rq_for_each_bio(_iter.bio, _rq)                       \
-               bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
- 
- #endif /* BLK_DEV_INTEGRITY */
- 
  static inline int blk_cpu_to_group(int cpu)
  {
 +      int group = NR_CPUS;
  #ifdef CONFIG_SCHED_MC
        const struct cpumask *mask = cpu_coregroup_mask(cpu);
 -      return cpumask_first(mask);
 +      group = cpumask_first(mask);
  #elif defined(CONFIG_SCHED_SMT)
 -      return cpumask_first(topology_thread_cpumask(cpu));
 +      group = cpumask_first(topology_thread_cpumask(cpu));
  #else
        return cpu;
  #endif
 +      if (likely(group < NR_CPUS))
 +              return group;
 +      return cpu;
  }
 
  struct disk_part_tbl {
        struct rcu_head rcu_head;
        int len;
 -      struct hd_struct *last_lookup;
 +      struct hd_struct __rcu *last_lookup;
 +      struct gendisk *disk;
 -      struct hd_struct *part[];
 +      struct hd_struct __rcu *part[];
  };
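
A minimal sketch of how the __rcu-annotated table is meant to be accessed,
modeled on disk_get_part() in block/genhd.c; it is not part of this patch,
the helper name example_get_part() is made up, and it assumes disk->part_tbl
is likewise reached under RCU as in mainline. Readers dereference both the
table pointer and the part[] slots inside an RCU read-side critical section
and pin the partition with a device reference before leaving it; writers
install or clear slots with rcu_assign_pointer(), which is what lets sparse
flag any plain loads or stores of these pointers once __rcu is present.

#include <linux/rcupdate.h>
#include <linux/device.h>
#include <linux/genhd.h>

/* Hypothetical helper, for illustration only. */
static struct hd_struct *example_get_part(struct gendisk *disk, int partno)
{
        struct disk_part_tbl *ptbl;
        struct hd_struct *part = NULL;

        rcu_read_lock();
        ptbl = rcu_dereference(disk->part_tbl);
        if (partno >= 0 && partno < ptbl->len) {
                part = rcu_dereference(ptbl->part[partno]);
                if (part)
                        get_device(part_to_dev(part)); /* caller drops with put_device() */
        }
        rcu_read_unlock();

        return part; /* may be NULL; reference held if not */
}
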
  
  struct gendisk {