parent = bfqg_parent(bfqg);
- lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
+ lockdep_assert_held(&bfqg_to_blkg(bfqg)->disk->queue->queue_lock);
if (unlikely(!parent))
return;
{
struct blkcg_gq *blkg = pd_to_blkg(pd);
struct bfq_group *bfqg = blkg_to_bfqg(blkg);
- struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
+ struct bfq_data *bfqd = blkg->disk->queue->elevator->elevator_data;
struct bfq_entity *entity = &bfqg->entity;
struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);
struct cgroup_subsys_state *pos_css;
u64 sum = 0;
- lockdep_assert_held(&blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->disk->queue->queue_lock);
rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
struct cgroup_subsys_state *pos_css;
unsigned int i;
- lockdep_assert_held(&blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->disk->queue->queue_lock);
memset(sum, 0, sizeof(*sum));
rcu_read_lock();
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
- if (blkg->q)
- blk_put_queue(blkg->q);
+ if (blkg->disk)
+ blk_put_queue(blkg->disk->queue);
free_percpu(blkg->iostat_cpu);
percpu_ref_exit(&blkg->refcnt);
kfree(blkg);
if (!blk_get_queue(disk->queue))
goto err_free;
- blkg->q = disk->queue;
+ blkg->disk = disk;
INIT_LIST_HEAD(&blkg->q_node);
spin_lock_init(&blkg->async_bio_lock);
bio_list_init(&blkg->async_bios);
static void blkg_update_hint(struct blkcg *blkcg, struct blkcg_gq *blkg)
{
- lockdep_assert_held(&blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->disk->queue->queue_lock);
if (blkcg != &blkcg_root && blkg != rcu_dereference(blkcg->blkg_hint))
rcu_assign_pointer(blkcg->blkg_hint, blkg);
struct blkcg *blkcg = blkg->blkcg;
int i;
- lockdep_assert_held(&blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->disk->queue->queue_lock);
lockdep_assert_held(&blkcg->lock);
/* Something wrong if we are trying to remove same group twice */
blkg->online = false;
- radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+ radix_tree_delete(&blkcg->blkg_tree, blkg->disk->queue->id);
list_del_init(&blkg->q_node);
hlist_del_init_rcu(&blkg->blkcg_node);
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
- if (!blkg->q->disk || !blkg->q->disk->bdi->dev)
+ if (!blkg->disk->bdi->dev)
return NULL;
- return bdi_dev_name(blkg->q->disk->bdi);
+ return bdi_dev_name(blkg->disk->bdi);
}
/**
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
- spin_lock_irq(&blkg->q->queue_lock);
- if (blkcg_policy_enabled(blkg->q, pol))
+ spin_lock_irq(&blkg->disk->queue->queue_lock);
+ if (blkcg_policy_enabled(blkg->disk->queue, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
- spin_unlock_irq(&blkg->q->queue_lock);
+ spin_unlock_irq(&blkg->disk->queue->queue_lock);
}
rcu_read_unlock();
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
- spin_lock_irq(&blkg->q->queue_lock);
+ spin_lock_irq(&blkg->disk->queue->queue_lock);
blkcg_print_one_stat(blkg, sf);
- spin_unlock_irq(&blkg->q->queue_lock);
+ spin_unlock_irq(&blkg->disk->queue->queue_lock);
}
rcu_read_unlock();
return 0;
while (!hlist_empty(&blkcg->blkg_list)) {
struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
struct blkcg_gq, blkcg_node);
- struct request_queue *q = blkg->q;
+ struct request_queue *q = blkg->disk->queue;
if (need_resched() || !spin_trylock(&q->queue_lock)) {
/*
struct blkg_iostat last;
};
-/* association between a blk cgroup and a request queue */
+/* association between a blk cgroup and a gendisk */
struct blkcg_gq {
- /* Pointer to the associated request_queue */
- struct request_queue *q;
+ /* Pointer to the associated gendisk */
+ struct gendisk *disk;
struct list_head q_node;
struct hlist_node blkcg_node;
struct blkcg *blkcg;
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
- * request_queue (q). This is used by blkcg policies which need to track
- * information per blkcg - q pair.
+ * gendisk. This is used by blkcg policies that need to track
+ * information per blkcg-gendisk pair.
*
* There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods. A policy can allocate private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
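For illustration, a minimal sketch of the embedding pattern described above, mirroring pd_to_blkg()/blkg_to_bfqg() in the hunks earlier; struct example_pd and its helpers are hypothetical, while blkg_policy_data, pd_to_blkg(), and container_of() are the real kernel facilities:

/* Hypothetical policy-private per-blkg data; blkg_policy_data comes first. */
struct example_pd {
        struct blkg_policy_data pd;
        u64 example_bytes;              /* illustrative per-pair counter */
};

static inline struct example_pd *pd_to_example(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct example_pd, pd) : NULL;
}

static inline struct blkcg_gq *example_to_blkg(struct example_pd *epd)
{
        return pd_to_blkg(&epd->pd);    /* same shape as tg_to_blkg() */
}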
/*
* Policies that need to keep per-blkcg data which is independent from any
- * request_queue associated to it should implement cpd_alloc/free_fn()
+ * gendisk associated with it should implement cpd_alloc/free_fn()
* methods. A policy can allocate private data area by allocating larger
* data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
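By the same token, a hedged sketch of the per-blkcg (cpd) side; struct example_blkcg_data is hypothetical, while the embedding and the container_of() accessor mirror blkcg_to_bfqgd() as used in the bfq hunk above:

/* Hypothetical per-blkcg policy data, independent of any gendisk. */
struct example_blkcg_data {
        struct blkcg_policy_data cpd;   /* embedded at the beginning */
        unsigned int default_weight;    /* illustrative per-blkcg setting */
};

static inline struct example_blkcg_data *
cpd_to_example(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct example_blkcg_data, cpd) : NULL;
}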
return q->root_blkg;
blkg = rcu_dereference(blkcg->blkg_hint);
- if (blkg && blkg->q == q)
+ if (blkg && blkg->disk->queue == q)
return blkg;
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
- if (blkg && blkg->q != q)
+ if (blkg && blkg->disk->queue != q)
blkg = NULL;
return blkg;
}
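For reference, a hedged sketch of a typical lookup under the required locking, consistent with the print helpers above (blkcg and disk stand in for whatever the caller already holds):

struct blkcg_gq *blkg;

rcu_read_lock();
blkg = blkg_lookup(blkcg, disk->queue);
if (blkg) {
        spin_lock_irq(&blkg->disk->queue->queue_lock);
        /* ... read or update per-blkg state ... */
        spin_unlock_irq(&blkg->disk->queue->queue_lock);
}
rcu_read_unlock();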
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q)))
+ (p_blkg)->disk->queue)))
/**
* blkg_for_each_descendant_post - post-order walk of a blkg's descendants
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q)))
+ (p_blkg)->disk->queue)))
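And a minimal usage sketch of the walkers, following the recursive-sum hunk earlier in this section (the online check matches how blkg->online is managed above; the accumulation is illustrative):

struct blkcg_gq *pos_blkg;
struct cgroup_subsys_state *pos_css;
u64 nr_online = 0;

lockdep_assert_held(&blkg->disk->queue->queue_lock);

rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
        /* skip groups that never came online */
        if (!pos_blkg->online)
                continue;
        nr_online++;
}
rcu_read_unlock();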
bool __blkcg_punt_bio_submit(struct bio *bio);
{
struct ioc_gq *iocg = pd_to_iocg(pd);
struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
- struct ioc *ioc = q_to_ioc(blkg->q);
+ struct ioc *ioc = q_to_ioc(blkg->disk->queue);
struct ioc_now now;
struct blkcg_gq *tblkg;
unsigned long flags;
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
- struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
+ struct rq_qos *rqos = blkcg_rq_qos(blkg->disk->queue);
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
u64 now = ktime_to_ns(ktime_get());
int cpu;
- if (blk_queue_nonrot(blkg->q))
+ if (blk_queue_nonrot(blkg->disk->queue))
iolat->ssd = true;
else
iolat->ssd = false;
latency_stat_init(iolat, &iolat->cur_stat);
rq_wait_init(&iolat->rq_wait);
spin_lock_init(&iolat->child_lat.lock);
- iolat->rq_depth.queue_depth = blkg->q->nr_requests;
+ iolat->rq_depth.queue_depth = blkg->disk->queue->nr_requests;
iolat->rq_depth.max_depth = UINT_MAX;
iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
iolat->blkiolat = blkiolat;
{
struct throtl_grp *tg = pd_to_tg(pd);
struct blkcg_gq *blkg = tg_to_blkg(tg);
- struct throtl_data *td = blkg->q->td;
+ struct throtl_data *td = blkg->disk->queue->td;
struct throtl_service_queue *sq = &tg->service_queue;
/*
/* throtl_data may be gone, so figure out request queue by blkg */
if (tg)
- q = tg->pd.blkg->q;
+ q = tg->pd.blkg->disk->queue;
else
q = td->queue;