ring->st_oo_req++;
}
spin_unlock_irqrestore(&ring->pending_free_lock, flags);
+
+ /* Count every direct-ring request attempt; bumped even when the
+  * allocation above came up empty (that failure case is already
+  * tracked separately by st_oo_req). */
+ ring->st_req_direct++;
} else {
req = xen_blkbk_alloc_req(nsegs, true /* indirect req */);
+ /* st_oo_req_indirect is the indirect-path counterpart of
+  * st_oo_req: allocation failures for indirect requests. */
+ if (!req)
+ ring->st_oo_req_indirect++;
+ /* NOTE(review): incremented even when the allocation failed —
+  * mirrors st_req_direct above, so these presumably count
+  * attempts, not successes; confirm that is the intent. */
+ ring->st_req_indirect++;
}
return req;
static void print_stats(struct xen_blkif_ring *ring)
{
- pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
- " | ds %4llu | pg: %4u/%4d\n",
- current->comm, ring->st_oo_req,
- ring->st_rd_req, ring->st_wr_req,
- ring->st_f_req, ring->st_ds_req,
- ring->persistent_gnt_c,
- xen_blkif_max_pgrants);
+ /* Extended stat line. Abbreviations: d/i = direct/indirect request
+  * counts, oo/ooi = out-of-memory allocation failures on the
+  * direct/indirect paths, rd/wr/f/ds = read/write/flush/discard,
+  * pg = persistent grants in use vs. the configured maximum. */
+ pr_info("(%s): d %8llu | i %8llu | oo %3llu |"
+ " ooi %3llu | rd %4llu | wr %4llu | f %4llu"
+ " | ds %4llu | pg: %4u/%4d\n", current->comm,
+ ring->st_req_direct, ring->st_req_indirect,
+ ring->st_oo_req, ring->st_oo_req_indirect,
+ ring->st_rd_req, ring->st_wr_req,
+ ring->st_f_req, ring->st_ds_req,
+ ring->persistent_gnt_c,
+ xen_blkif_max_pgrants);
+
ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;
ring->st_ds_req = 0;
+ /* The new counters follow the existing windowed scheme: cleared
+  * after each 10 s reporting interval, same as rd/wr/oo/ds above. */
+ ring->st_req_direct = 0;
+ ring->st_req_indirect = 0;
+ ring->st_oo_req_indirect = 0;
}
int xen_blkif_schedule(void *arg)
unsigned long long st_ds_req;
unsigned long long st_rd_sect;
unsigned long long st_wr_sect;
+ /* Per-ring request-type statistics added by this change:
+  *   st_req_direct      - requests served from the normal (direct) ring
+  *   st_req_indirect    - requests using indirect descriptors
+  *   st_oo_req_indirect - failed allocations on the indirect path
+  *                        (the direct-path failure count is the
+  *                        pre-existing st_oo_req field). */
+ unsigned long long st_req_direct;
+ unsigned long long st_req_indirect;
+ unsigned long long st_oo_req_indirect;
/* Used by the kworker that offload work from the persistent purge. */
struct list_head persistent_purge_list;
VBD_SHOW_ALLRING(ds_req, "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");
+ /* Expose the new counters via sysfs alongside the existing stats.
+  * NOTE(review): VBD_SHOW_ALLRING presumably aggregates the st_<name>
+  * field across all rings of the device, as for the entries above —
+  * confirm against the macro definition (not visible in this hunk). */
+VBD_SHOW_ALLRING(oo_req_indirect, "%llu\n");
+VBD_SHOW_ALLRING(req_direct, "%llu\n");
+VBD_SHOW_ALLRING(req_indirect, "%llu\n");
static struct attribute *xen_vbdstat_attrs[] = {
&dev_attr_oo_req.attr,
&dev_attr_ds_req.attr,
&dev_attr_rd_sect.attr,
&dev_attr_wr_sect.attr,
+ /* Register the new attributes so they appear under the vbd's
+  * statistics directory; must stay in sync with the
+  * VBD_SHOW_ALLRING() definitions above. */
+ &dev_attr_oo_req_indirect.attr,
+ &dev_attr_req_direct.attr,
+ &dev_attr_req_indirect.attr,
NULL
};