static unsigned int xen_blkbk_unmap_prepare(
struct xen_blkif_ring *ring,
- struct grant_page **pages,
+ struct grant_page *pages,
unsigned int num,
struct gnttab_unmap_grant_ref *unmap_ops,
struct page **unmap_pages)
unsigned int i, invcount = 0;
for (i = 0; i < num; i++) {
- if (pages[i]->persistent_gnt != NULL) {
- put_persistent_gnt(ring, pages[i]->persistent_gnt);
+ if (pages[i].persistent_gnt != NULL) {
+ put_persistent_gnt(ring, pages[i].persistent_gnt);
continue;
}
- if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
+ if (pages[i].handle == BLKBACK_INVALID_HANDLE)
continue;
- unmap_pages[invcount] = pages[i]->page;
- gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
- GNTMAP_host_map, pages[i]->handle);
- pages[i]->handle = BLKBACK_INVALID_HANDLE;
+ unmap_pages[invcount] = pages[i].page;
+ gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i].page),
+ GNTMAP_host_map, pages[i].handle);
+ pages[i].handle = BLKBACK_INVALID_HANDLE;
invcount++;
}
{
struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
struct xen_blkif_ring *ring = req->ring;
- struct grant_page **pages = req->segments;
+ struct grant_page *pages = req->segments;
unsigned int invcount;
invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
* no real need.
*/
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
- struct grant_page *pages[],
+ struct grant_page *pages,
int num)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
}
static int xen_blkbk_map(struct xen_blkif_ring *ring,
- struct grant_page *pages[],
+ struct grant_page *pages,
int num, bool ro)
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
if (use_persistent_gnts) {
persistent_gnt = get_persistent_gnt(
ring,
- pages[i]->gref);
+ pages[i].gref);
}
if (persistent_gnt) {
* We are using persistent grants and
* the grant is already mapped
*/
- pages[i]->page = persistent_gnt->page;
- pages[i]->persistent_gnt = persistent_gnt;
+ pages[i].page = persistent_gnt->page;
+ pages[i].persistent_gnt = persistent_gnt;
} else {
- if (get_free_page(ring, &pages[i]->page))
+ if (get_free_page(ring, &pages[i].page))
goto out_of_memory;
- addr = vaddr(pages[i]->page);
- pages_to_gnt[segs_to_map] = pages[i]->page;
- pages[i]->persistent_gnt = NULL;
+ addr = vaddr(pages[i].page);
+ pages_to_gnt[segs_to_map] = pages[i].page;
+ pages[i].persistent_gnt = NULL;
flags = GNTMAP_host_map;
if (!use_persistent_gnts && ro)
flags |= GNTMAP_readonly;
gnttab_set_map_op(&map[segs_to_map++], addr,
- flags, pages[i]->gref,
+ flags, pages[i].gref,
blkif->domid);
}
map_until = i + 1;
* the page from the other domain.
*/
for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
- if (!pages[seg_idx]->persistent_gnt) {
+ if (!pages[seg_idx].persistent_gnt) {
/* This is a newly mapped grant */
BUG_ON(new_map_idx >= segs_to_map);
if (unlikely(map[new_map_idx].status != 0)) {
pr_debug("invalid buffer -- could not remap it\n");
- put_free_pages(ring, &pages[seg_idx]->page, 1);
- pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+ put_free_pages(ring, &pages[seg_idx].page, 1);
+ pages[seg_idx].handle = BLKBACK_INVALID_HANDLE;
ret |= 1;
goto next;
}
- pages[seg_idx]->handle = map[new_map_idx].handle;
+ pages[seg_idx].handle = map[new_map_idx].handle;
} else {
continue;
}
}
persistent_gnt->gnt = map[new_map_idx].ref;
persistent_gnt->handle = map[new_map_idx].handle;
- persistent_gnt->page = pages[seg_idx]->page;
+ persistent_gnt->page = pages[seg_idx].page;
if (add_persistent_gnt(ring,
persistent_gnt)) {
kfree(persistent_gnt);
persistent_gnt = NULL;
goto next;
}
- pages[seg_idx]->persistent_gnt = persistent_gnt;
+ pages[seg_idx].persistent_gnt = persistent_gnt;
pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
persistent_gnt->gnt, ring->persistent_gnt_c,
xen_blkif_max_pgrants);
static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct pending_req *pending_req,
- struct seg_buf seg[],
+ struct seg_buf *seg,
struct phys_req *preq)
{
- struct grant_page **pages = pending_req->indirect_pages;
+ struct grant_page *pages = pending_req->indirect_pages;
struct xen_blkif_ring *ring = pending_req->ring;
int indirect_grefs, rc, n, nseg, i;
struct blkif_request_segment *segments = NULL;
BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
for (i = 0; i < indirect_grefs; i++)
- pages[i]->gref = req->u.indirect.indirect_grefs[i];
+ pages[i].gref = req->u.indirect.indirect_grefs[i];
rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
if (rc)
/* Map indirect segments */
if (segments)
kunmap_atomic(segments);
- segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
+ segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME].page);
}
i = n % SEGS_PER_INDIRECT_FRAME;
- pending_req->segments[n]->gref = segments[i].gref;
+ pending_req->segments[n].gref = segments[i].gref;
first_sect = READ_ONCE(segments[i].first_sect);
last_sect = READ_ONCE(segments[i].last_sect);
int operation;
struct blk_plug plug;
bool drain = false;
- struct grant_page **pages = pending_req->segments;
+ struct grant_page *pages = pending_req->segments;
unsigned short req_operation;
req_operation = req->operation == BLKIF_OP_INDIRECT ?
preq.dev = req->u.rw.handle;
preq.sector_number = req->u.rw.sector_number;
for (i = 0; i < nseg; i++) {
- pages[i]->gref = req->u.rw.seg[i].gref;
+ pages[i].gref = req->u.rw.seg[i].gref;
seg[i].nsec = req->u.rw.seg[i].last_sect -
req->u.rw.seg[i].first_sect + 1;
seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
- (bio_add_page(bio,
- pages[i]->page,
+ (bio_add_page(bio, pages[i].page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
- unsigned int j, r;
+ unsigned int r;
bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
list_del(&req->free_list);
- for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
- kfree(req->segments[j]);
-
- for (j = 0; j < MAX_INDIRECT_PAGES; j++)
- kfree(req->indirect_pages[j]);
-
- kfree(req);
+ xen_blkbk_free_req(req);
i++;
}
xenbus_transaction_end(xbt, 1);
}
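+/*
+ * Free a pending_req together with the per-segment arrays that
+ * xen_blkbk_alloc_req() allocated for it. kfree(NULL) is a no-op, so
+ * this is also safe on a partially allocated request.
+ */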
+void xen_blkbk_free_req(struct pending_req *req)
+{
+ kfree(req->indirect_pages);
+ kfree(req->unmap);
+ kfree(req->unmap_pages);
+ kfree(req->biolist);
+ kfree(req->segments);
+ kfree(req->seg);
+ kfree(req);
+}
+
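+/*
+ * Allocate a pending_req plus the per-segment arrays sized for nseg
+ * segments; the indirect_pages array is only needed when the request
+ * will use indirect descriptors. Returns NULL if any allocation fails.
+ */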
+struct pending_req *xen_blkbk_alloc_req(unsigned int nseg, bool indirect)
+{
+ struct pending_req *req;
+
+ BUG_ON(nseg > MAX_INDIRECT_SEGMENTS);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
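+ /*
+ * Size each array by the requested number of segments rather than
+ * doing a separate kzalloc() per segment as before.
+ */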
+ req->seg = kcalloc(nseg, sizeof(*req->seg), GFP_KERNEL);
+ req->segments = kcalloc(nseg, sizeof(*req->segments), GFP_KERNEL);
+ req->biolist = kcalloc(nseg, sizeof(*req->biolist), GFP_KERNEL);
+ req->unmap_pages = kcalloc(nseg, sizeof(*req->unmap_pages), GFP_KERNEL);
+ req->unmap = kcalloc(nseg, sizeof(*req->unmap), GFP_KERNEL);
+
+ if (indirect) {
+ req->indirect_pages = kcalloc(INDIRECT_PAGES(nseg),
+ sizeof(*req->indirect_pages),
+ GFP_KERNEL);
+ } else { /* Not strictly necessary because req is kzalloc'd */
+ req->indirect_pages = NULL;
+ }
+
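+ /*
+ * A single combined check is enough: on failure, xen_blkbk_free_req()
+ * releases whichever arrays were successfully allocated.
+ */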
+ if (!req->seg || !req->biolist || !req->unmap || !req->segments ||
+ !req->unmap_pages || (indirect && !req->indirect_pages)) {
+
+ xen_blkbk_free_req(req);
+ req = NULL;
+ }
+
+ return req;
+}
+
/*
* Each ring may have multi pages, depends on "ring-page-order".
*/
{
unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
struct pending_req *req, *n;
- int err, i, j;
+ int err, i;
struct xen_blkif *blkif = ring->blkif;
struct xenbus_device *dev = blkif->be->dev;
unsigned int ring_page_order, nr_grefs, evtchn;
blkif->nr_ring_pages = nr_grefs;
for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
- req = kzalloc(sizeof(*req), GFP_KERNEL);
+ req = xen_blkbk_alloc_req(MAX_INDIRECT_SEGMENTS, true /* indirect */);
+
if (!req)
goto fail;
+
list_add_tail(&req->free_list, &ring->pending_free);
- for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
- req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
- if (!req->segments[j])
- goto fail;
- }
- for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
- req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
- GFP_KERNEL);
- if (!req->indirect_pages[j])
- goto fail;
- }
}
/* Map the shared frame, irq etc. */
fail:
list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
list_del(&req->free_list);
- for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
- if (!req->segments[j])
- break;
- kfree(req->segments[j]);
- }
- for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
- if (!req->indirect_pages[j])
- break;
- kfree(req->indirect_pages[j]);
- }
- kfree(req);
+ xen_blkbk_free_req(req);
}
return -ENOMEM;