xen-blkback: make struct pending_req less monolithic
author     Ankur Arora <ankur.a.arora@oracle.com>
           Thu, 4 Jan 2018 12:36:41 +0000 (07:36 -0500)
committer  Ankur Arora <ankur.a.arora@oracle.com>
           Wed, 17 Jan 2018 17:59:36 +0000 (12:59 -0500)
Change struct pending_req so that its internal arrays (segments,
indirect_pages, seg, biolist, unmap and unmap_pages) are allocated
separately rather than embedded as fixed-size arrays. Allocation and
teardown are factored out into xen_blkbk_alloc_req() and
xen_blkbk_free_req().
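
A sketch of how a caller uses the new helpers (the nseg value below is
illustrative; this patch still sizes every request for
MAX_INDIRECT_SEGMENTS, as read_per_ring_refs() does):

	struct pending_req *req;

	/* illustrative nseg; the patch itself passes MAX_INDIRECT_SEGMENTS */
	req = xen_blkbk_alloc_req(nseg, true /* indirect */);
	if (!req)
		return -ENOMEM;
	list_add_tail(&req->free_list, &ring->pending_free);
	...
	/* on teardown, frees every internal array and the request itself */
	xen_blkbk_free_req(req);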

Orabug: 26670475

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c

diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 1bf143ee7b52073a3020d7f90a513a0805444a10..c3aab638c1e5b8a5e2cf3ee336b13525d81918f1 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -686,7 +686,7 @@ void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
 
 static unsigned int xen_blkbk_unmap_prepare(
        struct xen_blkif_ring *ring,
-       struct grant_page **pages,
+       struct grant_page *pages,
        unsigned int num,
        struct gnttab_unmap_grant_ref *unmap_ops,
        struct page **unmap_pages)
@@ -694,16 +694,16 @@ static unsigned int xen_blkbk_unmap_prepare(
        unsigned int i, invcount = 0;
 
        for (i = 0; i < num; i++) {
-               if (pages[i]->persistent_gnt != NULL) {
-                       put_persistent_gnt(ring, pages[i]->persistent_gnt);
+               if (pages[i].persistent_gnt != NULL) {
+                       put_persistent_gnt(ring, pages[i].persistent_gnt);
                        continue;
                }
-               if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
+               if (pages[i].handle == BLKBACK_INVALID_HANDLE)
                        continue;
-               unmap_pages[invcount] = pages[i]->page;
-               gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
-                                   GNTMAP_host_map, pages[i]->handle);
-               pages[i]->handle = BLKBACK_INVALID_HANDLE;
+               unmap_pages[invcount] = pages[i].page;
+               gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i].page),
+                                   GNTMAP_host_map, pages[i].handle);
+               pages[i].handle = BLKBACK_INVALID_HANDLE;
                invcount++;
        }
 
@@ -746,7 +746,7 @@ static void xen_blkbk_unmap_and_respond(struct pending_req *req)
 {
        struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
        struct xen_blkif_ring *ring = req->ring;
-       struct grant_page **pages = req->segments;
+       struct grant_page *pages = req->segments;
        unsigned int invcount;
 
        invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
@@ -771,7 +771,7 @@ static void xen_blkbk_unmap_and_respond(struct pending_req *req)
  * no real need.
  */
 static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
-                            struct grant_page *pages[],
+                           struct grant_page *pages,
                             int num)
 {
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -795,7 +795,7 @@ static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
 }
 
 static int xen_blkbk_map(struct xen_blkif_ring *ring,
-                        struct grant_page *pages[],
+                        struct grant_page *pages,
                         int num, bool ro)
 {
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -823,7 +823,7 @@ again:
                if (use_persistent_gnts) {
                        persistent_gnt = get_persistent_gnt(
                                ring,
-                               pages[i]->gref);
+                               pages[i].gref);
                }
 
                if (persistent_gnt) {
@@ -831,19 +831,19 @@ again:
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
-                       pages[i]->page = persistent_gnt->page;
-                       pages[i]->persistent_gnt = persistent_gnt;
+                       pages[i].page = persistent_gnt->page;
+                       pages[i].persistent_gnt = persistent_gnt;
                } else {
-                       if (get_free_page(ring, &pages[i]->page))
+                       if (get_free_page(ring, &pages[i].page))
                                goto out_of_memory;
-                       addr = vaddr(pages[i]->page);
-                       pages_to_gnt[segs_to_map] = pages[i]->page;
-                       pages[i]->persistent_gnt = NULL;
+                       addr = vaddr(pages[i].page);
+                       pages_to_gnt[segs_to_map] = pages[i].page;
+                       pages[i].persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
-                                         flags, pages[i]->gref,
+                                         flags, pages[i].gref,
                                          blkif->domid);
                }
                map_until = i + 1;
@@ -862,17 +862,17 @@ again:
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
-               if (!pages[seg_idx]->persistent_gnt) {
+               if (!pages[seg_idx].persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug("invalid buffer -- could not remap it\n");
-                               put_free_pages(ring, &pages[seg_idx]->page, 1);
-                               pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
+                               put_free_pages(ring, &pages[seg_idx].page, 1);
+                               pages[seg_idx].handle = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
-                       pages[seg_idx]->handle = map[new_map_idx].handle;
+                       pages[seg_idx].handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
@@ -894,14 +894,14 @@ again:
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
-                       persistent_gnt->page = pages[seg_idx]->page;
+                       persistent_gnt->page = pages[seg_idx].page;
                        if (add_persistent_gnt(ring,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
-                       pages[seg_idx]->persistent_gnt = persistent_gnt;
+                       pages[seg_idx].persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, ring->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
@@ -945,10 +945,10 @@ static int xen_blkbk_map_seg(struct pending_req *pending_req)
 
 static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
-                                   struct seg_buf seg[],
+                                   struct seg_buf *seg,
                                    struct phys_req *preq)
 {
-       struct grant_page **pages = pending_req->indirect_pages;
+       struct grant_page *pages = pending_req->indirect_pages;
        struct xen_blkif_ring *ring = pending_req->ring;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;
@@ -958,7 +958,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
        for (i = 0; i < indirect_grefs; i++)
-               pages[i]->gref = req->u.indirect.indirect_grefs[i];
+               pages[i].gref = req->u.indirect.indirect_grefs[i];
 
        rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
        if (rc)
@@ -971,11 +971,11 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
-                       segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
+                       segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME].page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;
 
-               pending_req->segments[n]->gref = segments[i].gref;
+               pending_req->segments[n].gref = segments[i].gref;
 
                first_sect = READ_ONCE(segments[i].first_sect);
                last_sect = READ_ONCE(segments[i].last_sect);
@@ -1228,7 +1228,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        int operation;
        struct blk_plug plug;
        bool drain = false;
-       struct grant_page **pages = pending_req->segments;
+       struct grant_page *pages = pending_req->segments;
        unsigned short req_operation;
 
        req_operation = req->operation == BLKIF_OP_INDIRECT ?
@@ -1288,7 +1288,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                preq.dev               = req->u.rw.handle;
                preq.sector_number     = req->u.rw.sector_number;
                for (i = 0; i < nseg; i++) {
-                       pages[i]->gref = req->u.rw.seg[i].gref;
+                       pages[i].gref = req->u.rw.seg[i].gref;
                        seg[i].nsec = req->u.rw.seg[i].last_sect -
                                req->u.rw.seg[i].first_sect + 1;
                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
@@ -1351,8 +1351,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
-                      (bio_add_page(bio,
-                                    pages[i]->page,
+                      (bio_add_page(bio, pages[i].page,
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {
 
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index aca9397277e95c268de0ecaee4a791a6644bd5fa..7dd81544bd9a744612eff4dbd71eafa8c11b3a7d 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -356,13 +356,13 @@ struct pending_req {
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
-       struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
+       struct grant_page       *segments;
        /* Indirect descriptors */
-       struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
-       struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
-       struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
-       struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
-       struct page                   *unmap_pages[MAX_INDIRECT_SEGMENTS];
+       struct grant_page       *indirect_pages;
+       struct seg_buf          *seg;
+       struct bio              **biolist;
+       struct gnttab_unmap_grant_ref *unmap;
+       struct page             **unmap_pages;
        struct gntab_unmap_queue_data gnttab_unmap_data;
 };
 
@@ -401,6 +401,9 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
 struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
 void xen_blkbk_unmap_purged_grants(struct work_struct *work);
 
+struct pending_req *xen_blkbk_alloc_req(unsigned int nseg, bool indirect);
+void xen_blkbk_free_req(struct pending_req *req);
+
 static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
 {
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 01ad2ca61760790eaedae883e93e7e3cb983272e..dbcb5de52f86107afa80ae5363fda68dc5b1ec49 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -242,7 +242,7 @@ static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
        struct pending_req *req, *n;
-       unsigned int j, r;
+       unsigned int r;
        bool busy = false;
 
        for (r = 0; r < blkif->nr_rings; r++) {
@@ -280,13 +280,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
                list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
                        list_del(&req->free_list);
 
-                       for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
-                               kfree(req->segments[j]);
-
-                       for (j = 0; j < MAX_INDIRECT_PAGES; j++)
-                               kfree(req->indirect_pages[j]);
-
-                       kfree(req);
+                       xen_blkbk_free_req(req);
                        i++;
                }
 
@@ -923,6 +917,53 @@ again:
        xenbus_transaction_end(xbt, 1);
 }
 
+void xen_blkbk_free_req(struct pending_req *req)
+{
+       kfree(req->indirect_pages);
+       kfree(req->unmap);
+       kfree(req->unmap_pages);
+       kfree(req->biolist);
+       kfree(req->segments);
+       kfree(req->seg);
+       kfree(req);
+
+       return;
+}
+
+struct pending_req *xen_blkbk_alloc_req(unsigned int nseg, bool indirect)
+{
+       struct pending_req *req;
+
+       BUG_ON(nseg > MAX_INDIRECT_SEGMENTS);
+
+       req = kzalloc(sizeof(*req), GFP_KERNEL);
+       if (!req)
+               return NULL;
+
+       req->seg = kzalloc(nseg * sizeof(*req->seg), GFP_KERNEL);
+       req->segments = kzalloc(nseg * sizeof(*req->segments), GFP_KERNEL);
+       req->biolist = kzalloc(nseg * sizeof(*req->biolist), GFP_KERNEL);
+       req->unmap_pages = kzalloc(nseg * sizeof(*req->unmap_pages), GFP_KERNEL);
+       req->unmap = kzalloc(nseg * sizeof(*req->unmap), GFP_KERNEL);
+
+       if (indirect) {
+               req->indirect_pages = kzalloc(INDIRECT_PAGES(nseg) *
+                                             sizeof(*req->indirect_pages),
+                                             GFP_KERNEL);
+       } else { /* Not strictly necessary because req is kzalloc'd */
+               req->indirect_pages = NULL;
+       }
+
+       if (!req->seg || !req->biolist || !req->unmap || !req->segments ||
+           !req->unmap_pages || (indirect && !req->indirect_pages)) {
+
+               xen_blkbk_free_req(req);
+               req = NULL;
+       }
+
+       return req;
+}
+
 /*
  * Each ring may have multi pages, depends on "ring-page-order".
  */
@@ -930,7 +971,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 {
        unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
        struct pending_req *req, *n;
-       int err, i, j;
+       int err, i;
        struct xen_blkif *blkif = ring->blkif;
        struct xenbus_device *dev = blkif->be->dev;
        unsigned int ring_page_order, nr_grefs, evtchn;
@@ -982,21 +1023,12 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
        blkif->nr_ring_pages = nr_grefs;
 
        for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
-               req = kzalloc(sizeof(*req), GFP_KERNEL);
+               req = xen_blkbk_alloc_req(MAX_INDIRECT_SEGMENTS, true /* indirect */);
+
                if (!req)
                        goto fail;
+
                list_add_tail(&req->free_list, &ring->pending_free);
-               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
-                       req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
-                       if (!req->segments[j])
-                               goto fail;
-               }
-               for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
-                       req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
-                                                        GFP_KERNEL);
-                       if (!req->indirect_pages[j])
-                               goto fail;
-               }
        }
 
        /* Map the shared frame, irq etc. */
@@ -1011,17 +1043,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 fail:
        list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
                list_del(&req->free_list);
-               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
-                       if (!req->segments[j])
-                               break;
-                       kfree(req->segments[j]);
-               }
-               for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
-                       if (!req->indirect_pages[j])
-                               break;
-                       kfree(req->indirect_pages[j]);
-               }
-               kfree(req);
+               xen_blkbk_free_req(req);
        }
        return -ENOMEM;