xen-blkback: pull nseg validation out in a function
author	Ankur Arora <ankur.a.arora@oracle.com>
	Thu, 4 Jan 2018 12:39:10 +0000 (07:39 -0500)
committer	Ankur Arora <ankur.a.arora@oracle.com>
	Wed, 17 Jan 2018 17:59:41 +0000 (12:59 -0500)
Mechanical change in preparation for moving this validation
into __do_block_io_op().

Orabug: 26670475

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
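
For readers who want to trace the combined predicate outside the kernel tree,
here is a minimal user-space sketch of the checks that the diff below factors
into validate_rw_op(). The flattened request struct, the enum values, and the
segment limits are simplified stand-ins for the blkif ABI headers and
xen-blkback's common.h (treat the 11 and 256 limits as illustrative), and the
main() harness is hypothetical; it is not part of this commit.

	/*
	 * Standalone sketch of the nseg validation pulled out by this patch.
	 * Simplified, user-space-only types; not the kernel definitions.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum {
		BLKIF_OP_READ            = 0,
		BLKIF_OP_WRITE           = 1,
		BLKIF_OP_WRITE_BARRIER   = 2,
		BLKIF_OP_FLUSH_DISKCACHE = 3,
		BLKIF_OP_INDIRECT        = 6,
	};

	#define BLKIF_MAX_SEGMENTS_PER_REQUEST	11	/* illustrative limit */
	#define MAX_INDIRECT_SEGMENTS		256	/* illustrative limit */

	/* Stand-in for struct blkif_request: only the fields the check reads. */
	struct blkif_request {
		unsigned char operation;
		unsigned char indirect_op;	/* used only for BLKIF_OP_INDIRECT */
	};

	static bool validate_rw_op(const struct blkif_request *req, unsigned int nseg)
	{
		unsigned short req_operation = req->operation == BLKIF_OP_INDIRECT ?
				req->indirect_op : req->operation;

		/* An indirect request may only wrap a plain read or write. */
		if (req->operation == BLKIF_OP_INDIRECT &&
		    req_operation != BLKIF_OP_READ &&
		    req_operation != BLKIF_OP_WRITE)
			return false;

		if (nseg == 0) {
			/* Only flush/barrier operations may carry zero segments. */
			if (req_operation != BLKIF_OP_FLUSH_DISKCACHE &&
			    req_operation != BLKIF_OP_WRITE_BARRIER)
				return false;
		} else {
			/* Segment count must respect the per-request-type limit. */
			unsigned int limit = req->operation == BLKIF_OP_INDIRECT ?
					MAX_INDIRECT_SEGMENTS :
					BLKIF_MAX_SEGMENTS_PER_REQUEST;
			if (nseg > limit)
				return false;
		}

		return true;
	}

	int main(void)
	{
		struct blkif_request read_req  = { .operation = BLKIF_OP_READ };
		struct blkif_request flush_req = { .operation = BLKIF_OP_FLUSH_DISKCACHE };

		printf("read, 4 segs:  %d\n", validate_rw_op(&read_req, 4));   /* 1 */
		printf("read, 0 segs:  %d\n", validate_rw_op(&read_req, 0));   /* 0 */
		printf("read, 12 segs: %d\n", validate_rw_op(&read_req, 12));  /* 0 */
		printf("flush, 0 segs: %d\n", validate_rw_op(&flush_req, 0));  /* 1 */
		return 0;
	}

With the predicate isolated like this, a follow-up (per the commit message)
could presumably call it from __do_block_io_op()'s request loop so malformed
requests are rejected before dispatch; that placement is not shown here.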
drivers/block/xen-blkback/blkback.c

index c3aab638c1e5b8a5e2cf3ee336b13525d81918f1..e46ae25de6f395861a8fa16f0e7bece0fa18d7b1 100644
@@ -1107,7 +1107,36 @@ static void end_block_io_op(struct bio *bio, int error)
        bio_put(bio);
 }
 
+static bool validate_rw_op(const struct blkif_request *req, unsigned int nseg)
+{
+       unsigned short req_operation = req->operation == BLKIF_OP_INDIRECT ?
+                       req->u.indirect.indirect_op : req->operation;
+
+       if ((req->operation == BLKIF_OP_INDIRECT) &&   /* valid indirect-op */
+           (req_operation != BLKIF_OP_READ) &&
+           (req_operation != BLKIF_OP_WRITE)) {
+               goto fail;
+       }
 
+       if (unlikely(nseg == 0)) { /* if nseg == 0, is op in the correct set? */
+               if (req_operation != BLKIF_OP_FLUSH_DISKCACHE &&
+                   req_operation != BLKIF_OP_WRITE_BARRIER)
+                       goto fail;
+       } else { /* if nseg > 0, check if nseg makes sense. */
+               if (((req->operation != BLKIF_OP_INDIRECT) &&
+                       (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
+                   ((req->operation == BLKIF_OP_INDIRECT) &&
+                       (nseg > MAX_INDIRECT_SEGMENTS))) {
+                       goto fail;
+               }
+       }
+
+       return true;
+fail:
+       pr_debug("Invalid indirect operation or bad number of "
+                        "segments in request (%d)\n", nseg);
+       return false;
+}
 
 /*
  * Function to copy the from the ring buffer the 'struct blkif_request'
@@ -1234,13 +1263,6 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        req_operation = req->operation == BLKIF_OP_INDIRECT ?
                        req->u.indirect.indirect_op : req->operation;
 
-       if ((req->operation == BLKIF_OP_INDIRECT) &&
-           (req_operation != BLKIF_OP_READ) &&
-           (req_operation != BLKIF_OP_WRITE)) {
-               pr_debug("Invalid indirect operation (%u)\n", req_operation);
-               goto fail_response;
-       }
-
        switch (req_operation) {
        case BLKIF_OP_READ:
                ring->st_rd_req++;
@@ -1266,12 +1288,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-       if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
-           unlikely((req->operation != BLKIF_OP_INDIRECT) &&
-                    (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
-           unlikely((req->operation == BLKIF_OP_INDIRECT) &&
-                    (nseg > MAX_INDIRECT_SEGMENTS))) {
-               pr_debug("Bad number of segments in request (%d)\n", nseg);
+       if (!validate_rw_op(req, nseg)) {
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }