* and is aligned to this size as defined in IO hints.
         */
        if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
                if (bio_data_dir(bio) == READ) {
                        if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
                            !test_bit(ERROR_WRITES, &fc->flags))
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        goto map_bio;
                }
 
                /*
                 * By default, error all I/O.
                 */
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
 map_bio:
 
                DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
                      (unsigned long long)ic->provided_data_sectors);
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
                      ic->sectors_per_block,
                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (ic->sectors_per_block > 1) {
                        if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        }
                }
        }
                                wanted_tag_size *= ic->tag_size;
                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
                                DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        }
                }
        } else {
                if (unlikely(bip != NULL)) {
                        DMERR("Unexpected integrity data when using internal hash");
-                       return -EIO;
+                       return DM_MAPIO_KILL;
                }
        }
 
        if (unlikely(ic->mode == 'R') && unlikely(dio->write))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
 
                spin_lock_irq(&lc->blocks_lock);
                lc->logging_enabled = false;
                spin_unlock_irq(&lc->blocks_lock);
-               return -ENOMEM;
+               return DM_MAPIO_KILL;
        }
        INIT_LIST_HEAD(&block->list);
        pb->block = block;
                        spin_lock_irq(&lc->blocks_lock);
                        lc->logging_enabled = false;
                        spin_unlock_irq(&lc->blocks_lock);
-                       return -ENOMEM;
+                       return DM_MAPIO_KILL;
                }
 
                src = kmap_atomic(bv.bv_page);
 
                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        return DM_MAPIO_REQUEUE;
                dm_report_EIO(m);
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        mpio->pgpath = pgpath;
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(&bios))) {
                r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
-               if (r < 0 || r == DM_MAPIO_REQUEUE) {
+               switch (r) {
+               case DM_MAPIO_KILL:
+                       r = -EIO;
+                       /*FALLTHRU*/
+               case DM_MAPIO_REQUEUE:
                        bio->bi_error = r;
                        bio_endio(bio);
-               } else if (r == DM_MAPIO_REMAPPED)
+                       break;
+               case DM_MAPIO_REMAPPED:
                        generic_make_request(bio);
+                       break;
+               }
        }
        blk_finish_plug(&plug);
 }
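
The r = -EIO rewrite before the fall-through above only makes sense against the DM_MAPIO_* return codes that ->map methods now use; a minimal sketch of the relevant definitions, assuming the include/linux/device-mapper.h values current at the time of this change (DM_MAPIO_KILL is the code this patch introduces):

	/* return values for a bio-based target's ->map method (assumed values) */
	#define DM_MAPIO_SUBMITTED	0	/* target queued or completed the bio itself */
	#define DM_MAPIO_REMAPPED	1	/* caller should dispatch the remapped bio */
	#define DM_MAPIO_REQUEUE	2	/* caller should requeue the bio later */
	#define DM_MAPIO_KILL		4	/* caller should fail the bio with -EIO */

Because DM_MAPIO_KILL is a positive dispatch code rather than an errno, each dispatch site has to translate it back to -EIO before reusing the existing error-completion path.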
 
 
        r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
-               return r;
+               return DM_MAPIO_KILL;
 
        /*
         * If region is not in-sync queue the bio.
         */
        if (!r || (r == -EWOULDBLOCK)) {
                if (bio->bi_opf & REQ_RAHEAD)
-                       return -EIO;
+                       return DM_MAPIO_KILL;
 
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
         */
        m = choose_mirror(ms, bio->bi_iter.bi_sector);
        if (unlikely(!m))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        dm_bio_record(&bio_record->details, bio);
        bio_record->m = m;
 
        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        /* FIXME: should only take write lock if we need
         * to copy an exception */
 
        if (!s->valid || (unlikely(s->snapshot_overflowed) &&
            bio_data_dir(bio) == WRITE)) {
-               r = -EIO;
+               r = DM_MAPIO_KILL;
                goto out_unlock;
        }
 
 
                        if (!s->valid || s->snapshot_overflowed) {
                                free_pending_exception(pe);
-                               r = -EIO;
+                               r = DM_MAPIO_KILL;
                                goto out_unlock;
                        }
 
                                        DMERR("Snapshot overflowed: Unable to allocate exception.");
                                } else
                                        __invalidate_snapshot(s, -ENOMEM);
-                               r = -EIO;
+                               r = DM_MAPIO_KILL;
                                goto out_unlock;
                        }
                }
 
 
 static int io_err_map(struct dm_target *tt, struct bio *bio)
 {
-       return -EIO;
+       return DM_MAPIO_KILL;
 }
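
io_err_map() is the smallest example of the new convention: the error target kills every bio it sees. For comparison, a hypothetical bio-based target (not part of this patch) would now report failure the same way instead of returning an errno; a rough sketch, reusing the zero_fill_bio()/bio_endio() pattern shown for dm-zero below:

	/* hypothetical target: reject writes, complete reads as zeroes */
	static int example_map(struct dm_target *ti, struct bio *bio)
	{
		if (bio_data_dir(bio) == WRITE)
			return DM_MAPIO_KILL;		/* previously: return -EIO; */

		zero_fill_bio(bio);
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;		/* bio completed here, nothing to dispatch */
	}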
 
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
 
        if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
            ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                DMERR_LIMIT("unaligned io");
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (bio_end_sector(bio) >>
            (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
                DMERR_LIMIT("io out of range");
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (bio_data_dir(bio) == WRITE)
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        io = dm_per_bio_data(bio, ti->per_io_data_size);
        io->v = v;
 
        case REQ_OP_READ:
                if (bio->bi_opf & REQ_RAHEAD) {
                        /* readahead of null bytes only wastes buffer cache */
-                       return -EIO;
+                       return DM_MAPIO_KILL;
                }
                zero_fill_bio(bio);
                break;
                /* writes get silently dropped */
                break;
        default:
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        bio_endio(bio);
 
        r = ti->type->map(ti, clone);
        dm_offload_end(&o);
 
-       if (r == DM_MAPIO_REMAPPED) {
+       switch (r) {
+       case DM_MAPIO_SUBMITTED:
+               break;
+       case DM_MAPIO_REMAPPED:
                /* the bio has been remapped so dispatch it */
-
                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
                                      tio->io->bio->bi_bdev->bd_dev, sector);
-
                generic_make_request(clone);
-       } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
+               break;
+       case DM_MAPIO_KILL:
+               r = -EIO;
+               /*FALLTHRU*/
+       case DM_MAPIO_REQUEUE:
                /* error the io and bail out, or requeue it if needed */
                dec_pending(tio->io, r);
                free_tio(tio);
-       } else if (r != DM_MAPIO_SUBMITTED) {
+               break;
+       default:
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }
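
Both dispatch sites, process_queued_bios() for queued multipath bios and the dm core hunk above, now switch on the positive DM_MAPIO_* codes instead of sniffing for negative errnos; condensed to its effect (a sketch, not verbatim code), a bio killed by its target ends up completed as an I/O error chosen by the caller:

	/* condensed effect of DM_MAPIO_KILL at a dispatch site (sketch) */
	r = ti->type->map(ti, bio);
	if (r == DM_MAPIO_KILL) {
		bio->bi_error = -EIO;	/* the errno comes from the caller, not from ->map */
		bio_endio(bio);
	}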