* background work.
         */
        int (*get_background_work)(struct dm_cache_policy *p, bool idle,
-                                  struct policy_work **result);
+                                  struct policy_work **result);
 
        /*
         * You must pass in the same work pointer that you were given, not
 
                type = &key_type_encrypted;
                set_key = set_key_encrypted;
        } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
-                  !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
+                  !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
                type = &key_type_trusted;
                set_key = set_key_trusted;
        } else {
 
                else
 skip_check:
                        dec_in_flight(dio);
-
        } else {
                INIT_WORK(&dio->work, integrity_metadata);
                queue_work(ic->metadata_wq, &dio->work);
                } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
                        if (val < 1 << SECTOR_SHIFT ||
                            val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
-                           (val & (val -1))) {
+                           (val & (val - 1))) {
                                r = -EINVAL;
                                ti->error = "Invalid block_size argument";
                                goto bad;
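
For reference, the condition being reformatted above, val & (val - 1), is the
standard power-of-two test: subtracting one clears the lowest set bit, so the
AND is zero only when exactly one bit is set (the driver inverts it to reject
non-powers-of-two, and the range bounds already exclude zero). A minimal
userspace sketch of the idiom, illustrative only and not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* True when v is a power of two; v & (v - 1) clears the lowest set bit,
 * leaving zero only if exactly one bit was set.  Zero is excluded. */
static bool is_power_of_two(unsigned int v)
{
        return v && !(v & (v - 1));
}

int main(void)
{
        printf("%d %d %d\n", is_power_of_two(512), is_power_of_two(513),
               is_power_of_two(4096));  /* prints: 1 0 1 */
        return 0;
}
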
        if (ic->internal_hash) {
                size_t recalc_tags_size;
                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
-               if (!ic->recalc_wq ) {
+               if (!ic->recalc_wq) {
                        ti->error = "Cannot allocate workqueue";
                        r = -ENOMEM;
                        goto bad;
 
        log_clear_bit(lc, lc->recovering_bits, region);
        if (in_sync) {
                log_set_bit(lc, lc->sync_bits, region);
-                lc->sync_count++;
-        } else if (log_test_bit(lc->sync_bits, region)) {
+               lc->sync_count++;
+       } else if (log_test_bit(lc->sync_bits, region)) {
                lc->sync_count--;
                log_clear_bit(lc, lc->sync_bits, region);
        }
 
 static region_t core_get_sync_count(struct dm_dirty_log *log)
 {
-        struct log_c *lc = (struct log_c *) log->context;
+       struct log_c *lc = (struct log_c *) log->context;
 
-        return lc->sync_count;
+       return lc->sync_count;
 }
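
The pairing above is deliberate: every log_set_bit()/log_clear_bit() on
sync_bits is matched by a sync_count adjustment, which is what lets
core_get_sync_count() answer in O(1) instead of walking the bitmap. A
standalone sketch of the same bookkeeping pattern (names and layout are
illustrative, not the dm-log API):

#include <limits.h>
#include <stdbool.h>

#define NR_REGIONS    1024
#define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

static unsigned long sync_bits[NR_REGIONS / BITS_PER_WORD];
static unsigned long sync_count;        /* mirrors popcount of sync_bits */

static bool region_in_sync(unsigned long r)
{
        return sync_bits[r / BITS_PER_WORD] >> (r % BITS_PER_WORD) & 1;
}

/* Flip one region's in-sync bit, keeping the counter consistent so
 * "how many regions are in sync?" never needs a bitmap scan. */
static void mark_region(unsigned long r, bool in_sync)
{
        if (in_sync == region_in_sync(r))
                return;                 /* no change; counter stays valid */
        if (in_sync) {
                sync_bits[r / BITS_PER_WORD] |= 1UL << (r % BITS_PER_WORD);
                sync_count++;
        } else {
                sync_bits[r / BITS_PER_WORD] &= ~(1UL << (r % BITS_PER_WORD));
                sync_count--;
        }
}
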
 
 #define        DMEMIT_SYNC \
 
        const int mode;
        const char *param;
 } _raid456_journal_mode[] = {
-       { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
-       { R5C_JOURNAL_MODE_WRITE_BACK    , "writeback" }
+       { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
+       { R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
 };
 
 /* Return MD raid4/5/6 journal mode for dm @journal_mode one */
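
The table pairs each MD journal-mode constant with its dm table parameter
string, so the conversion the comment describes amounts to a short array
scan. A hedged userspace sketch of that lookup (the enum values, helper name,
and the -1 error return are placeholders for this sketch; the kernel uses its
own constants and error codes):

#include <string.h>

enum { R5C_JOURNAL_MODE_WRITE_THROUGH, R5C_JOURNAL_MODE_WRITE_BACK };

static const struct {
        const int mode;
        const char *param;
} _raid456_journal_mode[] = {
        { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
        { R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
};

/* Map a dm table parameter string to the MD journal-mode constant;
 * returns -1 when no table entry matches. */
static int journal_mode_lookup(const char *param)
{
        size_t i;

        for (i = 0; i < sizeof(_raid456_journal_mode) /
                        sizeof(_raid456_journal_mode[0]); i++)
                if (!strcmp(param, _raid456_journal_mode[i].param))
                        return _raid456_journal_mode[i].mode;
        return -1;
}
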
  *    [stripe_cache <sectors>]         Stripe cache size for higher RAIDs
  *    [region_size <sectors>]          Defines granularity of bitmap
 *    [journal_dev <dev>]              raid4/5/6 journaling device
- *                                     (i.e. write hole closing log)
+ *                                     (i.e. write hole closing log)
  *
  * RAID10-only options:
  *    [raid10_copies <# copies>]       Number of copies.  (Default: 2)
        }
 
        /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
-        if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+       if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
            (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
             (rs->requested_bitmap_chunk_sectors &&
               mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
 
        if (IS_ERR(ms->io_client)) {
                ti->error = "Error creating dm_io client";
                kfree(ms);
-               return NULL;
+               return NULL;
        }
 
        ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
 
                n = get_child(n, CHILDREN_PER_NODE - 1);
 
        if (n >= t->counts[l])
-               return (sector_t) - 1;
+               return (sector_t) -1;
 
        return get_node(t, l, n)[KEYS_PER_NODE - 1];
 }
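
The reformatted cast is a sentinel: sector_t is unsigned, so (sector_t) -1
wraps to the type's maximum value, a sector number larger than any real one,
signalling "no upper bound" to the caller. A tiny sketch of why the cast
yields the maximum (assuming a 64-bit unsigned sector_t, as in current
kernels):

#include <assert.h>
#include <stdint.h>

typedef uint64_t sector_t;      /* stand-in for the kernel's unsigned sector_t */

int main(void)
{
        /* Converting -1 to an unsigned type wraps modulo 2^64, so the
         * sentinel compares greater than every valid sector number. */
        assert((sector_t) -1 == UINT64_MAX);
        assert((sector_t) -1 > (sector_t) 1234567);
        return 0;
}
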
                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, func, data))
                        return true;
-        }
+       }
 
        return false;
 }
 
        discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
        discard_parent->bi_end_io = passdown_endio;
        discard_parent->bi_private = m;
-       if (m->maybe_shared)
-               passdown_double_checking_shared_status(m, discard_parent);
-       else {
+       if (m->maybe_shared)
+               passdown_double_checking_shared_status(m, discard_parent);
+       else {
                struct discard_op op;
 
                begin_discard(&op, tc, discard_parent);
 
                req.notify.context = &endio;
 
                /* writing via async dm-io (implied by notify.fn above) won't return an error */
-               (void) dm_io(&req, 1, &region, NULL);
+               (void) dm_io(&req, 1, &region, NULL);
                i = j;
        }
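
Context for the comment above: dm_io() completes synchronously and returns
the I/O result only when notify.fn is NULL; with a callback set, submission
is queued and any error is reported through the callback instead, so the
immediate return value carries nothing worth checking. A hedged sketch of an
async submission (kernel-internal API, so a fragment rather than a standalone
program; the helper names are invented and the field layout follows recent
kernels):

#include <linux/completion.h>
#include <linux/dm-io.h>

static void my_endio(unsigned long error, void *context)
{
        /* Runs on completion; 'error' is a bitset with one bit per
         * failed region (non-zero means at least one region failed). */
        complete(context);
}

/* Queue one asynchronous write through dm-io; assumes 'client' came
 * from dm_io_client_create(). */
static void submit_async_write(struct dm_io_client *client,
                               struct block_device *bdev, void *buf,
                               sector_t sector, sector_t nr_sectors,
                               struct completion *done)
{
        struct dm_io_region region = {
                .bdev = bdev,
                .sector = sector,
                .count = nr_sectors,
        };
        struct dm_io_request req = {
                .bi_opf = REQ_OP_WRITE,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = buf,
                .notify.fn = my_endio,          /* async: result via callback */
                .notify.context = done,
                .client = client,
        };

        /* With notify.fn set, dm_io() only queues the request; errors
         * arrive in my_endio(), so the return value is ignorable. */
        (void) dm_io(&req, 1, &region, NULL);
}
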
 
 
  * nodes, so saves metadata space.
  */
 static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
-                                struct dm_btree_value_type *vt, uint64_t key)
+                               struct dm_btree_value_type *vt, uint64_t key)
 {
        int r;
        unsigned int middle_index;
                if (shadow_current(s) != right)
                        unlock_block(s->info, right);
 
-               return r;
+               return r;
        }
 
 
 static bool need_insert(struct btree_node *node, uint64_t *keys,
                        unsigned int level, unsigned int index)
 {
-        return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+       return ((index >= le32_to_cpu(node->header.nr_entries)) ||
                (le64_to_cpu(node->keys[index]) != keys[level]));
 }
 
 
 }
 
 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
-                                dm_block_t begin, dm_block_t end, dm_block_t *b)
+                                dm_block_t begin, dm_block_t end, dm_block_t *b)
 {
        int r;
        uint32_t count;
 
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
                          dm_block_t end, dm_block_t *result);
 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
-                                dm_block_t begin, dm_block_t end, dm_block_t *result);
+                                dm_block_t begin, dm_block_t end, dm_block_t *result);
 
 /*
  * The next three functions return (via nr_allocations) the net number of