* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
  */
 static void
-do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
                     size_t len, struct async_submit_ctl *submit)
 {
        void **srcs;
                        BUG_ON(i > disks - 3); /* P or Q can't be zero */
                        srcs[i] = (void*)raid6_empty_zero_page;
                } else {
-                       srcs[i] = page_address(blocks[i]) + offset;
+                       srcs[i] = page_address(blocks[i]) + offsets[i];
+
                        if (i < disks - 2) {
                                stop = i;
                                if (start == -1)
        async_tx_sync_epilog(submit);
 }
 
+static inline bool
+is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
+                                    int src_cnt, size_t len)
+{
+       int i;
+
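+       /* a single misaligned source offset disqualifies the whole P+Q operation */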
+       for (i = 0; i < src_cnt; i++) {
+               if (!is_dma_pq_aligned(dev, offs[i], 0, len))
+                       return false;
+       }
+       return true;
+}
+
 /**
  * async_gen_syndrome - asynchronously calculate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
- * @offset: common offset into each block (src and dest) to start transaction
+ * @offsets: per-block offsets (src and dest) at which to start the transaction
  * @disks: number of blocks (including missing P or Q, see below)
  * @len: length of operation in bytes
  * @submit: submission/completion modifiers
  * path.
  */
 struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
                   size_t len, struct async_submit_ctl *submit)
 {
        int src_cnt = disks - 2;
        if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
            (src_cnt <= dma_maxpq(device, 0) ||
             dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
-           is_dma_pq_aligned(device, offset, 0, len)) {
+           is_dma_pq_aligned_offs(device, offsets, disks, len)) {
                struct dma_async_tx_descriptor *tx;
                enum dma_ctrl_flags dma_flags = 0;
                unsigned char coefs[MAX_DISKS];
                for (i = 0, j = 0; i < src_cnt; i++) {
                        if (blocks[i] == NULL)
                                continue;
-                       unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
-                                                     len, DMA_TO_DEVICE);
+                       unmap->addr[j] = dma_map_page(device->dev, blocks[i],
+                                               offsets[i], len, DMA_TO_DEVICE);
                        coefs[j] = raid6_gfexp[i];
                        unmap->to_cnt++;
                        j++;
                unmap->bidi_cnt++;
                if (P(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
-                                                       offset, len, DMA_BIDIRECTIONAL);
+                                                       P(offsets, disks),
+                                                       len, DMA_BIDIRECTIONAL);
                else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                unmap->bidi_cnt++;
                if (Q(blocks, disks))
                        unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
-                                                      offset, len, DMA_BIDIRECTIONAL);
+                                                       Q(offsets, disks),
+                                                       len, DMA_BIDIRECTIONAL);
                else {
                        unmap->addr[j++] = 0;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
 
        if (!P(blocks, disks)) {
                P(blocks, disks) = pq_scribble_page;
-               BUG_ON(len + offset > PAGE_SIZE);
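+               /* the unwanted P is computed into the scratch page, at offset 0 */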
+               P(offsets, disks) = 0;
        }
        if (!Q(blocks, disks)) {
                Q(blocks, disks) = pq_scribble_page;
-               BUG_ON(len + offset > PAGE_SIZE);
+               Q(offsets, disks) = 0;
        }
-       do_sync_gen_syndrome(blocks, offset, disks, len, submit);
+       do_sync_gen_syndrome(blocks, offsets, disks, len, submit);
 
        return NULL;
 }
  * @len: length of operation in bytes
  * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
  * @spare: temporary result buffer for the synchronous case
+ * @s_off: spare buffer page offset
  * @submit: submission / completion modifiers
  *
  * The same notes from async_gen_syndrome apply to the 'blocks',
  * specified.
  */
 struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
                   size_t len, enum sum_check_flags *pqres, struct page *spare,
-                  struct async_submit_ctl *submit)
+                  unsigned int s_off, struct async_submit_ctl *submit)
 {
        struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
        struct dma_device *device = chan ? chan->device : NULL;
                unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
        if (unmap && disks <= dma_maxpq(device, 0) &&
-           is_dma_pq_aligned(device, offset, 0, len)) {
+           is_dma_pq_aligned_offs(device, offsets, disks, len)) {
                struct device *dev = device->dev;
                dma_addr_t pq[2];
                int i, j = 0, src_cnt = 0;
                for (i = 0; i < disks-2; i++)
                        if (likely(blocks[i])) {
                                unmap->addr[j] = dma_map_page(dev, blocks[i],
-                                                             offset, len,
+                                                             offsets[i], len,
                                                              DMA_TO_DEVICE);
                                coefs[j] = raid6_gfexp[i];
                                unmap->to_cnt++;
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                } else {
                        pq[0] = dma_map_page(dev, P(blocks, disks),
-                                            offset, len,
+                                            P(offsets, disks), len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[0];
                        unmap->to_cnt++;
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                } else {
                        pq[1] = dma_map_page(dev, Q(blocks, disks),
-                                            offset, len,
+                                            Q(offsets, disks), len,
                                             DMA_TO_DEVICE);
                        unmap->addr[j++] = pq[1];
                        unmap->to_cnt++;
                async_tx_submit(chan, tx, submit);
        } else {
                struct page *p_src = P(blocks, disks);
+               unsigned int p_off = P(offsets, disks);
                struct page *q_src = Q(blocks, disks);
+               unsigned int q_off = Q(offsets, disks);
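+               /*
+                * The P/Q slots of @blocks and @offsets are modified below so
+                * the check can run against the spare buffer; save the
+                * originals and restore them before returning.
+                */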
                enum async_tx_flags flags_orig = submit->flags;
                dma_async_tx_callback cb_fn_orig = submit->cb_fn;
                void *scribble = submit->scribble;
                if (p_src) {
                        init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
                                          NULL, NULL, scribble);
-                       tx = async_xor(spare, blocks, offset, disks-2, len, submit);
+                       tx = async_xor_offs(spare, s_off,
+                                       blocks, offsets, disks-2, len, submit);
                        async_tx_quiesce(&tx);
-                       p = page_address(p_src) + offset;
-                       s = page_address(spare) + offset;
+                       p = page_address(p_src) + p_off;
+                       s = page_address(spare) + s_off;
                        *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
                }
 
                if (q_src) {
                        P(blocks, disks) = NULL;
                        Q(blocks, disks) = spare;
+                       Q(offsets, disks) = s_off;
                        init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
-                       tx = async_gen_syndrome(blocks, offset, disks, len, submit);
+                       tx = async_gen_syndrome(blocks, offsets, disks,
+                                       len, submit);
                        async_tx_quiesce(&tx);
-                       q = page_address(q_src) + offset;
-                       s = page_address(spare) + offset;
+                       q = page_address(q_src) + q_off;
+                       s = page_address(spare) + s_off;
                        *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
                }
 
                /* restore P, Q and submit */
                P(blocks, disks) = p_src;
+               P(offsets, disks) = p_off;
                Q(blocks, disks) = q_src;
+               Q(offsets, disks) = q_off;
 
                submit->cb_fn = cb_fn_orig;
                submit->cb_param = cb_param_orig;
 
 #define NDISKS 64 /* Including P and Q */
 
 static struct page *dataptrs[NDISKS];
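+/*
+ * Per-disk page offsets for the offset-aware async_tx calls; this self-test
+ * always uses offset 0.
+ */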
+unsigned int dataoffs[NDISKS];
 static addr_conv_t addr_conv[NDISKS];
 static struct page *data[NDISKS+3];
 static struct page *spare;
        for (i = 0; i < disks; i++) {
                prandom_bytes(page_address(data[i]), PAGE_SIZE);
                dataptrs[i] = data[i];
+               dataoffs[i] = 0;
        }
 }
 
 }
 
 /* Recover two failed blocks. */
-static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
+static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
+               struct page **ptrs, unsigned int *offs)
 {
        struct async_submit_ctl submit;
        struct completion cmp;
                if (faila == disks-2) {
                        /* P+Q failure.  Just rebuild the syndrome. */
                        init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
-                       tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
+                       tx = async_gen_syndrome(ptrs, offs,
+                                       disks, bytes, &submit);
                } else {
                        struct page *blocks[NDISKS];
                        struct page *dest;
                        tx = async_xor(dest, blocks, 0, count, bytes, &submit);
 
                        init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
-                       tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
+                       tx = async_gen_syndrome(ptrs, offs,
+                                       disks, bytes, &submit);
                }
        } else {
                if (failb == disks-2) {
                        /* data+P failure. */
                        init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
-                       tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
+                       tx = async_raid6_datap_recov(disks, bytes,
+                                       faila, ptrs, &submit);
                } else {
                        /* data+data failure. */
                        init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
-                       tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
+                       tx = async_raid6_2data_recov(disks, bytes,
+                                       faila, failb, ptrs, &submit);
                }
        }
        init_completion(&cmp);
        init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
-       tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
+       tx = async_syndrome_val(ptrs, offs,
+                       disks, bytes, &result, spare, 0, &submit);
        async_tx_issue_pending(tx);
 
        if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
        dataptrs[i] = recovi;
        dataptrs[j] = recovj;
 
-       raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);
+       raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs, dataoffs);
 
        erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
        errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);
        /* Generate assumed good syndrome */
        init_completion(&cmp);
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
-       tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
+       tx = async_gen_syndrome(dataptrs, dataoffs, disks, PAGE_SIZE, &submit);
        async_tx_issue_pending(tx);
 
        if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
 
 
 /* set_syndrome_sources - populate source buffers for gen_syndrome
  * @srcs - (struct page *) array of size sh->disks
+ * @offs - (unsigned int) array of offsets, one per page in @srcs
  * @sh - stripe_head to parse
  *
  * Populates srcs in proper layout order for the stripe and returns the
  * is recorded in srcs[count+1]].
  */
 static int set_syndrome_sources(struct page **srcs,
+                               unsigned int *offs,
                                struct stripe_head *sh,
                                int srctype)
 {
                                srcs[slot] = sh->dev[i].orig_page;
                        else
                                srcs[slot] = sh->dev[i].page;
+                       /*
+                        * For R5_InJournal, PAGE_SIZE must be 4KB and the
+                        * page is not shared with another stripe, so
+                        * dev[i].offset is 0.
+                        */
+                       offs[slot] = sh->dev[i].offset;
                }
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);
        atomic_inc(&sh->count);
 
        if (target == qd_idx) {
-               count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
+               count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
                blocks[count] = NULL; /* regenerating p is not necessary */
                BUG_ON(blocks[count+1] != dest); /* q should already be set */
                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                  ops_complete_compute, sh,
                                  to_addr_conv(sh, percpu, 0));
-               tx = async_gen_syndrome(blocks, 0, count+2,
+               tx = async_gen_syndrome(blocks, offs, count+2,
                                RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
        } else {
                /* Compute any data- or p-drive using XOR */
                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu, 0));
-                       return async_gen_syndrome(blocks, 0, syndrome_disks+2,
+                       return async_gen_syndrome(blocks, offs, syndrome_disks+2,
                                                  RAID5_STRIPE_SIZE(sh->raid_conf),
                                                  &submit);
                } else {
                                       RAID5_STRIPE_SIZE(sh->raid_conf),
                                       &submit);
 
-                       count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
+                       count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL);
                        init_async_submit(&submit, ASYNC_TX_FENCE, tx,
                                          ops_complete_compute, sh,
                                          to_addr_conv(sh, percpu, 0));
-                       return async_gen_syndrome(blocks, 0, count+2,
+                       return async_gen_syndrome(blocks, offs, count+2,
                                                  RAID5_STRIPE_SIZE(sh->raid_conf),
                                                  &submit);
                }
                struct dma_async_tx_descriptor *tx)
 {
        struct page **blocks = to_addr_page(percpu, 0);
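+       /* per-device page offsets, filled together with blocks[] by set_syndrome_sources() */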
+       unsigned int *offs = to_addr_offs(sh, percpu);
        int count;
        struct async_submit_ctl submit;
 
        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
-       count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
+       count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN);
 
        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
                          ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
-       tx = async_gen_syndrome(blocks, 0, count+2,
+       tx = async_gen_syndrome(blocks, offs, count+2,
                        RAID5_STRIPE_SIZE(sh->raid_conf), &submit);
 
        return tx;
 {
        struct async_submit_ctl submit;
        struct page **blocks;
+       unsigned int *offs;
        int count, i, j = 0;
        struct stripe_head *head_sh = sh;
        int last_stripe;
 
 again:
        blocks = to_addr_page(percpu, j);
+       offs = to_addr_offs(sh, percpu);
 
        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
                synflags = SYNDROME_SRC_WRITTEN;
                txflags = ASYNC_TX_ACK;
        }
 
-       count = set_syndrome_sources(blocks, sh, synflags);
+       count = set_syndrome_sources(blocks, offs, sh, synflags);
        last_stripe = !head_sh->batch_head ||
                list_first_entry(&sh->batch_list,
                                 struct stripe_head, batch_list) == head_sh;
        } else
                init_async_submit(&submit, 0, tx, NULL, NULL,
                                  to_addr_conv(sh, percpu, j));
-       tx = async_gen_syndrome(blocks, 0, count+2,
+       tx = async_gen_syndrome(blocks, offs, count+2,
                        RAID5_STRIPE_SIZE(sh->raid_conf),  &submit);
        if (!last_stripe) {
                j++;
 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
 {
        struct page **srcs = to_addr_page(percpu, 0);
+       unsigned int *offs = to_addr_offs(sh, percpu);
        struct async_submit_ctl submit;
        int count;
 
                (unsigned long long)sh->sector, checkp);
 
        BUG_ON(sh->batch_head);
-       count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
+       count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL);
        if (!checkp)
                srcs[count] = NULL;
 
        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
                          sh, to_addr_conv(sh, percpu, 0));
-       async_syndrome_val(srcs, 0, count+2,
+       async_syndrome_val(srcs, offs, count+2,
                           RAID5_STRIPE_SIZE(sh->raid_conf),
-                          &sh->ops.zero_sum_result, percpu->spare_page, &submit);
+                          &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit);
 }
 
 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 
 struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+async_gen_syndrome(struct page **blocks, unsigned int *offsets, int src_cnt,
                   size_t len, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
-async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+async_syndrome_val(struct page **blocks, unsigned int *offsets, int src_cnt,
                   size_t len, enum sum_check_flags *pqres, struct page *spare,
-                  struct async_submit_ctl *submit);
+                  unsigned int s_off, struct async_submit_ctl *submit);
 
 struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,