Avoid computing cpu_to_le64(ADDR_EMPTY) twice on the fast write path by
caching the converted value in a local variable before storing it to both
the out-of-band metadata and the emeta lba list.
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <matias@cnexlabs.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
                        lba_list[paddr] = cpu_to_le64(w_ctx->lba);
                        le64_add_cpu(&line->emeta->nr_valid_lbas, 1);
                } else {
-                       meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
-                       lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
+                       u64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
+                       lba_list[paddr] = meta_list[i].lba = addr_empty;
                        pblk_map_pad_invalidate(pblk, line, paddr);
                }
        }
 
 
                for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
                        struct ppa_addr dev_ppa;
+                       u64 addr_empty = cpu_to_le64(ADDR_EMPTY);
 
                        dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
 
                        pblk_map_invalidate(pblk, dev_ppa);
-                       meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
-                       lba_list[w_ptr] = cpu_to_le64(ADDR_EMPTY);
+                       lba_list[w_ptr] = meta_list[i].lba = addr_empty;
                        rqd->ppa_list[i] = dev_ppa;
                }
        }