*count = i;
 }
 
-static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
-{
-       u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
-
-       if (umem_dma & ODP_READ_ALLOWED_BIT)
-               mtt_entry |= MLX5_IB_MTT_READ;
-       if (umem_dma & ODP_WRITE_ALLOWED_BIT)
-               mtt_entry |= MLX5_IB_MTT_WRITE;
-
-       return mtt_entry;
-}
-
 /*
  * Populate the given array with bus addresses from the umem.
  *
        struct scatterlist *sg;
        int entry;
 
-       if (umem->is_odp) {
-               WARN_ON(shift != 0);
-               WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
-
-               for (i = 0; i < num_pages; ++i) {
-                       dma_addr_t pa =
-                               to_ib_umem_odp(umem)->dma_list[offset + i];
-
-                       pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
-               }
-               return;
-       }
-
        i = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> PAGE_SHIFT;
 
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
 void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
-                          size_t nentries, struct mlx5_ib_mr *mr, int flags);
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+                          struct mlx5_ib_mr *mr, int flags);
 
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
                               enum ib_uverbs_advise_mr_advice advice,
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void)                               {}
 static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
-static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
-                                        size_t nentries, struct mlx5_ib_mr *mr,
-                                        int flags) {}
+static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+                                        struct mlx5_ib_mr *mr, int flags) {}
 
 static inline int
 mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 
        return mr;
 }
 
-static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
-                              void *xlt, int page_shift, size_t size,
-                              int flags)
-{
-       struct mlx5_ib_dev *dev = mr->dev;
-       struct ib_umem *umem = mr->umem;
-
-       if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
-               if (!umr_can_use_indirect_mkey(dev))
-                       return -EPERM;
-               mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
-               return npages;
-       }
-
-       npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
-
-       if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
-               __mlx5_ib_populate_pas(dev, umem, page_shift,
-                                      idx, npages, xlt,
-                                      MLX5_IB_MTT_PRESENT);
-               /* Clear padding after the pages
-                * brought from the umem.
-                */
-               memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
-                      size - npages * sizeof(struct mlx5_mtt));
-       }
-
-       return npages;
-}
-
 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
                            MLX5_UMR_MTT_ALIGNMENT)
 #define MLX5_SPARE_UMR_CHUNK 0x10000
        size_t pages_mapped = 0;
        size_t pages_to_map = 0;
        size_t pages_iter = 0;
+       size_t size_to_map = 0;
        gfp_t gfp;
        bool use_emergency_page = false;
 
                goto free_xlt;
        }
 
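+       /*
+        * For ODP MRs the translations come from the ODP page list;
+        * clamp the update so it does not run past its end.
+        */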
+       if (mr->umem->is_odp) {
+               if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+                       struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+                       size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+
+                       pages_to_map = min_t(size_t, pages_to_map, max_pages);
+               }
+       }
+
        sg.addr = dma;
        sg.lkey = dev->umrc.pd->local_dma_lkey;
 
             pages_mapped < pages_to_map && !err;
             pages_mapped += pages_iter, idx += pages_iter) {
                npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
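+               /* Bytes of XLT entries produced in this iteration */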
+               size_to_map = npages * desc_size;
                dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
-               npages = populate_xlt(mr, idx, npages, xlt,
-                                     page_shift, size, flags);
-
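+               /*
+                * ODP MRs are populated from their per-page DMA list,
+                * regular MRs from the umem scatterlist.
+                */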
+               if (mr->umem->is_odp) {
+                       mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+               } else {
+                       __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
+                                              npages, xlt,
+                                              MLX5_IB_MTT_PRESENT);
+                       /* Clear padding after the pages
+                        * brought from the umem.
+                        */
+                       memset(xlt + size_to_map, 0, size - size_to_map);
+               }
                dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
-               sg.length = ALIGN(npages * desc_size,
-                                 MLX5_UMR_MTT_ALIGNMENT);
+               sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
 
                if (pages_mapped + pages_iter >= pages_to_map) {
                        if (flags & MLX5_IB_UPD_XLT_ENABLE)
 
 
 static u64 mlx5_imr_ksm_entries;
 
-void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
-                          struct mlx5_ib_mr *imr, int flags)
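+/*
+ * Fill KLM entries for the children of an implicit ODP MR.
+ */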
+static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+                       struct mlx5_ib_mr *imr, int flags)
 {
        struct mlx5_klm *end = pklm + nentries;
 
        }
 }
 
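+/*
+ * Translate an ODP dma_list entry, which carries the allowed access in its
+ * low bits, into an MTT entry with the matching access flags set.
+ */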
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+       u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+       if (umem_dma & ODP_READ_ALLOWED_BIT)
+               mtt_entry |= MLX5_IB_MTT_READ;
+       if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+               mtt_entry |= MLX5_IB_MTT_WRITE;
+
+       return mtt_entry;
+}
+
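+/*
+ * Fill MTT entries for an ODP MR from its per-page DMA list. A ZAP update
+ * does not read the page list and leaves the buffer untouched.
+ */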
+static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
+                        struct mlx5_ib_mr *mr, int flags)
+{
+       struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+       dma_addr_t pa;
+       size_t i;
+
+       if (flags & MLX5_IB_UPD_XLT_ZAP)
+               return;
+
+       for (i = 0; i < nentries; i++) {
+               pa = odp->dma_list[idx + i];
+               pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+       }
+}
+
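+/*
+ * Populate an XLT buffer for an ODP MR: KLM entries for an indirect
+ * (implicit ODP) update, MTT entries otherwise.
+ */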
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+                          struct mlx5_ib_mr *mr, int flags)
+{
+       if (flags & MLX5_IB_UPD_XLT_INDIRECT)
+               populate_klm(xlt, idx, nentries, mr, flags);
+       else
+               populate_mtt(xlt, idx, nentries, mr, flags);
+}
+
 static void dma_fence_odp_mr(struct mlx5_ib_mr *mr)
 {
        struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);