}
 
 #define MLX5_MAX_UMR_CHUNK                                                     \
-       ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_MTT_ALIGNMENT)
+       ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_FLEX_ALIGNMENT)
 #define MLX5_SPARE_UMR_CHUNK 0x10000
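
A quick sanity check on the arithmetic above (not part of the patch): assuming MLX5_MAX_UMR_SHIFT is 16 and MLX5_UMR_FLEX_ALIGNMENT keeps the old 0x40 value, the chunk size can be verified with a standalone sketch like the following; both constants are assumptions taken from the mlx5 headers.

/* Standalone sketch; constant values are assumptions, not part of the patch. */
#define MLX5_MAX_UMR_SHIFT	16
#define MLX5_UMR_FLEX_ALIGNMENT	0x40	/* 64-byte unit copied by UMR */

#define MLX5_MAX_UMR_CHUNK \
	((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_FLEX_ALIGNMENT)

/* 0x100000 - 0x40 = 0xfffc0, still a whole number of 64-byte units. */
_Static_assert(MLX5_MAX_UMR_CHUNK == 0xfffc0, "worked example");
_Static_assert(MLX5_MAX_UMR_CHUNK % MLX5_UMR_FLEX_ALIGNMENT == 0, "stays aligned");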
 
 /*
  * Allocate a temporary buffer to hold the per-page information to transfer to
  * HW. For efficiency this should be as large as it can be, but buffer
  * allocation failure is not allowed, so try smaller sizes.
  */
 static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
 {
-       const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
+       const size_t xlt_chunk_align = MLX5_UMR_FLEX_ALIGNMENT / ent_size;
        size_t size;
        void *res = NULL;
 
-       static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
+       static_assert(PAGE_SIZE % MLX5_UMR_FLEX_ALIGNMENT == 0);
 
        /*
         * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context just that the
        }
 
        final_size = (void *)cur_mtt - (void *)mtt;
-       sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
+       sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
        memset(cur_mtt, 0, sg.length - final_size);
        mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
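
The ALIGN() and memset() above are the padding step: the byte count handed to the device is rounded up to the UMR copy unit and the slack is zeroed. A minimal standalone sketch of the same pattern, assuming the 64-byte unit and 8-byte MTT entries (the helper name is made up):

#include <string.h>

#define FLEX_ALIGNMENT	64u	/* assumed MLX5_UMR_FLEX_ALIGNMENT */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

static void pad_xlt_tail(unsigned char *xlt, size_t final_size)
{
	size_t sg_length = ALIGN(final_size, FLEX_ALIGNMENT);

	/* e.g. 25 MTTs: final_size = 200, sg_length = 256, so 56 trailing
	 * bytes are cleared and the device never reads stale data. */
	memset(xlt + final_size, 0, sg_length - final_size);
}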
 
        int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
                               ? sizeof(struct mlx5_klm)
                               : sizeof(struct mlx5_mtt);
-       const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
+       const int page_align = MLX5_UMR_FLEX_ALIGNMENT / desc_size;
        struct mlx5_ib_dev *dev = mr_to_mdev(mr);
        struct device *ddev = &dev->mdev->pdev->dev;
        const int page_mask = page_align - 1;
        if (WARN_ON(!mr->umem->is_odp))
                return -EINVAL;
 
-       /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
+       /* UMR copies MTTs in units of MLX5_UMR_FLEX_ALIGNMENT bytes,
         * so we need to align the offset and length accordingly
         */
        if (idx & page_mask) {
                mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
                dma_sync_single_for_device(ddev, sg.addr, sg.length,
                                           DMA_TO_DEVICE);
-               sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
+               sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
 
                if (pages_mapped + pages_iter >= pages_to_map)
                        mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
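
For the alignment described by the comment in this function, the numbers work out roughly as follows; the entry sizes (8-byte struct mlx5_mtt, 16-byte struct mlx5_klm) and the 64-byte unit are assumptions.

/* Worked example with a 64-byte copy unit:
 *   MTT path: page_align = 64 / 8  = 8 entries, page_mask = 7
 *   KLM path: page_align = 64 / 16 = 4 entries, page_mask = 3
 * So a request to update 5 MTT entries starting at idx 10 is widened to
 * whole units: idx &= ~page_mask gives 8, the length is rounded up, and
 * entries 8..15 go out in a single aligned 64-byte transfer.
 */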
 
  * size actually used at runtime, but it's not a problem when calculating static
  * array sizes.
  */
-#define MLX5_UMR_MAX_MTT_SPACE \
+#define MLX5_UMR_MAX_FLEX_SPACE \
        (ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
-                   MLX5_UMR_MTT_ALIGNMENT))
+                   MLX5_UMR_FLEX_ALIGNMENT))
 #define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
-       rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
+       rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt))
 
 #define MLX5E_MAX_RQ_NUM_MTTS  \
        (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
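
Plugging in typical sizes gives a feel for the renamed macro; the numbers below are assumptions (a 1024-byte maximum send WQE, a 128-byte struct mlx5e_umr_wqe header, 8-byte MTT entries), not values taken from this patch.

/* Rough numbers under the stated assumptions:
 *   MLX5_UMR_MAX_FLEX_SPACE      = ALIGN_DOWN(1024 - 128, 64) = 896 bytes
 *   MLX5_MPWRQ_MAX_PAGES_PER_WQE = rounddown_pow_of_two(896 / 8)
 *                                = rounddown_pow_of_two(112)  = 64 pages
 */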
 
        /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
        max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
        max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
-                                      MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
+                                      MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
        max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
 
        WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
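
Continuing with the same assumed sizes and a 4 KiB page (page_shift = 12), the runtime computation in this hunk would give:

/* max_pages_per_wqe  = ALIGN_DOWN(1024 - 128, 64) / 8 = 112
 * max_log_mpwqe_size = ilog2(112) + 12 = 6 + 12 = 18, i.e. a 256 KiB
 * multi-packet WQE, which is expected to sit above
 * MLX5E_ORDER2_MAX_PACKET_MTU so the WARN_ON_ONCE stays quiet.
 */
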
        u16 umr_wqe_sz;
 
        umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
-               ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+               ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
 
        WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
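
For the size check in this last hunk, the same assumed numbers (64 pages per WQE, 8-byte MTT entries, 16-byte data segments, a 6-bit DS field) give:

/* umr_wqe_sz            = 128 + ALIGN(64 * 8, 64) = 128 + 512 = 640 bytes
 * DIV_ROUND_UP(640, 16) = 40 data segments, well under the 0x3f (63) limit
 * checked by the WARN_ON_ONCE above.
 */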