The size of the pages is held by struct ib_umem in the page_size field.
It is better to store it as an exponent, because the page size is by
nature always a power of two and is used as a factor, a divisor or an
argument to ilog2().
Converting page_size into page_shift makes the code portable and avoids
the following error when compiling on ARM:
  ERROR: "__aeabi_uldivmod" [drivers/infiniband/core/ib_core.ko] undefined!
CC: Selvin Xavier <selvin.xavier@broadcom.com>
CC: Steve Wise <swise@chelsio.com>
CC: Lijun Ou <oulijun@huawei.com>
CC: Shiraz Saleem <shiraz.saleem@intel.com>
CC: Adit Ranadive <aditr@vmware.com>
CC: Dennis Dalessandro <dennis.dalessandro@intel.com>
CC: Ram Amrani <Ram.Amrani@Cavium.com>
Signed-off-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Acked-by: Ram Amrani <Ram.Amrani@cavium.com>
Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Acked-by: Adit Ranadive <aditr@vmware.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
        if (!umem)
                return ERR_PTR(-ENOMEM);
 
-       umem->context   = context;
-       umem->length    = size;
-       umem->address   = addr;
-       umem->page_size = PAGE_SIZE;
-       umem->pid       = get_task_pid(current, PIDTYPE_PID);
+       umem->context    = context;
+       umem->length     = size;
+       umem->address    = addr;
+       umem->page_shift = PAGE_SHIFT;
+       umem->pid        = get_task_pid(current, PIDTYPE_PID);
        /*
         * We ask for writable memory if any of the following
         * access flags are set.  "Local write" and "remote write"
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-       int shift;
        int i;
        int n;
        struct scatterlist *sg;
        if (umem->odp_data)
                return ib_umem_num_pages(umem);
 
-       shift = ilog2(umem->page_size);
-
        n = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-               n += sg_dma_len(sg) >> shift;
+               n += sg_dma_len(sg) >> umem->page_shift;
 
        return n;
 }
 
        if (!umem)
                return ERR_PTR(-ENOMEM);
 
-       umem->context   = context;
-       umem->length    = size;
-       umem->address   = addr;
-       umem->page_size = PAGE_SIZE;
-       umem->writable  = 1;
+       umem->context    = context;
+       umem->length     = size;
+       umem->address    = addr;
+       umem->page_shift = PAGE_SHIFT;
+       umem->writable   = 1;
 
        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data) {
         * invalidations, so we must make sure we free each page only
         * once. */
        mutex_lock(&umem->odp_data->umem_mutex);
-       for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+       for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                if (umem->odp_data->page_list[idx]) {
                        struct page *page = umem->odp_data->page_list[idx];
 
        struct bnxt_re_mr *mr;
        struct ib_umem *umem;
        u64 *pbl_tbl, *pbl_tbl_orig;
-       int i, umem_pgs, pages, page_shift, rc;
+       int i, umem_pgs, pages, rc;
        struct scatterlist *sg;
        int entry;
 
        }
        pbl_tbl_orig = pbl_tbl;
 
-       page_shift = ilog2(umem->page_size);
        if (umem->hugetlb) {
                dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
                rc = -EFAULT;
                goto fail;
        }
-       if (umem->page_size != PAGE_SIZE) {
-               dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
+
+       if (umem->page_shift != PAGE_SHIFT) {
+               dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
                rc = -EFAULT;
                goto fail;
        }
        /* Map umem buf ptrs to the PBL */
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-               pages = sg_dma_len(sg) >> page_shift;
+               pages = sg_dma_len(sg) >> umem->page_shift;
                for (i = 0; i < pages; i++, pbl_tbl++)
-                       *pbl_tbl = sg_dma_address(sg) + (i << page_shift);
+                       *pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
        }
        rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
                               umem_pgs, false);
 
                return ERR_PTR(err);
        }
 
-       shift = ffs(mhp->umem->page_size) - 1;
+       shift = mhp->umem->page_shift;
 
        n = mhp->umem->nmap;
 
                        len = sg_dma_len(sg) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-                                       mhp->umem->page_size * k);
+                                                        (k << shift));
                                if (i == PAGE_SIZE / sizeof *pages) {
                                        err = iwch_write_pbl(mhp, pages, i, n);
                                        if (err)
 
                return ERR_PTR(err);
        }
 
-       shift = ffs(mhp->umem->page_size) - 1;
+       shift = mhp->umem->page_shift;
 
        n = mhp->umem->nmap;
        err = alloc_pbl(mhp, n);
                len = sg_dma_len(sg) >> shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-                               mhp->umem->page_size * k);
+                                                (k << shift));
                        if (i == PAGE_SIZE / sizeof *pages) {
                                err = write_pbl(&mhp->rhp->rdev,
                                      pages,
 
                return PTR_ERR(*umem);
 
        ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
-                               ilog2((unsigned int)(*umem)->page_size),
-                               &buf->hr_mtt);
+                               (*umem)->page_shift, &buf->hr_mtt);
        if (ret)
                goto err_buf;
 
 
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
-                       pages[i++] = sg_dma_address(sg) + umem->page_size * k;
+                       pages[i++] = sg_dma_address(sg) +
+                               (k << umem->page_shift);
                        if (i == PAGE_SIZE / sizeof(u64)) {
                                ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
                                                         pages);
        }
 
        n = ib_umem_page_count(mr->umem);
-       if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
-               dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
-                       mr->umem->page_size);
+       if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
+               dev_err(dev, "Just support 4K page size but is 0x%lx now!\n",
+                       BIT(mr->umem->page_shift));
                ret = -EINVAL;
                goto err_umem;
        }
 
                }
 
                ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(hr_qp->umem),
-                                   ilog2((unsigned int)hr_qp->umem->page_size),
-                                   &hr_qp->mtt);
+                                       hr_qp->umem->page_shift, &hr_qp->mtt);
                if (ret) {
                        dev_err(dev, "hns_roce_mtt_init error for create qp\n");
                        goto err_buf;
 
 {
        struct ib_umem *region = iwmr->region;
        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
-       int chunk_pages, entry, pg_shift, i;
+       int chunk_pages, entry, i;
        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
        struct i40iw_pble_info *pinfo;
        struct scatterlist *sg;
 
        pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
 
-       pg_shift = ffs(region->page_size) - 1;
        for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
-               chunk_pages = sg_dma_len(sg) >> pg_shift;
+               chunk_pages = sg_dma_len(sg) >> region->page_shift;
                if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
                    !iwpbl->qp_mr.sq_page)
                        iwpbl->qp_mr.sq_page = sg_page(sg);
                for (i = 0; i < chunk_pages; i++) {
-                       pg_addr = sg_dma_address(sg) + region->page_size * i;
+                       pg_addr = sg_dma_address(sg) +
+                               (i << region->page_shift);
 
                        if ((entry + i) == 0)
                                *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
        iwmr->ibmr.device = pd->device;
        ucontext = to_ucontext(pd->uobject->context);
 
-       iwmr->page_size = region->page_size;
+       iwmr->page_size = PAGE_SIZE;
        iwmr->page_msk = PAGE_MASK;
 
        if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
 
                return PTR_ERR(*umem);
 
        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
-                           ilog2((*umem)->page_size), &buf->mtt);
+                           (*umem)->page_shift, &buf->mtt);
        if (err)
                goto err_buf;
 
 
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) +
-                               umem->page_size * k;
+                               (k << umem->page_shift);
                        /*
                         * Be friendly to mlx4_write_mtt() and
                         * pass it chunks of appropriate size.
        }
 
        n = ib_umem_page_count(mr->umem);
-       shift = ilog2(mr->umem->page_size);
+       shift = mr->umem->page_shift;
 
        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
-               shift = ilog2(mmr->umem->page_size);
+               shift = mmr->umem->page_shift;
 
                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
 
                }
 
                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(qp->umem),
-                                   ilog2(qp->umem->page_size), &qp->mtt);
+                                   qp->umem->page_shift, &qp->mtt);
                if (err)
                        goto err_buf;
 
 
                }
 
                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
-                                   ilog2(srq->umem->page_size), &srq->mtt);
+                                   srq->umem->page_shift, &srq->mtt);
                if (err)
                        goto err_buf;
 
 
        u64 pfn;
        struct scatterlist *sg;
        int entry;
-       unsigned long page_shift = ilog2(umem->page_size);
+       unsigned long page_shift = umem->page_shift;
 
        /* With ODP we must always match OS page size. */
        if (umem->odp_data) {
                            int page_shift, size_t offset, size_t num_pages,
                            __be64 *pas, int access_flags)
 {
-       unsigned long umem_page_shift = ilog2(umem->page_size);
+       unsigned long umem_page_shift = umem->page_shift;
        int shift = page_shift - umem_page_shift;
        int mask = (1 << shift) - 1;
        int i, k, idx;
 
         * but they will write 0s as well, so no difference in the end result.
         */
 
-       for (addr = start; addr < end; addr += (u64)umem->page_size) {
+       for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                /*
                 * Strive to write the MTTs in chunks, but avoid overwriting
 
                goto err;
        }
 
-       shift = ffs(mr->umem->page_size) - 1;
+       shift = mr->umem->page_shift;
        n = mr->umem->nmap;
 
        mr->mtt = mthca_alloc_mtt(dev, n);
        for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
                len = sg_dma_len(sg) >> shift;
                for (k = 0; k < len; ++k) {
-                       pages[i++] = sg_dma_address(sg) +
-                               mr->umem->page_size * k;
+                       pages[i++] = sg_dma_address(sg) + (k << shift);
                        /*
                         * Be friendly to write_mtt and pass it chunks
                         * of appropriate size.
 
        }
 
        nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
-                       " offset = %u, page size = %u.\n",
+                       " offset = %u, page size = %lu.\n",
                        (unsigned long int)start, (unsigned long int)virt, (u32)length,
-                       ib_umem_offset(region), region->page_size);
+                       ib_umem_offset(region), BIT(region->page_shift));
 
        skip_pages = ((u32)ib_umem_offset(region)) >> 12;
 
 
        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;
 
-       shift = ilog2(umem->page_size);
+       shift = umem->page_shift;
 
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->pa_lo =
-                           cpu_to_le32(sg_dma_address
-                                       (sg) +
-                                       (umem->page_size * pg_cnt));
+                           cpu_to_le32(sg_dma_address(sg) +
+                                       (pg_cnt << shift));
                        pbe->pa_hi =
-                           cpu_to_le32(upper_32_bits
-                                       ((sg_dma_address
-                                         (sg) +
-                                         umem->page_size * pg_cnt)));
+                           cpu_to_le32(upper_32_bits(sg_dma_address(sg) +
+                                        (pg_cnt << shift)));
                        pbe_cnt += 1;
                        total_num_pbes += 1;
                        pbe++;
        if (status)
                goto umem_err;
 
-       mr->hwmr.pbe_size = mr->umem->page_size;
+       mr->hwmr.pbe_size = BIT(mr->umem->page_shift);
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
 
 
        pbe_cnt = 0;
 
-       shift = ilog2(umem->page_size);
+       shift = umem->page_shift;
 
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                pages = sg_dma_len(sg) >> shift;
                for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
                        /* store the page address in pbe */
                        pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-                                             umem->page_size * pg_cnt);
+                                             (pg_cnt << shift));
                        addr = upper_32_bits(sg_dma_address(sg) +
-                                            umem->page_size * pg_cnt);
+                                            (pg_cnt << shift));
                        pbe->hi = cpu_to_le32(addr);
                        pbe_cnt++;
                        total_num_pbes++;
        mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
        mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
        mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
-       mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+       mr->hw_mr.page_size_log = mr->umem->page_shift;
        mr->hw_mr.fbo = ib_umem_offset(mr->umem);
        mr->hw_mr.length = len;
        mr->hw_mr.vaddr = usr_addr;
 
                len = sg_dma_len(sg) >> PAGE_SHIFT;
                for (j = 0; j < len; j++) {
                        dma_addr_t addr = sg_dma_address(sg) +
-                                         umem->page_size * j;
+                                         (j << umem->page_shift);
 
                        ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
                        if (ret)
 
        mr->mr.access_flags = mr_access_flags;
        mr->umem = umem;
 
-       if (is_power_of_2(umem->page_size))
-               mr->mr.page_shift = ilog2(umem->page_size);
+       mr->mr.page_shift = umem->page_shift;
        m = 0;
        n = 0;
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                        goto bail_inval;
                }
                mr->mr.map[m]->segs[n].vaddr = vaddr;
-               mr->mr.map[m]->segs[n].length = umem->page_size;
-               trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, umem->page_size);
+               mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
+               trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
+                                     BIT(umem->page_shift));
                n++;
                if (n == RVT_SEGSZ) {
                        m++;
 
                goto err1;
        }
 
-       WARN_ON_ONCE(!is_power_of_2(umem->page_size));
-
-       mem->page_shift         = ilog2(umem->page_size);
-       mem->page_mask          = umem->page_size - 1;
+       mem->page_shift         = umem->page_shift;
+       mem->page_mask          = BIT(umem->page_shift) - 1;
 
        num_buf                 = 0;
        map                     = mem->map;
                        }
 
                        buf->addr = (uintptr_t)vaddr;
-                       buf->size = umem->page_size;
+                       buf->size = BIT(umem->page_shift);
                        num_buf++;
                        buf++;
 
 
        struct ib_ucontext     *context;
        size_t                  length;
        unsigned long           address;
-       int                     page_size;
+       int                     page_shift;
        int                     writable;
        int                     hugetlb;
        struct work_struct      work;
 /* Returns the offset of the umem start relative to the first page. */
 static inline int ib_umem_offset(struct ib_umem *umem)
 {
-       return umem->address & ((unsigned long)umem->page_size - 1);
+       return umem->address & (BIT(umem->page_shift) - 1);
 }
 
 /* Returns the first page of an ODP umem. */