        slots = ((last - first) >> PAGE_SHIFT) + 1;
 
-       offset = prandom_u32_max(slots);
+       offset = get_random_u32_below(slots);
 
        addr = first + (offset << PAGE_SHIFT);
 
 
 unsigned long arch_align_stack(unsigned long sp)
 {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= prandom_u32_max(PAGE_SIZE);
+               sp -= get_random_u32_below(PAGE_SIZE);
        return sp & ~0xf;
 }
 
 
 unsigned long arch_align_stack(unsigned long sp)
 {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= prandom_u32_max(PAGE_SIZE);
+               sp -= get_random_u32_below(PAGE_SIZE);
 
        return sp & STACK_ALIGN;
 }
 
        unsigned long base = STACK_TOP;
 
        if (current->flags & PF_RANDOMIZE) {
-               base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
+               base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
                base = PAGE_ALIGN(base);
        }
 
 
 unsigned long arch_align_stack(unsigned long sp)
 {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= prandom_u32_max(PAGE_SIZE);
+               sp -= get_random_u32_below(PAGE_SIZE);
 
        return sp & ALMASK;
 }
 
        }
 
        if (current->flags & PF_RANDOMIZE) {
-               base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
+               base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
                base = PAGE_ALIGN(base);
        }
 
 
 
        map_base = mm->mmap_base;
        if (current->flags & PF_RANDOMIZE)
-               map_base -= prandom_u32_max(0x20) * PAGE_SIZE;
+               map_base -= get_random_u32_below(0x20) * PAGE_SIZE;
 
        vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);
 
 
 
                pr_info("crc-vpmsum_test begins, %lu iterations\n", iterations);
                for (i=0; i<iterations; i++) {
-                       size_t offset = prandom_u32_max(16);
-                       size_t len = prandom_u32_max(MAX_CRC_LENGTH);
+                       size_t offset = get_random_u32_below(16);
+                       size_t len = get_random_u32_below(MAX_CRC_LENGTH);
 
                        if (len <= offset)
                                continue;
 
 unsigned long arch_align_stack(unsigned long sp)
 {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= prandom_u32_max(PAGE_SIZE);
+               sp -= get_random_u32_below(PAGE_SIZE);
        return sp & ~0xf;
 }
 
 unsigned long arch_align_stack(unsigned long sp)
 {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= prandom_u32_max(PAGE_SIZE);
+               sp -= get_random_u32_below(PAGE_SIZE);
        return sp & ~0xf;
 }
 
 
        end -= len;
 
        if (end > start) {
-               offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
+               offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
 
        unsigned int offset;
 
        /* This loses some more bits than a modulo, but is cheaper */
-       offset = prandom_u32_max(PTRS_PER_PTE);
+       offset = get_random_u32_below(PTRS_PER_PTE);
        return start + (offset << PAGE_SHIFT);
 }
 
 
 unsigned long arch_align_stack(unsigned long sp)
 {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= prandom_u32_max(8192);
+               sp -= get_random_u32_below(8192);
        return sp & ~0xf;
 }
 #endif
 
        end -= len;
 
        if (end > start) {
-               offset = prandom_u32_max(((end - start) >> PAGE_SHIFT) + 1);
+               offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
 
                 */
                if (module_load_offset == 0)
                        module_load_offset =
-                               (prandom_u32_max(1024) + 1) * PAGE_SIZE;
+                               (get_random_u32_below(1024) + 1) * PAGE_SIZE;
                mutex_unlock(&module_kaslr_mutex);
        }
        return module_load_offset;
 
 unsigned long arch_align_stack(unsigned long sp)
 {
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-               sp -= prandom_u32_max(8192);
+               sp -= get_random_u32_below(8192);
        return sp & ~0xf;
 }
 
 
        failed += print_split(&sa);
 
        for (i = 0; i < NTEST; i++) {
-               unsigned long pfn = prandom_u32_max(max_pfn_mapped);
+               unsigned long pfn = get_random_u32_below(max_pfn_mapped);
 
                addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
-               len[i] = prandom_u32_max(NPAGES);
+               len[i] = get_random_u32_below(NPAGES);
                len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
 
                if (len[i] == 0)
 
        ps_end = ctx->key_size - req->src_len - 2;
        req_ctx->in_buf[0] = 0x02;
        for (i = 1; i < ps_end; i++)
-               req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
+               req_ctx->in_buf[i] = 1 + get_random_u32_below(255);
        req_ctx->in_buf[ps_end] = 0x00;
 
        pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
 
 /* Generate a random length in range [0, max_len], but prefer smaller values */
 static unsigned int generate_random_length(unsigned int max_len)
 {
-       unsigned int len = prandom_u32_max(max_len + 1);
+       unsigned int len = get_random_u32_below(max_len + 1);
 
-       switch (prandom_u32_max(4)) {
+       switch (get_random_u32_below(4)) {
        case 0:
                return len % 64;
        case 1:
 {
        size_t bitpos;
 
-       bitpos = prandom_u32_max(size * 8);
+       bitpos = get_random_u32_below(size * 8);
        buf[bitpos / 8] ^= 1 << (bitpos % 8);
 }
 
 /* Flip a random byte in the given nonempty data buffer */
 static void flip_random_byte(u8 *buf, size_t size)
 {
-       buf[prandom_u32_max(size)] ^= 0xff;
+       buf[get_random_u32_below(size)] ^= 0xff;
 }
 
 /* Sometimes make some random changes to the given nonempty data buffer */
        size_t i;
 
        /* Sometimes flip some bits */
-       if (prandom_u32_max(4) == 0) {
-               num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
+       if (get_random_u32_below(4) == 0) {
+               num_flips = min_t(size_t, 1 << get_random_u32_below(8), size * 8);
                for (i = 0; i < num_flips; i++)
                        flip_random_bit(buf, size);
        }
 
        /* Sometimes flip some bytes */
-       if (prandom_u32_max(4) == 0) {
-               num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
+       if (get_random_u32_below(4) == 0) {
+               num_flips = min_t(size_t, 1 << get_random_u32_below(8), size);
                for (i = 0; i < num_flips; i++)
                        flip_random_byte(buf, size);
        }
        if (count == 0)
                return;
 
-       switch (prandom_u32_max(8)) { /* Choose a generation strategy */
+       switch (get_random_u32_below(8)) { /* Choose a generation strategy */
        case 0:
        case 1:
                /* All the same byte, plus optional mutations */
-               switch (prandom_u32_max(4)) {
+               switch (get_random_u32_below(4)) {
                case 0:
                        b = 0x00;
                        break;
                unsigned int this_len;
                const char *flushtype_str;
 
-               if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
+               if (div == &divs[max_divs - 1] || get_random_u32_below(2) == 0)
                        this_len = remaining;
                else
-                       this_len = 1 + prandom_u32_max(remaining);
+                       this_len = 1 + get_random_u32_below(remaining);
                div->proportion_of_total = this_len;
 
-               if (prandom_u32_max(4) == 0)
-                       div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
-               else if (prandom_u32_max(2) == 0)
-                       div->offset = prandom_u32_max(32);
+               if (get_random_u32_below(4) == 0)
+                       div->offset = (PAGE_SIZE - 128) + get_random_u32_below(128);
+               else if (get_random_u32_below(2) == 0)
+                       div->offset = get_random_u32_below(32);
                else
-                       div->offset = prandom_u32_max(PAGE_SIZE);
-               if (prandom_u32_max(8) == 0)
+                       div->offset = get_random_u32_below(PAGE_SIZE);
+               if (get_random_u32_below(8) == 0)
                        div->offset_relative_to_alignmask = true;
 
                div->flush_type = FLUSH_TYPE_NONE;
                if (gen_flushes) {
-                       switch (prandom_u32_max(4)) {
+                       switch (get_random_u32_below(4)) {
                        case 0:
                                div->flush_type = FLUSH_TYPE_REIMPORT;
                                break;
 
                if (div->flush_type != FLUSH_TYPE_NONE &&
                    !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
-                   prandom_u32_max(2) == 0)
+                   get_random_u32_below(2) == 0)
                        div->nosimd = true;
 
                switch (div->flush_type) {
 
        p += scnprintf(p, end - p, "random:");
 
-       switch (prandom_u32_max(4)) {
+       switch (get_random_u32_below(4)) {
        case 0:
        case 1:
                cfg->inplace_mode = OUT_OF_PLACE;
                break;
        }
 
-       if (prandom_u32_max(2) == 0) {
+       if (get_random_u32_below(2) == 0) {
                cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
                p += scnprintf(p, end - p, " may_sleep");
        }
 
-       switch (prandom_u32_max(4)) {
+       switch (get_random_u32_below(4)) {
        case 0:
                cfg->finalization_type = FINALIZATION_TYPE_FINAL;
                p += scnprintf(p, end - p, " use_final");
        }
 
        if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
-           prandom_u32_max(2) == 0) {
+           get_random_u32_below(2) == 0) {
                cfg->nosimd = true;
                p += scnprintf(p, end - p, " nosimd");
        }
                                          cfg->req_flags);
        p += scnprintf(p, end - p, "]");
 
-       if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
+       if (cfg->inplace_mode == OUT_OF_PLACE && get_random_u32_below(2) == 0) {
                p += scnprintf(p, end - p, " dst_divs=[");
                p = generate_random_sgl_divisions(cfg->dst_divs,
                                                  ARRAY_SIZE(cfg->dst_divs),
                p += scnprintf(p, end - p, "]");
        }
 
-       if (prandom_u32_max(2) == 0) {
-               cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
+       if (get_random_u32_below(2) == 0) {
+               cfg->iv_offset = 1 + get_random_u32_below(MAX_ALGAPI_ALIGNMASK);
                p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
        }
 
-       if (prandom_u32_max(2) == 0) {
-               cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
+       if (get_random_u32_below(2) == 0) {
+               cfg->key_offset = 1 + get_random_u32_below(MAX_ALGAPI_ALIGNMASK);
                p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
        }
 
        vec->ksize = 0;
        if (maxkeysize) {
                vec->ksize = maxkeysize;
-               if (prandom_u32_max(4) == 0)
-                       vec->ksize = 1 + prandom_u32_max(maxkeysize);
+               if (get_random_u32_below(4) == 0)
+                       vec->ksize = 1 + get_random_u32_below(maxkeysize);
                generate_random_bytes((u8 *)vec->key, vec->ksize);
 
                vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
        const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
        const unsigned int authsize = vec->clen - vec->plen;
 
-       if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
+       if (get_random_u32_below(2) == 0 && vec->alen > aad_tail_size) {
                 /* Mutate the AAD */
                flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
-               if (prandom_u32_max(2) == 0)
+               if (get_random_u32_below(2) == 0)
                        return;
        }
-       if (prandom_u32_max(2) == 0) {
+       if (get_random_u32_below(2) == 0) {
                /* Mutate auth tag (assuming it's at the end of ciphertext) */
                flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
        } else {
        const unsigned int ivsize = crypto_aead_ivsize(tfm);
        const unsigned int authsize = vec->clen - vec->plen;
        const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
-                                (prefer_inauthentic || prandom_u32_max(4) == 0);
+                                (prefer_inauthentic || get_random_u32_below(4) == 0);
 
        /* Generate the AAD. */
        generate_random_bytes((u8 *)vec->assoc, vec->alen);
                /* Avoid implementation-defined behavior. */
                memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
 
-       if (inauthentic && prandom_u32_max(2) == 0) {
+       if (inauthentic && get_random_u32_below(2) == 0) {
                /* Generate a random ciphertext. */
                generate_random_bytes((u8 *)vec->ctext, vec->clen);
        } else {
 
        /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
        vec->klen = maxkeysize;
-       if (prandom_u32_max(4) == 0)
-               vec->klen = prandom_u32_max(maxkeysize + 1);
+       if (get_random_u32_below(4) == 0)
+               vec->klen = get_random_u32_below(maxkeysize + 1);
        generate_random_bytes((u8 *)vec->key, vec->klen);
        vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
 
 
        /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
        authsize = maxauthsize;
-       if (prandom_u32_max(4) == 0)
-               authsize = prandom_u32_max(maxauthsize + 1);
+       if (get_random_u32_below(4) == 0)
+               authsize = get_random_u32_below(maxauthsize + 1);
        if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
                authsize = MIN_COLLISION_FREE_AUTHSIZE;
        if (WARN_ON(authsize > maxdatasize))
 
        /* AAD, plaintext, and ciphertext lengths */
        total_len = generate_random_length(maxdatasize);
-       if (prandom_u32_max(4) == 0)
+       if (get_random_u32_below(4) == 0)
                vec->alen = 0;
        else
                vec->alen = generate_random_length(total_len);
 
        /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
        vec->klen = maxkeysize;
-       if (prandom_u32_max(4) == 0)
-               vec->klen = prandom_u32_max(maxkeysize + 1);
+       if (get_random_u32_below(4) == 0)
+               vec->klen = get_random_u32_below(maxkeysize + 1);
        generate_random_bytes((u8 *)vec->key, vec->klen);
        vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
 
 
 
        timeo = connect_int * HZ;
        /* 28.5% random jitter */
-       timeo += prandom_u32_max(2) ? timeo / 7 : -timeo / 7;
+       timeo += get_random_u32_below(2) ? timeo / 7 : -timeo / 7;
 
        err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
        if (err <= 0)
                                drbd_warn(connection, "Error receiving initial packet\n");
                                sock_release(s);
 randomize:
-                               if (prandom_u32_max(2))
+                               if (get_random_u32_below(2))
                                        goto retry;
                        }
                }
 
 #define PRIMARY_CMD_RING                               0
 #define MHI_DEV_WAKE_DB                                        127
 #define MHI_MAX_MTU                                    0xffff
-#define MHI_RANDOM_U32_NONZERO(bmsk)                   (prandom_u32_max(bmsk) + 1)
+#define MHI_RANDOM_U32_NONZERO(bmsk)                   (get_random_u32_below(bmsk) + 1)
 
 enum mhi_er_type {
        MHI_ER_TYPE_INVALID = 0x0,
 
                struct dma_fence *fence = dma_fence_get(data->fc.tail);
                int seqno;
 
-               seqno = prandom_u32_max(data->fc.chain_length) + 1;
+               seqno = get_random_u32_below(data->fc.chain_length) + 1;
 
                err = dma_fence_chain_find_seqno(&fence, seqno);
                if (err) {
                dma_fence_put(fence);
 
 signal:
-               seqno = prandom_u32_max(data->fc.chain_length - 1);
+               seqno = get_random_u32_below(data->fc.chain_length - 1);
                dma_fence_signal(data->fc.fences[seqno]);
                cond_resched();
        }
        while (--count) {
                unsigned int swp;
 
-               swp = prandom_u32_max(count + 1);
+               swp = get_random_u32_below(count + 1);
                if (swp == count)
                        continue;
 
 
        /* Check whether the file_priv has already selected one ring. */
        if ((int)file_priv->bsd_engine < 0)
                file_priv->bsd_engine =
-                       prandom_u32_max(num_vcs_engines(dev_priv));
+                       get_random_u32_below(num_vcs_engines(dev_priv));
 
        return file_priv->bsd_engine;
 }
 
         * NB This does not force us to execute on this engine, it will just
         * typically be the first we inspect for submission.
         */
-       swp = prandom_u32_max(ve->num_siblings);
+       swp = get_random_u32_below(ve->num_siblings);
        if (swp)
                swap(ve->siblings[swp], ve->siblings[0]);
 }
 
                        u8 value, resource_size_t offset,
                        const void *caller)
 {
-       int byte = prandom_u32_max(pagesize);
+       int byte = get_random_u32_below(pagesize);
        u8 result[3];
 
        memset_io(va, value, pagesize); /* or GPF! */
 static resource_size_t random_page(resource_size_t last)
 {
        /* Limited to low 44b (16TiB), but should suffice for a spot check */
-       return prandom_u32_max(last >> PAGE_SHIFT) << PAGE_SHIFT;
+       return get_random_u32_below(last >> PAGE_SHIFT) << PAGE_SHIFT;
 }
 
 static int iomemtest(struct intel_memory_region *mem,
 
 
        inet_get_local_port_range(net, &low, &high);
        remaining = (high - low) + 1;
-       rover = prandom_u32_max(remaining) + low;
+       rover = get_random_u32_below(remaining) + low;
 retry:
        if (last_used_port != rover) {
                struct rdma_bind_list *bind_list;
 
 
        if (obj < alloc->max) {
                if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
-                       alloc->last += prandom_u32_max(RANDOM_SKIP);
+                       alloc->last += get_random_u32_below(RANDOM_SKIP);
                else
                        alloc->last = obj + 1;
                if (alloc->last >= alloc->max)
        alloc->start = start;
        alloc->flags = flags;
        if (flags & C4IW_ID_TABLE_F_RANDOM)
-               alloc->last = prandom_u32_max(RANDOM_SKIP);
+               alloc->last = get_random_u32_below(RANDOM_SKIP);
        else
                alloc->last = 0;
        alloc->max = num;
 
        u16 sport;
 
        if (!fl)
-               sport = prandom_u32_max(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX + 1 -
-                                       IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
+               sport = get_random_u32_below(IB_ROCE_UDP_ENCAP_VALID_PORT_MAX +
+                                            1 - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN) +
                        IB_ROCE_UDP_ENCAP_VALID_PORT_MIN;
        else
                sport = rdma_flow_label_to_udp_sport(fl);
 
        rtrs_clt_stop_and_destroy_conns(clt_path);
        queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
                           msecs_to_jiffies(delay_ms +
-                                           prandom_u32_max(RTRS_RECONNECT_SEED)));
+                                           get_random_u32_below(RTRS_RECONNECT_SEED)));
 }
 
 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
 
        }
 
        if (bypass_torture_test(dc)) {
-               if (prandom_u32_max(4) == 3)
+               if (get_random_u32_below(4) == 3)
                        goto skip;
                else
                        goto rescale;
 
        } else if (tpg->pattern == TPG_PAT_NOISE) {
                r = g = b = get_random_u8();
        } else if (k == TPG_COLOR_RANDOM) {
-               r = g = b = tpg->qual_offset + prandom_u32_max(196);
+               r = g = b = tpg->qual_offset + get_random_u32_below(196);
        } else if (k >= TPG_COLOR_RAMP) {
                r = g = b = k - TPG_COLOR_RAMP;
        }
                params->wss_width = tpg->crop.width;
        params->wss_width = tpg_hscale_div(tpg, p, params->wss_width);
        params->wss_random_offset =
-               params->twopixsize * prandom_u32_max(tpg->src_width / 2);
+               params->twopixsize * get_random_u32_below(tpg->src_width / 2);
 
        if (tpg->crop.left < tpg->border.left) {
                left_pillar_width = tpg->border.left - tpg->crop.left;
                linestart_newer = tpg->black_line[p];
        } else if (tpg->pattern == TPG_PAT_NOISE || tpg->qual == TPG_QUAL_NOISE) {
                linestart_older = tpg->random_line[p] +
-                                 twopixsize * prandom_u32_max(tpg->src_width / 2);
+                                 twopixsize * get_random_u32_below(tpg->src_width / 2);
                linestart_newer = tpg->random_line[p] +
-                                 twopixsize * prandom_u32_max(tpg->src_width / 2);
+                                 twopixsize * get_random_u32_below(tpg->src_width / 2);
        } else {
                unsigned frame_line_old =
                        (frame_line + mv_vert_old) % tpg->src_height;
 
         * Also, usually, signal strength is a negative number in dBm.
         */
        c->strength.stat[0].svalue = state->tuner_cnr;
-       c->strength.stat[0].svalue -= prandom_u32_max(state->tuner_cnr / 50);
+       c->strength.stat[0].svalue -= get_random_u32_below(state->tuner_cnr / 50);
        c->strength.stat[0].svalue -= 68000; /* Adjust to a better range */
 
        c->cnr.stat[0].svalue = state->tuner_cnr;
-       c->cnr.stat[0].svalue -= prandom_u32_max(state->tuner_cnr / 50);
+       c->cnr.stat[0].svalue -= get_random_u32_below(state->tuner_cnr / 50);
 }
 
 static int vidtv_demod_read_status(struct dvb_frontend *fe,
 
                if (snr < cnr2qual->cnr_ok) {
                        /* eventually lose the TS lock */
-                       if (prandom_u32_max(100) < config->drop_tslock_prob_on_low_snr)
+                       if (get_random_u32_below(100) < config->drop_tslock_prob_on_low_snr)
                                state->status = 0;
                } else {
                        /* recover if the signal improves */
-                       if (prandom_u32_max(100) <
+                       if (get_random_u32_below(100) <
                            config->recover_tslock_prob_on_good_snr)
                                state->status = FE_HAS_SIGNAL  |
                                                FE_HAS_CARRIER |
 
 
        /* Drop a certain percentage of buffers. */
        if (dev->perc_dropped_buffers &&
-           prandom_u32_max(100) < dev->perc_dropped_buffers)
+           get_random_u32_below(100) < dev->perc_dropped_buffers)
                goto update_mv;
 
        spin_lock(&dev->slock);
 
 
        /* Drop a certain percentage of buffers. */
        if (dev->perc_dropped_buffers &&
-           prandom_u32_max(100) < dev->perc_dropped_buffers)
+           get_random_u32_below(100) < dev->perc_dropped_buffers)
                return;
 
        spin_lock(&dev->slock);
 
 
                if (data_blk == 0 && dev->radio_rds_loop)
                        vivid_radio_rds_init(dev);
-               if (perc && prandom_u32_max(100) < perc) {
-                       switch (prandom_u32_max(4)) {
+               if (perc && get_random_u32_below(100) < perc) {
+                       switch (get_random_u32_below(4)) {
                        case 0:
                                rds.block |= V4L2_RDS_BLOCK_CORRECTED;
                                break;
 
 
        /* Drop a certain percentage of buffers. */
        if (dev->perc_dropped_buffers &&
-           prandom_u32_max(100) < dev->perc_dropped_buffers)
+           get_random_u32_below(100) < dev->perc_dropped_buffers)
                return;
 
        spin_lock(&dev->slock);
 
 
 static inline int get_random_pressure(void)
 {
-       return prandom_u32_max(VIVID_PRESSURE_LIMIT);
+       return get_random_u32_below(VIVID_PRESSURE_LIMIT);
 }
 
 static void vivid_tch_buf_set(struct v4l2_pix_format *f,
 
            !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
                return;
 
-       data->error = data_errors[prandom_u32_max(ARRAY_SIZE(data_errors))];
-       data->bytes_xfered = prandom_u32_max(data->bytes_xfered >> 9) << 9;
+       data->error = data_errors[get_random_u32_below(ARRAY_SIZE(data_errors))];
+       data->bytes_xfered = get_random_u32_below(data->bytes_xfered >> 9) << 9;
 }
 
 #else /* CONFIG_FAIL_MMC_REQUEST */
 
         * Try to inject the error at random points during the data transfer.
         */
        hrtimer_start(&host->fault_timer,
-                     ms_to_ktime(prandom_u32_max(25)),
+                     ms_to_ktime(get_random_u32_below(25)),
                      HRTIMER_MODE_REL);
 }
 
 
        if (bitflips && get_random_u16() < (1 << 6)) {
                int flips = 1;
                if (bitflips > 1)
-                       flips = prandom_u32_max(bitflips) + 1;
+                       flips = get_random_u32_below(bitflips) + 1;
                while (flips--) {
-                       int pos = prandom_u32_max(num * 8);
+                       int pos = get_random_u32_below(num * 8);
                        ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
                        NS_WARN("read_page: flipping bit %d in page %d "
                                "reading from %d ecc: corrected=%u failed=%u\n",
 
 static void single_bit_error_data(void *error_data, void *correct_data,
                                size_t size)
 {
-       unsigned int offset = prandom_u32_max(size * BITS_PER_BYTE);
+       unsigned int offset = get_random_u32_below(size * BITS_PER_BYTE);
 
        memcpy(error_data, correct_data, size);
        __change_bit_le(offset, error_data);
 {
        unsigned int offset[2];
 
-       offset[0] = prandom_u32_max(size * BITS_PER_BYTE);
+       offset[0] = get_random_u32_below(size * BITS_PER_BYTE);
        do {
-               offset[1] = prandom_u32_max(size * BITS_PER_BYTE);
+               offset[1] = get_random_u32_below(size * BITS_PER_BYTE);
        } while (offset[0] == offset[1]);
 
        memcpy(error_data, correct_data, size);
 
 static unsigned int random_ecc_bit(size_t size)
 {
-       unsigned int offset = prandom_u32_max(3 * BITS_PER_BYTE);
+       unsigned int offset = get_random_u32_below(3 * BITS_PER_BYTE);
 
        if (size == 256) {
                /*
                 * and 17th bit) in ECC code for 256 byte data block
                 */
                while (offset == 16 || offset == 17)
-                       offset = prandom_u32_max(3 * BITS_PER_BYTE);
+                       offset = get_random_u32_below(3 * BITS_PER_BYTE);
        }
 
        return offset;
 
 
 again:
        /* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
-       eb = prandom_u32_max(ebcnt - 1);
+       eb = get_random_u32_below(ebcnt - 1);
        if (bbt[eb])
                goto again;
        return eb;
 
 static int rand_offs(void)
 {
-       return prandom_u32_max(bufsize);
+       return get_random_u32_below(bufsize);
 }
 
 static int rand_len(int offs)
 {
-       return prandom_u32_max(bufsize - offs);
+       return get_random_u32_below(bufsize - offs);
 }
 
 static int do_read(void)
 
 static int do_operation(void)
 {
-       if (prandom_u32_max(2))
+       if (get_random_u32_below(2))
                return do_read();
        else
                return do_write();
 
 
                if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
                        range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
-                       ubi->dbg.power_cut_counter += prandom_u32_max(range);
+                       ubi->dbg.power_cut_counter += get_random_u32_below(range);
                }
                return 0;
        }
 
 static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
 {
        if (ubi->dbg.emulate_bitflips)
-               return !prandom_u32_max(200);
+               return !get_random_u32_below(200);
        return 0;
 }
 
 static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
 {
        if (ubi->dbg.emulate_io_failures)
-               return !prandom_u32_max(500);
+               return !get_random_u32_below(500);
        return 0;
 }
 
 static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 {
        if (ubi->dbg.emulate_io_failures)
-               return !prandom_u32_max(400);
+               return !get_random_u32_below(400);
        return 0;
 }
 
 
        for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
                atomic_set(&cp->csk_tbl[i].ref_count, 0);
 
-       port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE);
+       port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
        if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
                             CNIC_LOCAL_PORT_MIN, port_id)) {
                cnic_cm_free_mem(dev);
 
        current_timeo = *timeo_p;
        noblock = (*timeo_p ? false : true);
        if (csk_mem_free(cdev, sk)) {
-               current_timeo = prandom_u32_max(HZ / 5) + 2;
-               vm_wait = prandom_u32_max(HZ / 5) + 2;
+               current_timeo = get_random_u32_below(HZ / 5) + 2;
+               vm_wait = get_random_u32_below(HZ / 5) + 2;
        }
 
        add_wait_queue(sk_sleep(sk), &wait);
 
 
 static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
 {
-       u16 seed_value = prandom_u32_max(QCA808X_MASTER_SLAVE_SEED_RANGE);
+       u16 seed_value = get_random_u32_below(QCA808X_MASTER_SLAVE_SEED_RANGE);
 
        return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
                        QCA808X_MASTER_SLAVE_SEED_CFG,
 
        struct team_port *port;
        int port_index;
 
-       port_index = prandom_u32_max(team->en_port_count);
+       port_index = get_random_u32_below(team->en_port_count);
        port = team_get_port_by_index_rcu(team, port_index);
        if (unlikely(!port))
                goto drop;
 
 
        for (i = 0; i < NUM_RAND_ROUTES; ++i) {
                get_random_bytes(ip, 4);
-               cidr = prandom_u32_max(32) + 1;
-               peer = peers[prandom_u32_max(NUM_PEERS)];
+               cidr = get_random_u32_below(32) + 1;
+               peer = peers[get_random_u32_below(NUM_PEERS)];
                if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
                                            peer, &mutex) < 0) {
                        pr_err("allowedips random self-test malloc: FAIL\n");
                for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
                        memcpy(mutated, ip, 4);
                        get_random_bytes(mutate_mask, 4);
-                       mutate_amount = prandom_u32_max(32);
+                       mutate_amount = get_random_u32_below(32);
                        for (k = 0; k < mutate_amount / 8; ++k)
                                mutate_mask[k] = 0xff;
                        mutate_mask[k] = 0xff
                                << ((8 - (mutate_amount % 8)) % 8);
                        for (; k < 4; ++k)
                                mutate_mask[k] = 0;
                        for (k = 0; k < 4; ++k)
                                mutated[k] = (mutated[k] & mutate_mask[k]) |
                                             (~mutate_mask[k] &
                                              get_random_u8());
-                       cidr = prandom_u32_max(32) + 1;
-                       peer = peers[prandom_u32_max(NUM_PEERS)];
+                       cidr = get_random_u32_below(32) + 1;
+                       peer = peers[get_random_u32_below(NUM_PEERS)];
                        if (wg_allowedips_insert_v4(&t,
                                                    (struct in_addr *)mutated,
                                                    cidr, peer, &mutex) < 0) {
 
        for (i = 0; i < NUM_RAND_ROUTES; ++i) {
                get_random_bytes(ip, 16);
-               cidr = prandom_u32_max(128) + 1;
-               peer = peers[prandom_u32_max(NUM_PEERS)];
+               cidr = get_random_u32_below(128) + 1;
+               peer = peers[get_random_u32_below(NUM_PEERS)];
                if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
                                            peer, &mutex) < 0) {
                        pr_err("allowedips random self-test malloc: FAIL\n");
                for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
                        memcpy(mutated, ip, 16);
                        get_random_bytes(mutate_mask, 16);
-                       mutate_amount = prandom_u32_max(128);
+                       mutate_amount = get_random_u32_below(128);
                        for (k = 0; k < mutate_amount / 8; ++k)
                                mutate_mask[k] = 0xff;
                        mutate_mask[k] = 0xff
                                << ((8 - (mutate_amount % 8)) % 8);
                        for (; k < 16; ++k)
                                mutate_mask[k] = 0;
                        for (k = 0; k < 16; ++k)
                                mutated[k] = (mutated[k] & mutate_mask[k]) |
                                             (~mutate_mask[k] &
                                              get_random_u8());
-                       cidr = prandom_u32_max(128) + 1;
-                       peer = peers[prandom_u32_max(NUM_PEERS)];
+                       cidr = get_random_u32_below(128) + 1;
+                       peer = peers[get_random_u32_below(NUM_PEERS)];
                        if (wg_allowedips_insert_v6(&t,
                                                    (struct in6_addr *)mutated,
                                                    cidr, peer, &mutex) < 0) {
 
        if (!timer_pending(&peer->timer_new_handshake))
                mod_peer_timer(peer, &peer->timer_new_handshake,
                        jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ +
-                       prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+                       get_random_u32_below(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
 }
 
 /* Should be called after an authenticated data packet is received. */
 {
        mod_peer_timer(peer, &peer->timer_retransmit_handshake,
                       jiffies + REKEY_TIMEOUT * HZ +
-                      prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
+                      get_random_u32_below(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
 }
 
 /* Should be called after a handshake response message is received and processed
 
        if (afx_hdl->is_listen && afx_hdl->my_listen_chan)
                /* 100ms ~ 300ms */
                err = brcmf_p2p_discover_listen(p2p, afx_hdl->my_listen_chan,
-                                               100 * (1 + prandom_u32_max(3)));
+                                               100 * (1 + get_random_u32_below(3)));
        else
                err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan);
 
 
                        iwl_mvm_mac_ap_iterator, &data);
 
                if (data.beacon_device_ts) {
-                       u32 rand = prandom_u32_max(64 - 36) + 36;
+                       u32 rand = get_random_u32_below(64 - 36) + 36;
                        mvmvif->ap_beacon_time = data.beacon_device_ts +
                                ieee80211_tu_to_usec(data.beacon_int * rand /
                                                     100);
 
        }
 
        if (dev_cnt)
-               pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);
+               pdev = pci_dev_get(closest_pdevs[get_random_u32_below(dev_cnt)]);
 
        for (i = 0; i < dev_cnt; i++)
                pci_dev_put(closest_pdevs[i]);
 
 {
        if (!port_scan_backoff)
                return 0;
-       return prandom_u32_max(port_scan_backoff);
+       return get_random_u32_below(port_scan_backoff);
 }
 
 static void zfcp_fc_port_scan_time(struct zfcp_adapter *adapter)
 
 
        if (fip->probe_tries < FIP_VN_RLIM_COUNT) {
                fip->probe_tries++;
-               wait = prandom_u32_max(FIP_VN_PROBE_WAIT);
+               wait = get_random_u32_below(FIP_VN_PROBE_WAIT);
        } else
                wait = FIP_VN_RLIM_INT;
        mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait));
                                          fcoe_all_vn2vn, 0);
                        fip->port_ka_time = jiffies +
                                 msecs_to_jiffies(FIP_VN_BEACON_INT +
-                                       prandom_u32_max(FIP_VN_BEACON_FUZZ));
+                                       get_random_u32_below(FIP_VN_BEACON_FUZZ));
                }
                if (time_before(fip->port_ka_time, next_time))
                        next_time = fip->port_ka_time;
 
                                sizeof(struct qedi_endpoint *)), GFP_KERNEL);
        if (!qedi->ep_tbl)
                return -ENOMEM;
-       port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE);
+       port_id = get_random_u32_below(QEDI_LOCAL_PORT_RANGE);
        if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
                             QEDI_LOCAL_PORT_MIN, port_id)) {
                qedi_cm_free_mem(qedi);
 
                        u64 ns = jiffies_to_nsecs(delta_jiff);
 
                        if (sdebug_random && ns < U32_MAX) {
-                               ns = prandom_u32_max((u32)ns);
+                               ns = get_random_u32_below((u32)ns);
                        } else if (sdebug_random) {
                                ns >>= 12;      /* scale to 4 usec precision */
                                if (ns < U32_MAX)       /* over 4 hours max */
-                                       ns = prandom_u32_max((u32)ns);
+                                       ns = get_random_u32_below((u32)ns);
                                ns <<= 12;
                        }
                        kt = ns_to_ktime(ns);
                } else {        /* ndelay has a 4.2 second max */
-                       kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
+                       kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
                                             (u32)ndelay;
                        if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
                                u64 d = ktime_get_boottime_ns() - ns_from_boot;
 
        if (nsplits != ci->i_fragtree_nsplits) {
                update = true;
        } else if (nsplits) {
-               i = prandom_u32_max(nsplits);
+               i = get_random_u32_below(nsplits);
                id = le32_to_cpu(fragtree->splits[i].frag);
                if (!__ceph_find_frag(ci, id))
                        update = true;
 
                return -1;
 
        /* pick */
-       n = prandom_u32_max(n);
+       n = get_random_u32_below(n);
        for (j = 0, i = 0; i < m->possible_max_rank; i++) {
                if (CEPH_MDS_IS_READY(i, ignore_laggy))
                        j++;
 
                int best_ndir = inodes_per_group;
                int best_group = -1;
 
-               parent_group = prandom_u32_max(ngroups);
+               parent_group = get_random_u32_below(ngroups);
                for (i = 0; i < ngroups; i++) {
                        group = (parent_group + i) % ngroups;
                        desc = ext2_get_group_desc (sb, group, NULL);
 
                        ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
                        parent_group = hinfo.hash % ngroups;
                } else
-                       parent_group = prandom_u32_max(ngroups);
+                       parent_group = get_random_u32_below(ngroups);
                for (i = 0; i < ngroups; i++) {
                        g = (parent_group + i) % ngroups;
                        get_orlov_stats(sb, g, flex_size, &stats);
 
                        }
                        if (!progress) {
                                elr->lr_next_sched = jiffies +
-                                       prandom_u32_max(EXT4_DEF_LI_MAX_START_DELAY * HZ);
+                                       get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
                        }
                        if (time_before(elr->lr_next_sched, next_wakeup))
                                next_wakeup = elr->lr_next_sched;
         * spread the inode table initialization requests
         * better.
         */
-       elr->lr_next_sched = jiffies + prandom_u32_max(
-                               EXT4_DEF_LI_MAX_START_DELAY * HZ);
+       elr->lr_next_sched = jiffies + get_random_u32_below(EXT4_DEF_LI_MAX_START_DELAY * HZ);
        return elr;
 }
 
 
 
        /* let's select beginning hot/small space first in no_heap mode*/
        if (f2fs_need_rand_seg(sbi))
-               p->offset = prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
+               p->offset = get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
        else if (test_opt(sbi, NOHEAP) &&
                (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
                p->offset = 0;
 
 
        sanity_check_seg_type(sbi, seg_type);
        if (f2fs_need_rand_seg(sbi))
-               return prandom_u32_max(MAIN_SECS(sbi) * sbi->segs_per_sec);
+               return get_random_u32_below(MAIN_SECS(sbi) * sbi->segs_per_sec);
 
        /* if segs_per_sec is large than 1, we need to keep original policy. */
        if (__is_large_section(sbi))
        curseg->alloc_type = LFS;
        if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
                curseg->fragment_remained_chunk =
-                               prandom_u32_max(sbi->max_fragment_chunk) + 1;
+                               get_random_u32_below(sbi->max_fragment_chunk) + 1;
 }
 
 static int __next_free_blkoff(struct f2fs_sb_info *sbi,
                        /* To allocate block chunks in different sizes, use random number */
                        if (--seg->fragment_remained_chunk <= 0) {
                                seg->fragment_remained_chunk =
-                                  prandom_u32_max(sbi->max_fragment_chunk) + 1;
+                                  get_random_u32_below(sbi->max_fragment_chunk) + 1;
                                seg->next_blkoff +=
-                                  prandom_u32_max(sbi->max_fragment_hole) + 1;
+                                  get_random_u32_below(sbi->max_fragment_hole) + 1;
                        }
                }
        }
 
 
 static inline int chance(unsigned int n, unsigned int out_of)
 {
-       return !!(prandom_u32_max(out_of) + 1 <= n);
+       return !!(get_random_u32_below(out_of) + 1 <= n);
 
 }
 
                        if (chance(1, 2)) {
                                d->pc_delay = 1;
                                /* Fail within 1 minute */
-                               delay = prandom_u32_max(60000);
+                               delay = get_random_u32_below(60000);
                                d->pc_timeout = jiffies;
                                d->pc_timeout += msecs_to_jiffies(delay);
                                ubifs_warn(c, "failing after %lums", delay);
                        } else {
                                d->pc_delay = 2;
-                               delay = prandom_u32_max(10000);
+                               delay = get_random_u32_below(10000);
                                /* Fail within 10000 operations */
                                d->pc_cnt_max = delay;
                                ubifs_warn(c, "failing after %lu calls", delay);
        unsigned int from, to, ffs = chance(1, 2);
        unsigned char *p = (void *)buf;
 
-       from = prandom_u32_max(len);
+       from = get_random_u32_below(len);
        /* Corruption span max to end of write unit */
        to = min(len, ALIGN(from + 1, c->max_write_size));
 
 
 
        if (!dbg_is_chk_gen(c))
                return 0;
-       if (prandom_u32_max(4))
+       if (get_random_u32_below(4))
                return 0;
 
        for (i = 0; i < c->lsave_cnt; i++)
                c->lsave[i] = c->main_first;
 
        list_for_each_entry(lprops, &c->empty_list, list)
-               c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
+               c->lsave[get_random_u32_below(c->lsave_cnt)] = lprops->lnum;
        list_for_each_entry(lprops, &c->freeable_list, list)
-               c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
+               c->lsave[get_random_u32_below(c->lsave_cnt)] = lprops->lnum;
        list_for_each_entry(lprops, &c->frdi_idx_list, list)
-               c->lsave[prandom_u32_max(c->lsave_cnt)] = lprops->lnum;
+               c->lsave[get_random_u32_below(c->lsave_cnt)] = lprops->lnum;
 
        heap = &c->lpt_heap[LPROPS_DIRTY_IDX - 1];
        for (i = 0; i < heap->cnt; i++)
-               c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
+               c->lsave[get_random_u32_below(c->lsave_cnt)] = heap->arr[i]->lnum;
        heap = &c->lpt_heap[LPROPS_DIRTY - 1];
        for (i = 0; i < heap->cnt; i++)
-               c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
+               c->lsave[get_random_u32_below(c->lsave_cnt)] = heap->arr[i]->lnum;
        heap = &c->lpt_heap[LPROPS_FREE - 1];
        for (i = 0; i < heap->cnt; i++)
-               c->lsave[prandom_u32_max(c->lsave_cnt)] = heap->arr[i]->lnum;
+               c->lsave[get_random_u32_below(c->lsave_cnt)] = heap->arr[i]->lnum;
 
        return 1;
 }
 
                c->ilebs[c->ileb_cnt++] = lnum;
                dbg_cmt("LEB %d", lnum);
        }
-       if (dbg_is_chk_index(c) && !prandom_u32_max(8))
+       if (dbg_is_chk_index(c) && !get_random_u32_below(8))
                return -ENOSPC;
        return 0;
 }
 
 
 #ifdef DEBUG
        /* Randomly don't execute the first algorithm. */
-       if (prandom_u32_max(2))
+       if (get_random_u32_below(2))
                return 0;
 #endif
 
 
        /* randomly do sparse inode allocations */
        if (xfs_has_sparseinodes(tp->t_mountp) &&
            igeo->ialloc_min_blks < igeo->ialloc_blks)
-               do_sparse = prandom_u32_max(2);
+               do_sparse = get_random_u32_below(2);
 #endif
 
        /*
 
 
        ASSERT(error_tag < XFS_ERRTAG_MAX);
        randfactor = mp->m_errortag[error_tag];
-       if (!randfactor || prandom_u32_max(randfactor))
+       if (!randfactor || get_random_u32_below(randfactor))
                return false;
 
        xfs_warn_ratelimited(mp,
 
 /* Get a random number in [l, r) */
 static inline unsigned long damon_rand(unsigned long l, unsigned long r)
 {
-       return l + prandom_u32_max(r - l);
+       return l + get_random_u32_below(r - l);
 }
 
 /**
 
                bit = first_node(*maskp);
                break;
        default:
-               bit = find_nth_bit(maskp->bits, MAX_NUMNODES, prandom_u32_max(w));
+               bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
                break;
        }
        return bit;
 
        hdr->size = size;
        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                     PAGE_SIZE - sizeof(*hdr));
-       start = prandom_u32_max(hole) & ~(alignment - 1);
+       start = get_random_u32_below(hole) & ~(alignment - 1);
 
        /* Leave a random number of instructions before BPF code. */
        *image_ptr = &hdr->image[start];
 
        hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
                     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
-       start = prandom_u32_max(hole) & ~(alignment - 1);
+       start = get_random_u32_below(hole) & ~(alignment - 1);
 
        *image_ptr = &ro_header->image[start];
        *rw_image = &(*rw_header)->image[start];
 
        int i;
 
        for (i = 0; i < ITERS_PER_TEST; ++i) {
-               size_t size = prandom_u32_max(MAX_ENCODABLE_SIZE) + 1;
-               bool is_write = !!prandom_u32_max(2);
+               size_t size = get_random_u32_below(MAX_ENCODABLE_SIZE) + 1;
+               bool is_write = !!get_random_u32_below(2);
                unsigned long verif_masked_addr;
                long encoded_watchpoint;
                bool verif_is_write;
 
                order[n] = n;
 
        for (n = count - 1; n > 1; n--) {
-               r = prandom_u32_max(n + 1);
+               r = get_random_u32_below(n + 1);
                if (r != n) {
                        tmp = order[n];
                        order[n] = order[r];
 {
        struct stress *stress = container_of(work, typeof(*stress), work);
        const int nlocks = stress->nlocks;
-       struct ww_mutex *lock = stress->locks + prandom_u32_max(nlocks);
+       struct ww_mutex *lock = stress->locks + get_random_u32_below(nlocks);
        int err;
 
        do {
 
         * CPUs that are currently online.
         */
        for (i = 1; i < n; i++) {
-               cpu = prandom_u32_max(nr_cpu_ids);
+               cpu = get_random_u32_below(nr_cpu_ids);
                cpu = cpumask_next(cpu - 1, cpu_online_mask);
                if (cpu >= nr_cpu_ids)
                        cpu = cpumask_first(cpu_online_mask);
 
                        return false;
        }
 
-       if (attr->probability <= prandom_u32_max(100))
+       if (attr->probability <= get_random_u32_below(100))
                return false;
 
        if (!fail_stacktrace(attr))
 
        bitmap_zero(bitmap2, BITMAP_LEN);
 
        while (nbits--) {
-               __set_bit(prandom_u32_max(BITMAP_LEN), bitmap);
-               __set_bit(prandom_u32_max(BITMAP_LEN), bitmap2);
+               __set_bit(get_random_u32_below(BITMAP_LEN), bitmap);
+               __set_bit(get_random_u32_below(BITMAP_LEN), bitmap2);
        }
 
        test_find_next_bit(bitmap, BITMAP_LEN);
 
 {
        struct kobject *kobj = container_of(kref, struct kobject, kref);
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
-       unsigned long delay = HZ + HZ * prandom_u32_max(4);
+       unsigned long delay = HZ + HZ * get_random_u32_below(4);
        pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
                 kobject_name(kobj), kobj, __func__, kobj->parent, delay);
        INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
 
 
                do {
                        /* Must not choose the same location twice */
-                       errloc = prandom_u32_max(len);
+                       errloc = get_random_u32_below(len);
                } while (errlocs[errloc] != 0);
 
                errlocs[errloc] = 1;
        for (i = 0; i < eras; i++) {
                do {
                        /* Must not choose the same location twice */
-                       errloc = prandom_u32_max(len);
+                       errloc = get_random_u32_below(len);
                } while (errlocs[errloc] != 0);
 
                derrlocs[i] = errloc;
 
-               if (ewsc && prandom_u32_max(2)) {
+               if (ewsc && get_random_u32_below(2)) {
                        /* Erasure with the symbol intact */
                        errlocs[errloc] = 2;
                } else {
 
                int i;
 
                for_each_possible_cpu(i)
-                       *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth);
+                       *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
        }
        return 0;
 }
 
        hint = this_cpu_read(*sb->alloc_hint);
        if (unlikely(hint >= depth)) {
-               hint = depth ? prandom_u32_max(depth) : 0;
+               hint = depth ? get_random_u32_below(depth) : 0;
                this_cpu_write(*sb->alloc_hint, hint);
        }
 
 
        for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
                test_string_unescape("unescape", i, false);
        test_string_unescape("unescape inplace",
-                            prandom_u32_max(UNESCAPE_ANY + 1), true);
+                            get_random_u32_below(UNESCAPE_ANY + 1), true);
 
        /* Without dictionary */
        for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
 
 static void __init test_hexdump_set(int rowsize, bool ascii)
 {
        size_t d = min_t(size_t, sizeof(data_b), rowsize);
-       size_t len = prandom_u32_max(d) + 1;
+       size_t len = get_random_u32_below(d) + 1;
 
        test_hexdump(len, rowsize, 4, ascii);
        test_hexdump(len, rowsize, 2, ascii);
 static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
 {
        unsigned int i = 0;
-       int rs = (prandom_u32_max(2) + 1) * 16;
+       int rs = (get_random_u32_below(2) + 1) * 16;
 
        do {
                int gs = 1 << i;
-               size_t len = prandom_u32_max(rs) + gs;
+               size_t len = get_random_u32_below(rs) + gs;
 
                test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
        } while (i++ < 3);
        unsigned int i;
        int rowsize;
 
-       rowsize = (prandom_u32_max(2) + 1) * 16;
+       rowsize = (get_random_u32_below(2) + 1) * 16;
        for (i = 0; i < 16; i++)
                test_hexdump_set(rowsize, false);
 
-       rowsize = (prandom_u32_max(2) + 1) * 16;
+       rowsize = (get_random_u32_below(2) + 1) * 16;
        for (i = 0; i < 16; i++)
                test_hexdump_set(rowsize, true);
 
 
                KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
 
                 /* force some equivalencies */
-               el->value = prandom_u32_max(TEST_LIST_LEN / 3);
+               el->value = get_random_u32_below(TEST_LIST_LEN / 3);
                el->serial = i;
                el->poison1 = TEST_POISON1;
                el->poison2 = TEST_POISON2;
 
         * be able to print it as expected.
         */
        failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
-       rand = 1 + prandom_u32_max(elen+1);
+       rand = 1 + get_random_u32_below(elen + 1);
        /* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
        failed_tests += do_test(rand, expect, elen, fmt, ap);
        failed_tests += do_test(0, expect, elen, fmt, ap);
 
 
        pr_info("test %d random rhlist add/delete operations\n", entries);
        for (j = 0; j < entries; j++) {
-               u32 i = prandom_u32_max(entries);
-               u32 prand = prandom_u32_max(4);
+               u32 i = get_random_u32_below(entries);
+               u32 prand = get_random_u32_below(4);
 
                cond_resched();
 
                }
 
                if (prand & 2) {
-                       i = prandom_u32_max(entries);
+                       i = get_random_u32_below(entries);
                        if (test_bit(i, obj_in_table)) {
                                err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
                                WARN(err, "cannot remove element at slot %d", i);
 
        int i;
 
        for (i = 0; i < test_loop_count; i++) {
-               n = prandom_u32_max(100) + 1;
+               n = get_random_u32_below(100) + 1;
                p = vmalloc(n * PAGE_SIZE);
 
                if (!p)
                return -1;
 
        for (i = 0; i < 35000; i++) {
-               size = prandom_u32_max(PAGE_SIZE / 4) + 1;
+               size = get_random_u32_below(PAGE_SIZE / 4) + 1;
 
                /*
                 * Maximum PAGE_SIZE
                 */
-               align = 1 << (prandom_u32_max(11) + 1);
+               align = 1 << (get_random_u32_below(11) + 1);
 
                pcpu[i] = __alloc_percpu(size, align);
                if (!pcpu[i])
 
        for (i = n - 1; i > 0; i--)  {
                /* Cut the range. */
-               j = prandom_u32_max(i);
+               j = get_random_u32_below(i);
 
                /* Swap indexes. */
                swap(arr[i], arr[j]);
 
        KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
 
        for (i = 0; i < 256; i++) {
-               size = prandom_u32_max(1024) + 1;
+               size = get_random_u32_below(1024) + 1;
                ptr = kmalloc(size, GFP_KERNEL);
                KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
                KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
        }
 
        for (i = 0; i < 256; i++) {
-               order = prandom_u32_max(4) + 1;
+               order = get_random_u32_below(4) + 1;
                pages = alloc_pages(GFP_KERNEL, order);
                ptr = page_address(pages);
                KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
                return;
 
        for (i = 0; i < 256; i++) {
-               size = prandom_u32_max(1024) + 1;
+               size = get_random_u32_below(1024) + 1;
                ptr = vmalloc(size);
                KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
                KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
 
        unsigned long flags;
        struct slab *slab;
        void *addr;
-       const bool random_right_allocate = prandom_u32_max(2);
+       const bool random_right_allocate = get_random_u32_below(2);
        const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
-                                 !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
+                                 !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
 
        int iter;
 
        for (iter = 0; iter < 5; iter++) {
-               const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
-                                                    (iter & 1) ? ctor_set_x : NULL);
+               const size_t size = setup_test_cache(test, 8 + get_random_u32_below(300),
+                                                    0, (iter & 1) ? ctor_set_x : NULL);
                void *objects[] = {
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
                        test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
 
                return false;
 
        freelist_count = oo_objects(s->oo);
-       pos = prandom_u32_max(freelist_count);
+       pos = get_random_u32_below(freelist_count);
 
        page_limit = slab->objects * s->size;
        start = fixup_red_left(s, slab_address(slab));
 
                if (si->highest_bit <= si->lowest_bit)
                        return;
                next = si->lowest_bit +
-                       prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
+                       get_random_u32_below(si->highest_bit - si->lowest_bit + 1);
                next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
                next = max_t(unsigned int, next, si->lowest_bit);
        }
                 */
                for_each_possible_cpu(cpu) {
                        per_cpu(*p->cluster_next_cpu, cpu) =
-                               1 + prandom_u32_max(p->highest_bit);
+                               1 + get_random_u32_below(p->highest_bit);
                }
                nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
 
 
 {
        unsigned long delay;
 
-       delay = prandom_u32_max(msecs_to_jiffies(garp_join_time));
+       delay = get_random_u32_below(msecs_to_jiffies(garp_join_time));
        mod_timer(&app->join_timer, jiffies + delay);
 }
 
 
 {
        unsigned long delay;
 
-       delay = prandom_u32_max(msecs_to_jiffies(mrp_join_time));
+       delay = get_random_u32_below(msecs_to_jiffies(mrp_join_time));
        mod_timer(&app->join_timer, jiffies + delay);
 }
 
 
        unsigned int msecs;
 
        msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
-       msecs += prandom_u32_max(2 * BATADV_JITTER);
+       msecs += get_random_u32_below(2 * BATADV_JITTER);
 
        return jiffies + msecs_to_jiffies(msecs);
 }
 /* when do we schedule a ogm packet to be sent */
 static unsigned long batadv_iv_ogm_fwd_send_time(void)
 {
-       return jiffies + msecs_to_jiffies(prandom_u32_max(BATADV_JITTER / 2));
+       return jiffies + msecs_to_jiffies(get_random_u32_below(BATADV_JITTER / 2));
 }
 
 /* apply hop penalty for a normal link */
 
        unsigned int msecs;
 
        msecs = atomic_read(&hard_iface->bat_v.elp_interval) - BATADV_JITTER;
-       msecs += prandom_u32_max(2 * BATADV_JITTER);
+       msecs += get_random_u32_below(2 * BATADV_JITTER);
 
        queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.elp_wq,
                           msecs_to_jiffies(msecs));
 
        unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000;
 
        /* msecs * [0.9, 1.1] */
-       msecs += prandom_u32_max(msecs / 5) - (msecs / 10);
+       msecs += get_random_u32_below(msecs / 5) - (msecs / 10);
        queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq,
                           msecs_to_jiffies(msecs / 1000));
 }
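
For concreteness, take a hypothetical base of 100000 (units aside):
get_random_u32_below(base / 5) draws from 0..19999, and subtracting base / 10
shifts the jitter to -10000..+9999, so the queued delay lands in
[90000, 109999], which is the commented [0.9, 1.1] window with an exclusive
upper edge.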
                return;
 
        msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
-       msecs += prandom_u32_max(2 * BATADV_JITTER);
+       msecs += get_random_u32_below(2 * BATADV_JITTER);
        queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
                           msecs_to_jiffies(msecs));
 }
 
 static u8 batadv_nc_random_weight_tq(u8 tq)
 {
        /* randomize the estimated packet loss (max TQ - estimated TQ) */
-       u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
+       u8 rand_tq = get_random_u32_below(BATADV_TQ_MAX_VALUE + 1 - tq);
 
        /* convert to (randomized) estimated tq again */
        return BATADV_TQ_MAX_VALUE - rand_tq;
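
Because get_random_u32_below(BATADV_TQ_MAX_VALUE + 1 - tq) covers
0..(BATADV_TQ_MAX_VALUE - tq) inclusive, the subtraction maps it to a uniform
value in [tq, BATADV_TQ_MAX_VALUE]: the randomized estimate can only move the
TQ up, never below the measured value.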
 
         * calculate conn info age as random value between min/max set in hdev.
         */
        conn_info_age = hdev->conn_info_min_age +
-                       prandom_u32_max(hdev->conn_info_max_age -
-                                       hdev->conn_info_min_age);
+                       get_random_u32_below(hdev->conn_info_max_age -
+                                            hdev->conn_info_min_age);
 
        /* Query controller to refresh cached values if they are too old or were
         * never read.
 
                int time_ms = 0;
 
                if (err)
-                       time_ms = 10 + prandom_u32_max(16);
+                       time_ms = 10 + get_random_u32_below(16);
 
                j1939_tp_schedule_txtimer(first, time_ms);
        }
 
                if (session->tx_retry < J1939_XTP_TX_RETRY_LIMIT) {
                        session->tx_retry++;
                        j1939_tp_schedule_txtimer(session,
-                                                 10 + prandom_u32_max(16));
+                                                 10 + get_random_u32_below(16));
                } else {
                        netdev_alert(priv->ndev, "%s: 0x%p: tx retry count reached\n",
                                     __func__, session);
 
                                max--;
                }
 
-               n = prandom_u32_max(max);
+               n = get_random_u32_below(max);
                if (o >= 0 && n >= o)
                        n++;
 
 
 
 static int pick_random_replica(const struct ceph_osds *acting)
 {
-       int i = prandom_u32_max(acting->size);
+       int i = get_random_u32_below(acting->size);
 
        dout("%s picked osd%d, primary osd%d\n", __func__,
             acting->osds[i], acting->primary);
 
 
 unsigned long neigh_rand_reach_time(unsigned long base)
 {
-       return base ? prandom_u32_max(base) + (base >> 1) : 0;
+       return base ? get_random_u32_below(base) + (base >> 1) : 0;
 }
 EXPORT_SYMBOL(neigh_rand_reach_time);
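
With base = 1000, get_random_u32_below(base) + (base >> 1) is uniform over
[500, 1499]: the classic 0.5x..1.5x window around the configured base
reachable time. A base of 0 short-circuits to 0, so a disabled timer is never
randomized.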
 
                    struct sk_buff *skb)
 {
        unsigned long sched_next = jiffies +
-                       prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
+                       get_random_u32_below(NEIGH_VAR(p, PROXY_DELAY));
 
        if (p->qlen > NEIGH_VAR(p, PROXY_QLEN)) {
                kfree_skb(skb);
 
                                pkt_dev->curfl = 0; /*reset */
                }
        } else {
-               flow = prandom_u32_max(pkt_dev->cflows);
+               flow = get_random_u32_below(pkt_dev->cflows);
                pkt_dev->curfl = flow;
 
                if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
        else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) {
                __u16 t;
                if (pkt_dev->flags & F_QUEUE_MAP_RND) {
-                       t = prandom_u32_max(pkt_dev->queue_map_max -
-                                           pkt_dev->queue_map_min + 1) +
+                       t = get_random_u32_below(pkt_dev->queue_map_max -
+                                                pkt_dev->queue_map_min + 1) +
                            pkt_dev->queue_map_min;
                } else {
                        t = pkt_dev->cur_queue_map + 1;
                __u32 tmp;
 
                if (pkt_dev->flags & F_MACSRC_RND)
-                       mc = prandom_u32_max(pkt_dev->src_mac_count);
+                       mc = get_random_u32_below(pkt_dev->src_mac_count);
                else {
                        mc = pkt_dev->cur_src_mac_offset++;
                        if (pkt_dev->cur_src_mac_offset >=
                __u32 tmp;
 
                if (pkt_dev->flags & F_MACDST_RND)
-                       mc = prandom_u32_max(pkt_dev->dst_mac_count);
+                       mc = get_random_u32_below(pkt_dev->dst_mac_count);
 
                else {
                        mc = pkt_dev->cur_dst_mac_offset++;
        }
 
        if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) {
-               pkt_dev->vlan_id = prandom_u32_max(4096);
+               pkt_dev->vlan_id = get_random_u32_below(4096);
        }
 
        if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) {
-               pkt_dev->svlan_id = prandom_u32_max(4096);
+               pkt_dev->svlan_id = get_random_u32_below(4096);
        }
 
        if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) {
                if (pkt_dev->flags & F_UDPSRC_RND)
-                       pkt_dev->cur_udp_src = prandom_u32_max(
+                       pkt_dev->cur_udp_src = get_random_u32_below(
                                pkt_dev->udp_src_max - pkt_dev->udp_src_min) +
                                pkt_dev->udp_src_min;
 
 
        if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) {
                if (pkt_dev->flags & F_UDPDST_RND) {
-                       pkt_dev->cur_udp_dst = prandom_u32_max(
+                       pkt_dev->cur_udp_dst = get_random_u32_below(
                                pkt_dev->udp_dst_max - pkt_dev->udp_dst_min) +
                                pkt_dev->udp_dst_min;
                } else {
                if (imn < imx) {
                        __u32 t;
                        if (pkt_dev->flags & F_IPSRC_RND)
-                               t = prandom_u32_max(imx - imn) + imn;
+                               t = get_random_u32_below(imx - imn) + imn;
                        else {
                                t = ntohl(pkt_dev->cur_saddr);
                                t++;
                                if (pkt_dev->flags & F_IPDST_RND) {
 
                                        do {
-                                               t = prandom_u32_max(imx - imn) +
+                                               t = get_random_u32_below(imx - imn) +
                                                    imn;
                                                s = htonl(t);
                                        } while (ipv4_is_loopback(s) ||
        if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) {
                __u32 t;
                if (pkt_dev->flags & F_TXSIZE_RND) {
-                       t = prandom_u32_max(pkt_dev->max_pkt_size -
-                                           pkt_dev->min_pkt_size) +
+                       t = get_random_u32_below(pkt_dev->max_pkt_size -
+                                                pkt_dev->min_pkt_size) +
                            pkt_dev->min_pkt_size;
                } else {
                        t = pkt_dev->cur_pkt_size + 1;
                pkt_dev->cur_pkt_size = t;
        } else if (pkt_dev->n_imix_entries > 0) {
                struct imix_pkt *entry;
-               __u32 t = prandom_u32_max(IMIX_PRECISION);
+               __u32 t = get_random_u32_below(IMIX_PRECISION);
                __u8 entry_index = pkt_dev->imix_distribution[t];
 
                entry = &pkt_dev->imix_entries[entry_index];
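
Two range-mapping idioms recur throughout the pktgen hunks above, and they
differ by one: below(max - min) + min is uniform over the half-open
[min, max), while below(max - min + 1) + min covers [min, max] inclusive (the
queue_map case). A user-space sketch, assuming the random_below() helper from
the first sketch and callers that guarantee min < max (or min <= max for the
inclusive form):

static uint32_t range_exclusive(uint32_t min, uint32_t max)
{
        return random_below(max - min) + min;           /* [min, max) */
}

static uint32_t range_inclusive(uint32_t min, uint32_t max)
{
        return random_below(max - min + 1) + min;       /* [min, max] */
}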
 
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
        if (sk_stream_memory_free(sk))
-               current_timeo = vm_wait = prandom_u32_max(HZ / 5) + 2;
+               current_timeo = vm_wait = get_random_u32_below(HZ / 5) + 2;
 
        add_wait_queue(sk_sleep(sk), &wait);
 
 
                /* We want to use a credit of one on average, but need to randomize
                 * it for security reasons.
                 */
-               credit = max_t(int, credit - prandom_u32_max(3), 0);
+               credit = max_t(int, credit - get_random_u32_below(3), 0);
                rc = true;
        }
        WRITE_ONCE(icmp_global.credit, credit);
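
get_random_u32_below(3) returns 0, 1 or 2 with equal probability, so the
expected charge is (0 + 1 + 2) / 3 = 1 token per packet while the
instantaneous cost stays unpredictable; the max_t() clamp keeps the credit
from going negative.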
 
 /* It must be called with locked im->lock */
 static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 {
-       int tv = prandom_u32_max(max_delay);
+       int tv = get_random_u32_below(max_delay);
 
        im->tm_running = 1;
        if (!mod_timer(&im->timer, jiffies+tv+2))
 
 static void igmp_gq_start_timer(struct in_device *in_dev)
 {
-       int tv = prandom_u32_max(in_dev->mr_maxdelay);
+       int tv = get_random_u32_below(in_dev->mr_maxdelay);
        unsigned long exp = jiffies + tv + 2;
 
        if (in_dev->mr_gq_running &&
 
 static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
 {
-       int tv = prandom_u32_max(delay);
+       int tv = get_random_u32_below(delay);
 
        if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
                in_dev_hold(in_dev);
 
        if (likely(remaining > 1))
                remaining &= ~1U;
 
-       offset = prandom_u32_max(remaining);
+       offset = get_random_u32_below(remaining);
        /* __inet_hash_connect() favors ports having @low parity
         * We do the opposite to not pollute connect() users.
         */
 
         * on low contention the randomness is maximal and on high contention
         * it may be nonexistent.
         */
-       i = max_t(int, i, prandom_u32_max(8) * 2);
+       i = max_t(int, i, get_random_u32_below(8) * 2);
        WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
        /* Head lock still held and bh's disabled */
 
        old = READ_ONCE(*p_tstamp);
 
        if (old != now && cmpxchg(p_tstamp, old, now) == old)
-               delta = prandom_u32_max(now - old);
+               delta = get_random_u32_below(now - old);
 
        /* If UBSAN reports an error there, please make sure your compiler
         * supports -fno-strict-overflow before reporting it that was a bug
        } else {
                /* Randomize max depth to avoid some side-channel attacks. */
                int max_depth = FNHE_RECLAIM_DEPTH +
-                               prandom_u32_max(FNHE_RECLAIM_DEPTH);
+                               get_random_u32_below(FNHE_RECLAIM_DEPTH);
 
                while (depth > max_depth) {
                        fnhe_remove_oldest(hash);
 
        struct bbr *bbr = inet_csk_ca(sk);
 
        bbr->mode = BBR_PROBE_BW;
-       bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
+       bbr->cycle_idx = CYCLE_LEN - 1 - get_random_u32_below(bbr_cycle_rand);
        bbr_advance_cycle_phase(sk);    /* flip to next phase of gain cycle */
 }
 
 
                u32 half = (ack_limit + 1) >> 1;
 
                WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
-               WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit));
+               WRITE_ONCE(net->ipv4.tcp_challenge_count,
+                          half + get_random_u32_below(ack_limit));
        }
        count = READ_ONCE(net->ipv4.tcp_challenge_count);
        if (count > 0) {
 
 static inline s32 rfc3315_s14_backoff_init(s32 irt)
 {
        /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
-       u64 tmp = (900000 + prandom_u32_max(200001)) * (u64)irt;
+       u64 tmp = (900000 + get_random_u32_below(200001)) * (u64)irt;
        do_div(tmp, 1000000);
        return (s32)tmp;
 }
 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
 {
        /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
-       u64 tmp = (1900000 + prandom_u32_max(200001)) * (u64)rt;
+       u64 tmp = (1900000 + get_random_u32_below(200001)) * (u64)rt;
        do_div(tmp, 1000000);
        if ((s32)tmp > mrt) {
                /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
-               tmp = (900000 + prandom_u32_max(200001)) * (u64)mrt;
+               tmp = (900000 + get_random_u32_below(200001)) * (u64)mrt;
                do_div(tmp, 1000000);
        }
        return (s32)tmp;
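
The RFC 3315 helpers above scale by a random factor in fixed point: the draw
spans 0..200000, so the multiplier covers 900000..1100000 (or
1900000..2100000) parts per million. A quick user-space check of the bounds,
assuming a hypothetical irt of 1000:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t irt = 1000;

        /* smallest and largest possible draws from below(200001) */
        assert((900000 + 0) * irt / 1000000 == 900);            /* 0.9x */
        assert((900000 + 200000) * irt / 1000000 == 1100);      /* 1.1x */
        return 0;
}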
        if (ifp->flags & IFA_F_OPTIMISTIC)
                rand_num = 0;
        else
-               rand_num = prandom_u32_max(idev->cnf.rtr_solicit_delay ?: 1);
+               rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ?: 1);
 
        nonce = 0;
        if (idev->cnf.enhanced_dad ||
 
 /* called with mc_lock */
 static void mld_gq_start_work(struct inet6_dev *idev)
 {
-       unsigned long tv = prandom_u32_max(idev->mc_maxdelay);
+       unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
 
        idev->mc_gq_running = 1;
        if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
 /* called with mc_lock */
 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
 {
-       unsigned long tv = prandom_u32_max(delay);
+       unsigned long tv = get_random_u32_below(delay);
 
        if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
                in6_dev_hold(idev);
 /* called with mc_lock */
 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
 {
-       unsigned long tv = prandom_u32_max(delay);
+       unsigned long tv = get_random_u32_below(delay);
 
        if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
                in6_dev_hold(idev);
        }
 
        if (delay >= resptime)
-               delay = prandom_u32_max(resptime);
+               delay = get_random_u32_below(resptime);
 
        if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
                refcount_inc(&ma->mca_refcnt);
 
        igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
 
-       delay = prandom_u32_max(unsolicited_report_interval(ma->idev));
+       delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
 
        if (cancel_delayed_work(&ma->mca_work)) {
                refcount_dec(&ma->mca_refcnt);
 
        net->ipv6.rt6_stats->fib_rt_cache++;
 
        /* Randomize max depth to avoid some side-channel attacks. */
-       max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
+       max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
        while (bucket->depth > max_depth)
                rt6_exception_remove_oldest(bucket);
 
 
         * from 0 to total_weight
         */
        total_weight += 1;
-       rweight1 = prandom_u32_max(total_weight);
-       rweight2 = prandom_u32_max(total_weight);
+       rweight1 = get_random_u32_below(total_weight);
+       rweight2 = get_random_u32_below(total_weight);
 
        /* Pick two weighted servers */
        list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
 
                                           nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
-       max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+       max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
 
        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
                goto dying;
        }
 
-       max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+       max_chainlen = MIN_CHAINLEN + get_random_u32_below(MAX_CHAINLEN);
        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
 
                if (res != -EBUSY || (--attempts_left < 0))
                        break;
 
-               port = min + prandom_u32_max(range);
+               port = min + get_random_u32_below(range);
        }
 
        return 0;
 
                /* Bind collision, search negative portid values. */
                if (rover == -4096)
                        /* rover will be in range [S32_MIN, -4097] */
-                       rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
+                       rover = S32_MIN + get_random_u32_below(-4096 - S32_MIN);
                else if (rover >= -4096)
                        rover = -4097;
                portid = rover--;
 
                if (READ_ONCE(history[i]) == rxhash)
                        count++;
 
-       victim = prandom_u32_max(ROLLOVER_HLEN);
+       victim = get_random_u32_below(ROLLOVER_HLEN);
 
        /* Avoid dirtying the cache line if possible */
        if (READ_ONCE(history[victim]) != rxhash)
                                     struct sk_buff *skb,
                                     unsigned int num)
 {
-       return prandom_u32_max(num);
+       return get_random_u32_below(num);
 }
 
 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
 
 static int gact_net_rand(struct tcf_gact *gact)
 {
        smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
-       if (prandom_u32_max(gact->tcfg_pval))
+       if (get_random_u32_below(gact->tcfg_pval))
                return gact->tcf_action;
        return gact->tcfg_paction;
 }
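
A draw from [0, pval) is zero with probability exactly 1/pval, so
gact_net_rand() picks the probabilistic action once per pval packets on
average; the psample hunk below applies the same one-in-N test to its rate
parameter.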
 
        psample_group = rcu_dereference_bh(s->psample_group);
 
        /* randomly sample packets according to rate */
-       if (psample_group && (prandom_u32_max(s->rate) == 0)) {
+       if (psample_group && (get_random_u32_below(s->rate) == 0)) {
                if (!skb_at_tc_ingress(skb)) {
                        md.in_ifindex = skb->skb_iif;
                        md.out_ifindex = skb->dev->ifindex;
 
        int retrys = 3;
 
        do {
-               *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
+               *pidx = (q->head + get_random_u32_below(choke_len(q))) & q->tab_mask;
                skb = q->tab[*pidx];
                if (skb)
                        return skb;
 
                        goto finish_segs;
                }
 
-               skb->data[prandom_u32_max(skb_headlen(skb))] ^=
-                       1<<prandom_u32_max(8);
+               skb->data[get_random_u32_below(skb_headlen(skb))] ^=
+                       1<<get_random_u32_below(8);
        }
 
        if (unlikely(sch->q.qlen >= sch->limit)) {
 
 
                inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
-               rover = prandom_u32_max(remaining) + low;
+               rover = get_random_u32_below(remaining) + low;
 
                do {
                        rover++;
 
        if ((time_before(transport->hb_timer.expires, expires) ||
             !timer_pending(&transport->hb_timer)) &&
            !mod_timer(&transport->hb_timer,
-                      expires + prandom_u32_max(transport->rto)))
+                      expires + get_random_u32_below(transport->rto)))
                sctp_transport_hold(transport);
 }
 
 
 
        /* Consider removing either the first or the last */
        if (cache_defer_cnt > DFR_MAX) {
-               if (prandom_u32_max(2))
+               if (get_random_u32_below(2))
                        discard = list_entry(cache_defer_list.next,
                                             struct cache_deferred_req, recent);
                else
 
        if (max < min)
                return -EADDRINUSE;
        range = max - min + 1;
-       rand = prandom_u32_max(range);
+       rand = get_random_u32_below(range);
        return rand + min;
 }
 
 
        struct net *net = sock_net(sk);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
-       u32 portid = prandom_u32_max(remaining) + TIPC_MIN_PORT;
+       u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
 
        while (remaining--) {
                portid++;
 
 
        if (!port)
                port = LAST_RESERVED_PORT + 1 +
-                       prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+                       get_random_u32_below(U32_MAX - LAST_RESERVED_PORT);
 
        vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
 
 
        } else {
                u32 spi = 0;
                for (h = 0; h < high-low+1; h++) {
-                       spi = low + prandom_u32_max(high - low + 1);
+                       spi = low + get_random_u32_below(high - low + 1);
                        x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
                                newspi = htonl(spi);