}
 }
 
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+/*
+ * Build a random permutation of [0, count) in @list using the PRNG
+ * state @state.  Used to pre-compute (or, at boot, compute on the fly)
+ * the order in which free objects are handed out from a slab.
+ */
+static void freelist_randomize(struct rnd_state *state, freelist_idx_t *list,
+                       size_t count)
+{
+       size_t i;
+       unsigned int rand;
+
+       /* Start from the identity permutation 0..count-1 */
+       for (i = 0; i < count; i++)
+               list[i] = i;
+
+       /* Fisher-Yates shuffle */
+       for (i = count - 1; i > 0; i--) {
+               rand = prandom_u32_state(state);
+               rand %= (i + 1);
+               swap(list[i], list[rand]);
+       }
+}
+
+/*
+ * Create a random sequence per cache.  Allocates cachep->random_seq
+ * (cachep->num entries) and fills it with a random permutation that
+ * later freelist initializations draw from.  Returns 0 on success
+ * (including the trivial count < 2 case) or -ENOMEM.
+ */
+static int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp)
+{
+       unsigned int seed, count = cachep->num;
+       struct rnd_state state;
+
+       /* Nothing to randomize with fewer than two objects per slab */
+       if (count < 2)
+               return 0;
+
+       /* If it fails, we will just use the global lists */
+       cachep->random_seq = kcalloc(count, sizeof(freelist_idx_t), gfp);
+       if (!cachep->random_seq)
+               return -ENOMEM;
+
+       /* Get best entropy at this stage */
+       get_random_bytes_arch(&seed, sizeof(seed));
+       prandom_seed_state(&state, seed);
+
+       freelist_randomize(&state, cachep->random_seq, count);
+       return 0;
+}
+
+/* Destroy the per-cache random freelist sequence */
+static void cache_random_seq_destroy(struct kmem_cache *cachep)
+{
+       kfree(cachep->random_seq);
+       /* Clear the pointer so later shuffles fall back to the PRNG path */
+       cachep->random_seq = NULL;
+}
+#else
+/* CONFIG_SLAB_FREELIST_RANDOM disabled: no-op stubs */
+static inline int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp)
+{
+       return 0;
+}
+static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
+#endif /* CONFIG_SLAB_FREELIST_RANDOM */
+
+
 /*
  * Initialisation.  Called after the page allocator have been initialised and
  * before smp_init().
        int i;
        struct kmem_cache_node *n;
 
+       cache_random_seq_destroy(cachep);
+
        free_percpu(cachep->cpu_cache);
 
        /* NUMA: free the node structures */
 #endif
 }
 
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+/* Hold information during a freelist initialization */
+union freelist_init_state {
+       struct {
+               unsigned int pos;       /* next index into the pre-computed list */
+               freelist_idx_t *list;   /* pre-computed random sequence */
+               unsigned int count;     /* number of objects in the slab */
+               unsigned int rand;      /* random shift applied to each entry */
+       };
+       struct rnd_state rnd_state;     /* PRNG state when no list is available */
+};
+
+/*
+ * Initialize the state based on the randomization method available.
+ * Return true if the pre-computed list is available, false otherwise.
+ */
+static bool freelist_state_initialize(union freelist_init_state *state,
+                               struct kmem_cache *cachep,
+                               unsigned int count)
+{
+       bool ret;
+       unsigned int rand;
+
+       /* Use best entropy available to define a random shift */
+       get_random_bytes_arch(&rand, sizeof(rand));
+
+       /* Use a random state if the pre-computed list is not available */
+       if (!cachep->random_seq) {
+               prandom_seed_state(&state->rnd_state, rand);
+               ret = false;
+       } else {
+               state->list = cachep->random_seq;
+               state->count = count;
+               state->pos = 0;
+               state->rand = rand;
+               ret = true;
+       }
+       return ret;
+}
+
+/* Get the next entry on the list and randomize it using a random shift */
+static freelist_idx_t next_random_slot(union freelist_init_state *state)
+{
+       /* The per-slab random shift keeps slabs of one cache distinct */
+       return (state->list[state->pos++] + state->rand) % state->count;
+}
+
+/*
+ * Shuffle the freelist initialization state based on pre-computed lists.
+ * return true if the list was successfully shuffled, false otherwise.
+ */
+static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page)
+{
+       unsigned int objfreelist = 0, i, count = cachep->num;
+       union freelist_init_state state;
+       bool precomputed;
+
+       /* Nothing to shuffle with fewer than two objects */
+       if (count < 2)
+               return false;
+
+       precomputed = freelist_state_initialize(&state, cachep, count);
+
+       /* Take a random entry as the objfreelist */
+       if (OBJFREELIST_SLAB(cachep)) {
+               if (!precomputed)
+                       objfreelist = count - 1;
+               else
+                       objfreelist = next_random_slot(&state);
+               page->freelist = index_to_obj(cachep, page, objfreelist) +
+                                               obj_offset(cachep);
+               /* That slot is consumed; shuffle only the remaining ones */
+               count--;
+       }
+
+       /*
+        * On early boot, generate the list dynamically.
+        * Later use a pre-computed list for speed.
+        */
+       if (!precomputed) {
+               /* page->freelist is used directly as the index array here */
+               freelist_randomize(&state.rnd_state, page->freelist, count);
+       } else {
+               for (i = 0; i < count; i++)
+                       set_free_obj(page, i, next_random_slot(&state));
+       }
+
+       /* Place the reserved objfreelist entry in the last slot */
+       if (OBJFREELIST_SLAB(cachep))
+               set_free_obj(page, cachep->num - 1, objfreelist);
+
+       return true;
+}
+#else
+/* Randomization disabled: callers keep the sequential freelist */
+static inline bool shuffle_freelist(struct kmem_cache *cachep,
+                               struct page *page)
+{
+       return false;
+}
+#endif /* CONFIG_SLAB_FREELIST_RANDOM */
+
 static void cache_init_objs(struct kmem_cache *cachep,
                            struct page *page)
 {
        int i;
        void *objp;
+       bool shuffled;
 
        cache_init_objs_debug(cachep, page);
 
-       if (OBJFREELIST_SLAB(cachep)) {
+       /* Try to randomize the freelist if enabled */
+       shuffled = shuffle_freelist(cachep, page);
+
+       if (!shuffled && OBJFREELIST_SLAB(cachep)) {
                page->freelist = index_to_obj(cachep, page, cachep->num - 1) +
                                                obj_offset(cachep);
        }
                        kasan_poison_object_data(cachep, objp);
                }
 
-               set_free_obj(page, i, i);
+               if (!shuffled)
+                       set_free_obj(page, i, i);
        }
 }
 
        int shared = 0;
        int batchcount = 0;
 
+       err = cache_random_seq_create(cachep, gfp);
+       if (err)
+               goto end;
+
        if (!is_root_cache(cachep)) {
                struct kmem_cache *root = memcg_root_cache(cachep);
                limit = root->limit;
        batchcount = (limit + 1) / 2;
 skip_setup:
        err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
+end:
        if (err)
                pr_err("enable_cpucache failed for %s, error %d\n",
                       cachep->name, -err);