 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+static void set_task_reclaim_state(struct task_struct *task,
+                                  struct reclaim_state *rs)
+{
+       /* Check for an overwrite */
+       WARN_ON_ONCE(rs && task->reclaim_state);
+
+       /* Check for the nulling of an already-nulled member */
+       WARN_ON_ONCE(!rs && !task->reclaim_state);
+
+       task->reclaim_state = rs;
+}
+
 #ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
 {
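The hunk above adds the helper just after the CONFIG_MEMCG_KMEM block and before global_reclaim(). It centralizes the assignment that every reclaim entry point used to open-code, and its two WARN_ON_ONCE() checks enforce a simple invariant: a task's reclaim_state alternates strictly between NULL and set, so setting it while already set (a nested or leaked state) and clearing it twice in a row both get flagged. Here is a minimal userspace model of that logic, with simplified stand-ins for the kernel's task_struct, reclaim_state, and WARN_ON_ONCE() (which in the kernel warns only once per call site, unlike the stub below):

    #include <stdio.h>

    /* Simplified stand-ins, not the kernel's definitions. */
    struct reclaim_state { unsigned long reclaimed_slab; };
    struct task_struct  { struct reclaim_state *reclaim_state; };

    #define WARN_ON_ONCE(cond) \
            do { if (cond) fprintf(stderr, "WARN: %s\n", #cond); } while (0)

    static void set_task_reclaim_state(struct task_struct *task,
                                       struct reclaim_state *rs)
    {
            WARN_ON_ONCE(rs && task->reclaim_state);   /* overwrite      */
            WARN_ON_ONCE(!rs && !task->reclaim_state); /* already nulled */
            task->reclaim_state = rs;
    }

    int main(void)
    {
            struct task_struct task = { 0 };
            struct reclaim_state rs = { 0 };

            set_task_reclaim_state(&task, &rs);   /* legal: NULL -> set  */
            set_task_reclaim_state(&task, NULL);  /* legal: set -> NULL  */

            set_task_reclaim_state(&task, &rs);
            set_task_reclaim_state(&task, &rs);   /* warns: overwrite    */

            set_task_reclaim_state(&task, NULL);
            set_task_reclaim_state(&task, NULL);  /* warns: double clear */
            return 0;
    }

The first pair of calls is silent; each of the two misuse pairs prints one warning.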
        if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
                return 1;
 
-       current->reclaim_state = &sc.reclaim_state;
+       set_task_reclaim_state(current, &sc.reclaim_state);
        trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
-       current->reclaim_state = NULL;
+       set_task_reclaim_state(current, NULL);
 
        return nr_reclaimed;
 }
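This hunk converts try_to_free_pages(), the direct-reclaim entry point: the state is set only once the throttle check can no longer bail out, and cleared right after do_try_to_free_pages() returns. The remaining hunks make the same substitution in mem_cgroup_shrink_node(), try_to_free_mem_cgroup_pages(), balance_pgdat(), shrink_all_memory(), and __node_reclaim(). The pointer lives on the task, rather than being passed down as an argument, so that pages freed deep in the call chain, notably slab pages freed by shrinkers, can be credited to whoever is reclaiming without threading a parameter through every layer. A rough userspace sketch of that producer/consumer pattern (free_slab_pages() and the explicit harvest step are illustrative, not the kernel's exact code):

    #include <stdio.h>

    struct reclaim_state { unsigned long reclaimed_slab; };
    struct task_struct  { struct reclaim_state *reclaim_state; };

    /* Model of "current", the task running on this CPU. */
    static struct task_struct current_task;
    #define current (&current_task)

    /* Deep in a free path: credit the reclaimer, if there is one. */
    static void free_slab_pages(unsigned long nr_pages)
    {
            if (current->reclaim_state)
                    current->reclaim_state->reclaimed_slab += nr_pages;
    }

    int main(void)
    {
            struct reclaim_state rs = { 0 };
            unsigned long nr_reclaimed = 0;

            current->reclaim_state = &rs;  /* set_task_reclaim_state(current, &rs)  */
            free_slab_pages(3);            /* e.g. a shrinker frees slab pages      */
            free_slab_pages(2);

            nr_reclaimed += rs.reclaimed_slab;  /* harvest, as the reclaim loop does */
            rs.reclaimed_slab = 0;
            current->reclaim_state = NULL; /* set_task_reclaim_state(current, NULL) */

            printf("reclaimed %lu pages\n", nr_reclaimed);
            return 0;
    }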
        };
        unsigned long lru_pages;
 
-       current->reclaim_state = &sc.reclaim_state;
+       set_task_reclaim_state(current, &sc.reclaim_state);
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
-       current->reclaim_state = NULL;
+       set_task_reclaim_state(current, NULL);
        *nr_scanned = sc.nr_scanned;
 
        return sc.nr_reclaimed;
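That was mem_cgroup_shrink_node(), the memcg soft-limit reclaim path; the next hunk is in try_to_free_mem_cgroup_pages().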
                .may_shrinkslab = 1,
        };
 
-       current->reclaim_state = &sc.reclaim_state;
+       set_task_reclaim_state(current, &sc.reclaim_state);
        /*
         * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
         * take care of from where we get pages. So the node where we start the
        psi_memstall_leave(&pflags);
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
-       current->reclaim_state = NULL;
+       set_task_reclaim_state(current, NULL);
 
        return nr_reclaimed;
 }
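That was try_to_free_mem_cgroup_pages(), used for limit-driven memcg reclaim; next comes kswapd's balance_pgdat().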
                .may_unmap = 1,
        };
 
-       current->reclaim_state = &sc.reclaim_state;
+       set_task_reclaim_state(current, &sc.reclaim_state);
        psi_memstall_enter(&pflags);
        __fs_reclaim_acquire();
 
        snapshot_refaults(NULL, pgdat);
        __fs_reclaim_release();
        psi_memstall_leave(&pflags);
-       current->reclaim_state = NULL;
+       set_task_reclaim_state(current, NULL);
 
        /*
         * Return the order kswapd stopped reclaiming at as
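That was balance_pgdat(), the main loop of kswapd; the following hunk is shrink_all_memory(), used when hibernating.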
                .hibernation_mode = 1,
        };
        struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
-       struct task_struct *p = current;
        unsigned long nr_reclaimed;
        unsigned int noreclaim_flag;
 
        fs_reclaim_acquire(sc.gfp_mask);
        noreclaim_flag = memalloc_noreclaim_save();
-       p->reclaim_state = &sc.reclaim_state;
+       set_task_reclaim_state(current, &sc.reclaim_state);
 
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
-       p->reclaim_state = NULL;
+       set_task_reclaim_state(current, NULL);
        memalloc_noreclaim_restore(noreclaim_flag);
        fs_reclaim_release(sc.gfp_mask);
 
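Besides switching to the helper, the shrink_all_memory() hunk drops the function's now-unneeded local p alias for current. The final hunk is __node_reclaim().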
         */
        noreclaim_flag = memalloc_noreclaim_save();
        p->flags |= PF_SWAPWRITE;
-       p->reclaim_state = &sc.reclaim_state;
+       set_task_reclaim_state(p, &sc.reclaim_state);
 
        if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
                /*
                } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
        }
 
-       p->reclaim_state = NULL;
+       set_task_reclaim_state(p, NULL);
        current->flags &= ~PF_SWAPWRITE;
        memalloc_noreclaim_restore(noreclaim_flag);
        fs_reclaim_release(sc.gfp_mask);
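That last hunk is __node_reclaim(), the node-local reclaim path for NUMA systems; it keeps the function's existing p alias for current (note the unchanged current->flags line next to p->flags), so the helper is called with p here while the other sites pass current directly. With the conversion complete, no open-coded assignment to ->reclaim_state remains in these paths: any future caller that sets the state while it is already set, or clears it when it was never set, now draws a one-time warning instead of silently clobbering or leaking the pointer.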