struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
 
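+/*
+ * Check whether @lruvec is the lruvec whose lru_lock protects @page's
+ * LRU state, so relock_page_lruvec*() can tell when an already-held
+ * lock can be kept.
+ */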
+static inline bool lruvec_holds_page_lru_lock(struct page *page,
+                                             struct lruvec *lruvec)
+{
+       pg_data_t *pgdat = page_pgdat(page);
+       const struct mem_cgroup *memcg;
+       struct mem_cgroup_per_node *mz;
+
+       if (mem_cgroup_disabled())
+               return lruvec == &pgdat->__lruvec;
+
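+       /*
+        * The memcg's lruvec is embedded in a mem_cgroup_per_node, so
+        * the owning memcg can be recovered with container_of(); pages
+        * with no memcg set are treated as belonging to root_mem_cgroup.
+        */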
+       mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+       memcg = page_memcg(page) ? : root_mem_cgroup;
+
+       return lruvec->pgdat == pgdat && mz->memcg == memcg;
+}
+
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
        return &pgdat->__lruvec;
 }
 
+static inline bool lruvec_holds_page_lru_lock(struct page *page,
+                                             struct lruvec *lruvec)
+{
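+       /* Without CONFIG_MEMCG the node's __lruvec is the only lruvec */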
+       pg_data_t *pgdat = page_pgdat(page);
+
+       return lruvec == &pgdat->__lruvec;
+}
+
 static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
 {
        return NULL;
        spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 }
 
+/* Don't lock again iff the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+               struct lruvec *locked_lruvec)
+{
+       if (locked_lruvec) {
+               if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+                       return locked_lruvec;
+
+               unlock_page_lruvec_irq(locked_lruvec);
+       }
+
+       return lock_page_lruvec_irq(page);
+}
+
+/* Don't lock again iff the page's lruvec is already locked */
+static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
+               struct lruvec *locked_lruvec, unsigned long *flags)
+{
+       if (locked_lruvec) {
+               if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+                       return locked_lruvec;
+
+               unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+       }
+
+       return lock_page_lruvec_irqsave(page, flags);
+}
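+
+/*
+ * Typical caller pattern (a sketch, mirroring the pagevec loops that
+ * use these helpers): hold at most one lru_lock across the walk and
+ * let relock skip the unlock/lock when consecutive pages share a
+ * lruvec.
+ *
+ *	struct lruvec *lruvec = NULL;
+ *	unsigned long flags;
+ *
+ *	for (i = 0; i < pagevec_count(pvec); i++) {
+ *		lruvec = relock_page_lruvec_irqsave(pvec->pages[i],
+ *						    lruvec, &flags);
+ *		...
+ *	}
+ *	if (lruvec)
+ *		unlock_page_lruvec_irqrestore(lruvec, flags);
+ */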
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
 
                         * so we can spare the get_page() here.
                         */
                        if (TestClearPageLRU(page)) {
-                               struct lruvec *new_lruvec;
-
-                               new_lruvec = mem_cgroup_page_lruvec(page,
-                                               page_pgdat(page));
-                               if (new_lruvec != lruvec) {
-                                       if (lruvec)
-                                               unlock_page_lruvec_irq(lruvec);
-                                       lruvec = lock_page_lruvec_irq(page);
-                               }
-
+                               lruvec = relock_page_lruvec_irq(page, lruvec);
                                del_page_from_lru_list(page, lruvec,
                                                        page_lru(page));
                                continue;
 
 
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
-               struct lruvec *new_lruvec;
 
                /* block memcg migration during page moving between lru */
                if (!TestClearPageLRU(page))
                        continue;
 
-               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-               if (lruvec != new_lruvec) {
-                       if (lruvec)
-                               unlock_page_lruvec_irqrestore(lruvec, flags);
-                       lruvec = lock_page_lruvec_irqsave(page, &flags);
-               }
-
+               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
                (*move_fn)(page, lruvec);
 
                SetPageLRU(page);
                }
 
                if (PageLRU(page)) {
-                       struct lruvec *new_lruvec;
-
-                       new_lruvec = mem_cgroup_page_lruvec(page,
-                                                       page_pgdat(page));
-                       if (new_lruvec != lruvec) {
-                               if (lruvec)
-                                       unlock_page_lruvec_irqrestore(lruvec,
-                                                                       flags);
+                       struct lruvec *prev_lruvec = lruvec;
+
+                       lruvec = relock_page_lruvec_irqsave(page, lruvec,
+                                                                       &flags);
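+                       /* the lru_lock changed hands: reset the batch count */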
+                       if (prev_lruvec != lruvec)
                                lock_batch = 0;
-                               lruvec = lock_page_lruvec_irqsave(page, &flags);
-                       }
 
                        VM_BUG_ON_PAGE(!PageLRU(page), page);
                        __ClearPageLRU(page);
 
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
-               struct lruvec *new_lruvec;
-
-               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-               if (lruvec != new_lruvec) {
-                       if (lruvec)
-                               unlock_page_lruvec_irqrestore(lruvec, flags);
-                       lruvec = lock_page_lruvec_irqsave(page, &flags);
-               }
 
+               lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
                __pagevec_lru_add_fn(page, lruvec);
        }
        if (lruvec)
 
                 * All pages were isolated from the same lruvec (and isolation
                 * inhibits memcg migration).
                 */
-               VM_BUG_ON_PAGE(mem_cgroup_page_lruvec(page, page_pgdat(page))
-                                                       != lruvec, page);
+               VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
                lru = page_lru(page);
                nr_pages = thp_nr_pages(page);
 
        for (i = 0; i < pvec->nr; i++) {
                struct page *page = pvec->pages[i];
                int nr_pages;
-               struct lruvec *new_lruvec;
 
                if (PageTransTail(page))
                        continue;
                if (!TestClearPageLRU(page))
                        continue;
 
-               new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
-               if (lruvec != new_lruvec) {
-                       if (lruvec)
-                               unlock_page_lruvec_irq(lruvec);
-                       lruvec = lock_page_lruvec_irq(page);
-               }
-
+               lruvec = relock_page_lruvec_irq(page, lruvec);
                if (page_evictable(page) && PageUnevictable(page)) {
                        enum lru_list lru = page_lru_base_type(page);