else
                VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-       if (compound && PageTransHuge(page)) {
+       if (likely(!PageCompound(page))) {
+               first = atomic_inc_and_test(&page->_mapcount);
+               nr = first;
+
+       } else if (compound && PageTransHuge(page)) {
                lock_compound_mapcounts(page, &mapcounts);
                first = !mapcounts.compound_mapcount;
                mapcounts.compound_mapcount++;
@@ ... @@ void page_add_anon_rmap(struct page *page,
                                nr = nr_subpages_unmapped(page, nr_pmdmapped);
                }
                unlock_compound_mapcounts(page, &mapcounts);
-
-       } else if (PageCompound(page)) {
+       } else {
                struct page *head = compound_head(page);
 
                lock_compound_mapcounts(head, &mapcounts);
                first = subpage_mapcount_inc(page);
                nr = first && !mapcounts.compound_mapcount;
                unlock_compound_mapcounts(head, &mapcounts);
-
-       } else {
-               first = atomic_inc_and_test(&page->_mapcount);
-               nr = first;
        }
 
        VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
@@ ... @@
 void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
-       const bool compound = PageCompound(page);
-       int nr = compound ? thp_nr_pages(page) : 1;
+       int nr;
 
        VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        __SetPageSwapBacked(page);
-       if (compound) {
+
+       if (likely(!PageCompound(page))) {
+               /* increment count (starts at -1) */
+               atomic_set(&page->_mapcount, 0);
+               nr = 1;
+       } else {
                VM_BUG_ON_PAGE(!PageTransHuge(page), page);
                /* increment count (starts at -1) */
                atomic_set(compound_mapcount_ptr(page), 0);
+               nr = thp_nr_pages(page);
                __mod_lruvec_page_state(page, NR_ANON_THPS, nr);
-       } else {
-               /* increment count (starts at -1) */
-               atomic_set(&page->_mapcount, 0);
        }
+
        __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
        __page_set_anon_rmap(page, vma, address, 1);
 }
@@ ... @@ void page_add_file_rmap(struct page *page,
        VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
        lock_page_memcg(page);
 
-       if (compound && PageTransHuge(page)) {
+       if (likely(!PageCompound(page))) {
+               first = atomic_inc_and_test(&page->_mapcount);
+               nr = first;
+
+       } else if (compound && PageTransHuge(page)) {
                lock_compound_mapcounts(page, &mapcounts);
                first = !mapcounts.compound_mapcount;
                mapcounts.compound_mapcount++;
@@ ... @@ void page_add_file_rmap(struct page *page,
                                nr = nr_subpages_unmapped(page, nr_pmdmapped);
                }
                unlock_compound_mapcounts(page, &mapcounts);
-
-       } else if (PageCompound(page)) {
+       } else {
                struct page *head = compound_head(page);
 
                lock_compound_mapcounts(head, &mapcounts);
                first = subpage_mapcount_inc(page);
                nr = first && !mapcounts.compound_mapcount;
                unlock_compound_mapcounts(head, &mapcounts);
-
-       } else {
-               first = atomic_inc_and_test(&page->_mapcount);
-               nr = first;
        }
 
        if (nr_pmdmapped)
@@ ... @@ void page_remove_rmap(struct page *page,
        lock_page_memcg(page);
 
        /* page still mapped by someone else? */
-       if (compound && PageTransHuge(page)) {
+       if (likely(!PageCompound(page))) {
+               last = atomic_add_negative(-1, &page->_mapcount);
+               nr = last;
+
+       } else if (compound && PageTransHuge(page)) {
                lock_compound_mapcounts(page, &mapcounts);
                mapcounts.compound_mapcount--;
                last = !mapcounts.compound_mapcount;
@@ ... @@ void page_remove_rmap(struct page *page,
                                nr = nr_subpages_unmapped(page, nr_pmdmapped);
                }
                unlock_compound_mapcounts(page, &mapcounts);
-
-       } else if (PageCompound(page)) {
+       } else {
                struct page *head = compound_head(page);
 
                lock_compound_mapcounts(head, &mapcounts);
                last = subpage_mapcount_dec(page);
                nr = last && !mapcounts.compound_mapcount;
                unlock_compound_mapcounts(head, &mapcounts);
-
-       } else {
-               last = atomic_add_negative(-1, &page->_mapcount);
-               nr = last;
        }
 
        if (nr_pmdmapped) {