The changelog of commit
4fc3f1d66b1e ("mm/rmap, migration: Make
rmap_walk_anon() and try_to_unmap_anon() more scalable") says:
| Rename anon_vma_[un]lock() => anon_vma_[un]lock_write(),
| to make it clearer that it's an exclusive write-lock in
| that case - suggested by Rik van Riel.
But that commit renamed only anon_vma_lock(); complete the rename by converting anon_vma_unlock() to anon_vma_unlock_write() as well.
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
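As a minimal illustrative sketch (not part of the patch; the function name example_link_anon_vma is hypothetical, and anon_vma_chain_link() is a static helper inside mm/rmap.c, so this would only compile in that file), a typical write-side call site after the rename reads like the __anon_vma_prepare-style path in the hunks below:

        /*
         * Illustrative only: the renamed pair still takes the root
         * anon_vma's rwsem for writing, matching the definitions of
         * anon_vma_lock_write()/anon_vma_unlock_write() changed below.
         */
        static void example_link_anon_vma(struct vm_area_struct *vma,
                                          struct anon_vma_chain *avc,
                                          struct anon_vma *anon_vma)
        {
                anon_vma_lock_write(anon_vma);   /* down_write(&anon_vma->root->rwsem) */
                anon_vma_chain_link(vma, avc, anon_vma);
                anon_vma_unlock_write(anon_vma); /* up_write(&anon_vma->root->rwsem) */
        }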
 
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                anon_vma_lock_write(__anon_vma);                        \
-               anon_vma_unlock(__anon_vma);                            \
+               anon_vma_unlock_write(__anon_vma);                      \
                BUG_ON(pmd_trans_splitting(*____pmd) ||                 \
                       pmd_trans_huge(*____pmd));                       \
        } while (0)
 
        down_write(&anon_vma->root->rwsem);
 }
 
-static inline void anon_vma_unlock(struct anon_vma *anon_vma)
+static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
 {
        up_write(&anon_vma->root->rwsem);
 }
 
 
        BUG_ON(PageCompound(page));
 out_unlock:
-       anon_vma_unlock(anon_vma);
+       anon_vma_unlock_write(anon_vma);
        put_anon_vma(anon_vma);
 out:
        return ret;
                BUG_ON(!pmd_none(*pmd));
                set_pmd_at(mm, address, pmd, _pmd);
                spin_unlock(&mm->page_table_lock);
-               anon_vma_unlock(vma->anon_vma);
+               anon_vma_unlock_write(vma->anon_vma);
                goto out;
        }
 
         * All pages are isolated and locked so anon_vma rmap
         * can't run anymore.
         */
-       anon_vma_unlock(vma->anon_vma);
+       anon_vma_unlock_write(vma->anon_vma);
 
        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
        pte_unmap(pte);
 
                anon_vma_interval_tree_post_update_vma(vma);
                if (adjust_next)
                        anon_vma_interval_tree_post_update_vma(next);
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_write(anon_vma);
        }
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);
                if (!__test_and_clear_bit(0, (unsigned long *)
                                          &anon_vma->root->rb_root.rb_node))
                        BUG();
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_write(anon_vma);
        }
 }
 
 
        pte_unmap(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (anon_vma)
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_write(anon_vma);
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);
 }
 
         */
        if (rwsem_is_locked(&anon_vma->root->rwsem)) {
                anon_vma_lock_write(anon_vma);
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_write(anon_vma);
        }
 
        kmem_cache_free(anon_vma_cachep, anon_vma);
                        avc = NULL;
                }
                spin_unlock(&mm->page_table_lock);
-               anon_vma_unlock(anon_vma);
+               anon_vma_unlock_write(anon_vma);
 
                if (unlikely(allocated))
                        put_anon_vma(allocated);
        vma->anon_vma = anon_vma;
        anon_vma_lock_write(anon_vma);
        anon_vma_chain_link(vma, avc, anon_vma);
-       anon_vma_unlock(anon_vma);
+       anon_vma_unlock_write(anon_vma);
 
        return 0;