*/
 static int me_kernel(struct page *p, unsigned long pfn)
 {
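+       /* The caller (memory_failure()) holds the page lock; drop it here. */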
+       unlock_page(p);
        return MF_IGNORED;
 }
 
 static int me_unknown(struct page *p, unsigned long pfn)
 {
        pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
+       unlock_page(p);
        return MF_FAILED;
 }
 
  */
 static int me_pagecache_clean(struct page *p, unsigned long pfn)
 {
+       int ret;
        struct address_space *mapping;
 
        delete_from_lru_cache(p);
         * For anonymous pages, we're done; the only reference left
         * should be the one memory_failure() holds.
         */
-       if (PageAnon(p))
-               return MF_RECOVERED;
+       if (PageAnon(p)) {
+               ret = MF_RECOVERED;
+               goto out;
+       }
 
        /*
         * Now truncate the page in the page cache. This is really
                /*
                 * Page has been torn down in the meantime
                 */
-               return MF_FAILED;
+               ret = MF_FAILED;
+               goto out;
        }
 
        /*
         *
         * Open question: should we take i_mutex for this? Right now we don't.
         */
-       return truncate_error_page(p, pfn, mapping);
+       ret = truncate_error_page(p, pfn, mapping);
+out:
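+       /* Every exit path must drop the page lock taken by memory_failure(). */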
+       unlock_page(p);
+       return ret;
 }
 
 /*
  */
 static int me_swapcache_dirty(struct page *p, unsigned long pfn)
 {
+       int ret;
+
        ClearPageDirty(p);
        /* Trigger EIO in shmem: */
        ClearPageUptodate(p);
 
-       if (!delete_from_lru_cache(p))
-               return MF_DELAYED;
-       else
-               return MF_FAILED;
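+       /* delete_from_lru_cache() returns 0 on success, -EIO on failure. */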
+       ret = delete_from_lru_cache(p) ? MF_FAILED : MF_DELAYED;
+       unlock_page(p);
+       return ret;
 }
 
 static int me_swapcache_clean(struct page *p, unsigned long pfn)
 {
+       int ret;
+
        delete_from_swap_cache(p);
 
-       if (!delete_from_lru_cache(p))
-               return MF_RECOVERED;
-       else
-               return MF_FAILED;
+       ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
+       unlock_page(p);
+       return ret;
 }
 
 /*
        mapping = page_mapping(hpage);
        if (mapping) {
                res = truncate_error_page(hpage, pfn, mapping);
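+               /* Handlers are now responsible for unlocking on every path. */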
+               unlock_page(hpage);
        } else {
                res = MF_FAILED;
                unlock_page(hpage);
                        page_ref_inc(p);
                        res = MF_RECOVERED;
                }
-               lock_page(hpage);
        }
 
        return res;
        unsigned long mask;
        unsigned long res;
        enum mf_action_page_type type;
+
+       /* The ->action() callback must unlock the page before returning. */
        int (*action)(struct page *p, unsigned long pfn);
 } error_states[] = {
        { reserved,     reserved,       MF_MSG_KERNEL,  me_kernel },
        int result;
        int count;
 
+       /* Page p is expected to be unlocked once ps->action() returns. */
        result = ps->action(p, pfn);
 
        count = page_count(p) - 1;
                goto out;
        }
 
-       res = identify_page_state(pfn, p, page_flags);
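+       /* identify_page_state() unlocks the page via ps->action(). */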
+       return identify_page_state(pfn, p, page_flags);
 out:
        unlock_page(head);
        return res;
 
 identify_page_state:
        res = identify_page_state(pfn, p, page_flags);
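+       /* The page lock was already dropped in ps->action(). */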
+       mutex_unlock(&mf_mutex);
+       return res;
 unlock_page:
        unlock_page(p);
 unlock_mutex: