Implement pud_free_pmd_page() and pmd_free_pte_page() on x86, which
clear a given pud/pmd entry and free up lower level page table(s).
The address range associated with the pud/pmd entry must have been
purged by INVLPG.
Link: http://lkml.kernel.org/r/20180314180155.19492-3-toshi.kani@hpe.com
Fixes: e61ce6ade404e ("mm: change ioremap to set up huge I/O mappings")
Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Reported-by: Lei Li <lious.lilei@hisilicon.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  */
 int pud_free_pmd_page(pud_t *pud)
 {
-       return pud_none(*pud);
+       pmd_t *pmd;
+       int i;
+
+       /* An empty pud has no pmd page underneath it; report success. */
+       if (pud_none(*pud))
+               return 1;
+
+       pmd = (pmd_t *)pud_page_vaddr(*pud);
+
+       /*
+        * Free every pte page hanging off this pmd page first; bail out
+        * (leaving the pud intact) if any of them cannot be freed.
+        */
+       for (i = 0; i < PTRS_PER_PMD; i++)
+               if (!pmd_free_pte_page(&pmd[i]))
+                       return 0;
+
+       /*
+        * NOTE(review): the pmd page is freed immediately after clearing
+        * the pud, relying solely on the caller's prior INVLPG of the
+        * mapped range. Presumably that also evicts any paging-structure
+        * cache entries referencing this page-table page -- confirm, as
+        * stale cached entries after free_page() would be a use-after-free
+        * from the CPU's point of view.
+        */
+       pud_clear(pud);
+       free_page((unsigned long)pmd);
+
+       return 1;
 }
 
 /**
  */
 int pmd_free_pte_page(pmd_t *pmd)
 {
-       return pmd_none(*pmd);
+       pte_t *pte;
+
+       /* An empty pmd has no pte page to release; report success. */
+       if (pmd_none(*pmd))
+               return 1;
+
+       pte = (pte_t *)pmd_page_vaddr(*pmd);
+       /*
+        * Clear the entry before freeing the pte page it pointed to.
+        * NOTE(review): as with pud_free_pmd_page(), this assumes the
+        * caller's INVLPG of the range has purged all cached translations
+        * through this page-table page -- confirm for x86.
+        */
+       pmd_clear(pmd);
+       free_page((unsigned long)pte);
+
+       return 1;
 }
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */