xen/mmu: The xen_setup_kernel_pagetable doesn't need to return anything.
author		Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	Fri, 29 Jun 2012 02:47:35 +0000 (22:47 -0400)
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	Tue, 31 Jul 2012 15:01:38 +0000 (11:01 -0400)
We don't need to return the new PGD - as we do not use it.

Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
(cherry picked from commit a573e36a3641f268ee6215a7d7cf74610ca5e81a)

Conflicts:

	arch/x86/xen/enlighten.c
	arch/x86/xen/mmu.c

arch/x86/xen/enlighten.c
arch/x86/xen/mmu.c
arch/x86/xen/xen-ops.h

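In short, the change drops the unused return value: the prototype goes from returning pgd_t * to void, and the sole caller in xen_start_kernel() stops assigning the result. A condensed sketch of what the hunks below do:

	/* Before: the boot path kept a pgd variable only to receive a value it never used. */
	pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

	/* After: the function returns void and the caller passes the Xen-provided page table directly. */
	void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);
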
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 9a5abca55686e383f1c455caf4b2f6c3774aafa9..02a42979d54b306acf936a19197dc4b9a6f9f855 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1231,7 +1231,6 @@ asmlinkage void __init xen_start_kernel(void)
 {
        struct physdev_set_iopl set_iopl;
        int rc;
-       pgd_t *pgd;
 
        if (!xen_start_info)
                return;
@@ -1321,8 +1320,6 @@ asmlinkage void __init xen_start_kernel(void)
        acpi_numa = -1;
 #endif
 
-       pgd = (pgd_t *)xen_start_info->pt_base;
-
        if (!xen_initial_domain())
                __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
 
@@ -1337,7 +1334,7 @@ asmlinkage void __init xen_start_kernel(void)
        memblock_init();
 
        xen_raw_console_write("mapping kernel into physical memory\n");
-       pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
+       xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);
 
        xen_reserve_internals();
        /* Allocate and initialize top and mid mfn levels for p2m structure */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 48fbf63851d63b0c3d21a0d2eb5ebaea932c37ed..72b587c0999f5e3a5dc34aa2d2af18d5303dda34 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1746,8 +1746,7 @@ static void convert_pfn_mfn(void *v)
  * of the physical mapping once some sort of allocator has been set
  * up.
  */
-pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
-                                        unsigned long max_pfn)
+void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
        pud_t *l3;
        pmd_t *l2;
@@ -1810,8 +1809,6 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
                      __pa(xen_start_info->pt_base +
                           xen_start_info->nr_pt_frames * PAGE_SIZE),
                      "XEN PAGETABLES");
-
-       return pgd;
 }
 #else  /* !CONFIG_X86_64 */
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
@@ -1854,8 +1851,7 @@ static void __init xen_write_cr3_init(unsigned long cr3)
        pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 
-pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
-                                        unsigned long max_pfn)
+void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
@@ -1889,8 +1885,6 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
                      __pa(xen_start_info->pt_base +
                           xen_start_info->nr_pt_frames * PAGE_SIZE),
                      "XEN PAGETABLES");
-
-       return initial_page_table;
 }
 #endif /* CONFIG_X86_64 */
 
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index ec0649fa0c7ab92c0dc840fc41ea6899f1f11304..9dca58f2f1932060dae32333b33571a43ffe1a0d 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -27,7 +27,7 @@ void xen_setup_mfn_list_list(void);
 void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
-pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;