xen/balloon: Only mark a page as managed when it is released
author	Ross Lagerwall <ross.lagerwall@citrix.com>
	Fri, 9 Dec 2016 17:10:22 +0000 (17:10 +0000)
committer	Boris Ostrovsky <boris.ostrovsky@oracle.com>
	Fri, 3 Feb 2017 20:55:31 +0000 (15:55 -0500)
Only mark a page as managed when it is released back to the allocator.
This ensures that the managed page count does not get falsely increased
when a VM is running. Correspondingly change it so that pages are
marked as unmanaged after getting them from the allocator.

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
OraBug: 25497392

(cherry picked from commit 709613ad2b3c9eaeb2a3e24284b7c8feffc17326)
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
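
The one-line switch from __free_reserved_page() to free_reserved_page() in the
diff below is what carries the accounting: the wrapper bumps the managed page
count as the page is handed back. For reference, the two helpers in
include/linux/mm.h looked roughly like this in kernels of this era (a sketch
for illustration, not copied from this tree):

    static inline void __free_reserved_page(struct page *page)
    {
            ClearPageReserved(page);
            init_page_count(page);
            __free_page(page);
    }

    static inline void free_reserved_page(struct page *page)
    {
            __free_reserved_page(page);
            /* the only difference: account the page as managed on release */
            adjust_managed_page_count(page, 1);
    }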
drivers/xen/balloon.c

index cfab1d24e4bccd8ec12165b81d53d656f9fba3e2..7edb76c9c09f5d6f57a4560599a8406c29f6e4e7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -181,7 +181,6 @@ static void __balloon_append(struct page *page)
 static void balloon_append(struct page *page)
 {
        __balloon_append(page);
-       adjust_managed_page_count(page, -1);
 }
 
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
@@ -202,8 +201,6 @@ static struct page *balloon_retrieve(bool require_lowmem)
        else
                balloon_stats.balloon_low--;
 
-       adjust_managed_page_count(page, 1);
-
        return page;
 }
 
@@ -470,7 +467,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 #endif
 
                /* Relinquish the page back to the allocator. */
-               __free_reserved_page(page);
+               free_reserved_page(page);
        }
 
        balloon_stats.current_pages += rc;
@@ -501,6 +498,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                        state = BP_EAGAIN;
                        break;
                }
+               adjust_managed_page_count(page, -1);
                scrub_page(page);
                list_add(&page->lru, &pages);
        }