*/
        unsigned long           lowmem_reserve[MAX_NR_ZONES];
 
+       /*
+        * This is a per-zone reserve of pages that should not be
+        * considered dirtyable memory.
+        */
+       unsigned long           dirty_balance_reserve;
+
 #ifdef CONFIG_NUMA
        int node;
        /*
 
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
+extern unsigned long dirty_balance_reserve;
 extern unsigned int nr_free_buffer_pages(void);
 extern unsigned int nr_free_pagecache_pages(void);
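
The per-zone field added in the mmzone.h hunk and the global counter exported here are two views of the same quantity: each zone keeps back pages that must not be dirtied, and the global value is the sum of those per-zone reserves. As a rough sketch of what the per-zone view amounts to once the field is populated (the helper name and the min() clamp are mine, not part of the patch; zone_page_state() and zone_reclaimable_pages() are used the same way in the page-writeback.c hunks below):

static unsigned long zone_dirtyable_memory_sketch(struct zone *zone)
{
	unsigned long nr_pages;

	/* Same expression the page-writeback.c hunks below switch to. */
	nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
		   zone_reclaimable_pages(zone);

	/*
	 * Clamp so an oversized reserve cannot underflow the estimate;
	 * note that the subtraction in the actual hunks is unclamped.
	 */
	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);

	return nr_pages;
}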
 
 
                        &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
                x += zone_page_state(z, NR_FREE_PAGES) +
-                    zone_reclaimable_pages(z);
+                    zone_reclaimable_pages(z) - z->dirty_balance_reserve;
        }
        /*
         * Make sure that the number of highmem pages is never larger
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+           dirty_balance_reserve;
 
        if (!vm_highmem_is_dirtyable)
                x -= highmem_dirtyable_memory(x);
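
An illustrative calculation with made-up numbers (not from the patch), ignoring the highmem adjustment: with 200,000 free pages, 300,000 reclaimable pages and a dirty_balance_reserve of 50,000 pages, the dirtyable base drops from 500,000 to x = 200,000 + 300,000 - 50,000 = 450,000 pages. With vm.dirty_ratio at 20, the resulting dirty limit shrinks from 100,000 to 90,000 pages, so dirty throttling kicks in before page cache allocations have to dip into the watermark and lowmem reserves.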
 
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+/*
+ * When calculating the number of globally allowed dirty pages, there
+ * is a certain number of per-zone reserves that should not be
+ * considered dirtyable memory.  This is the sum of those reserves
+ * over all existing zones that contribute dirtyable memory.
+ */
+unsigned long dirty_balance_reserve __read_mostly;
+
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
                        if (max > zone->present_pages)
                                max = zone->present_pages;
                        reserve_pages += max;
+                       /*
+                        * Lowmem reserves are not available to
+                        * GFP_HIGHUSER page cache allocations and
+                        * kswapd tries to balance zones to their high
+                        * watermark.  As a result, neither should be
+                        * regarded as dirtyable memory, to prevent a
+                        * situation where reclaim has to clean pages
+                        * in order to balance the zones.
+                        */
+                       zone->dirty_balance_reserve = max;
                }
        }
+       dirty_balance_reserve = reserve_pages;
        totalreserve_pages = reserve_pages;
 }
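
The excerpt only shows the tail of the per-zone loop. In the part not quoted here, max is built, roughly, from the quantities the new comment names: the largest lowmem_reserve[] entry for the zone plus its high watermark, before the clamp to present_pages shown above. A hypothetical, self-contained restatement of that per-zone reserve (the helper does not exist in the kernel; high_wmark_pages(), lowmem_reserve[] and present_pages are the accessors and fields the real loop relies on):

static unsigned long zone_dirty_reserve_sketch(struct zone *zone)
{
	unsigned long max = 0;
	enum zone_type j;

	/* Largest amount this zone holds back to protect lower zones. */
	for (j = 0; j < MAX_NR_ZONES; j++)
		if (zone->lowmem_reserve[j] > max)
			max = zone->lowmem_reserve[j];

	/* kswapd balances the zone up to its high watermark. */
	max += high_wmark_pages(zone);

	/* Never reserve more than the zone actually has. */
	return min(max, zone->present_pages);
}

Since calculate_totalreserve_pages() is rerun whenever the watermarks or lowmem_reserve ratios are recalculated (for example after a change to vm.min_free_kbytes), both the per-zone reserves and their global sum track those settings automatically.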