        .writepages     = generic_writepages,
        .releasepage    = blkdev_releasepage,
        .direct_IO      = blkdev_direct_IO,
+       .is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
 const struct file_operations def_blk_fops = {
 
 }
 EXPORT_SYMBOL(unlock_buffer);
 
+/*
+ * Returns whether the page has dirty or writeback buffers. If all the
+ * buffers are unlocked and clean then the PageDirty information is stale.
+ * If any of the buffers are locked, it is assumed they are locked for IO.
+ */
+void buffer_check_dirty_writeback(struct page *page,
+                                    bool *dirty, bool *writeback)
+{
+       struct buffer_head *head, *bh;
+       *dirty = false;
+       *writeback = false;
+
+       BUG_ON(!PageLocked(page));
+
+       if (!page_has_buffers(page))
+               return;
+
+       if (PageWriteback(page))
+               *writeback = true;
+
+       head = page_buffers(page);
+       bh = head;
+       do {
+               if (buffer_locked(bh))
+                       *writeback = true;
+
+               if (buffer_dirty(bh))
+                       *dirty = true;
+
+               bh = bh->b_this_page;
+       } while (bh != head);
+}
+EXPORT_SYMBOL(buffer_check_dirty_writeback);
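
The helper above covers filesystems that attach buffer heads to their pages.
A filesystem that tracks page state privately would supply its own hook
instead; the following is a hypothetical sketch (the myfs_* names and the
myfs_page_state structure are illustrative, not part of this patch), assuming
the filesystem stashes its per-page state in page_private():

	/* Hypothetical per-page bookkeeping; not part of this patch. */
	struct myfs_page_state {
		unsigned int dirty_blocks;	/* blocks still needing writeback */
		unsigned int blocks_under_io;	/* blocks currently being written */
	};

	static void myfs_check_dirty_writeback(struct page *page,
						bool *dirty, bool *writeback)
	{
		struct myfs_page_state *state;

		*dirty = false;
		*writeback = false;

		if (!page_has_private(page))
			return;

		/* Report the filesystem's view rather than the raw page flags */
		state = (struct myfs_page_state *)page_private(page);
		*dirty = state->dirty_blocks != 0;
		*writeback = state->blocks_under_io != 0;
	}

	static const struct address_space_operations myfs_aops = {
		/* other methods omitted */
		.is_dirty_writeback	= myfs_check_dirty_writeback,
	};
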
+
 /*
  * Block until a buffer comes unlocked.  This doesn't stop it
  * from becoming locked again - you have to lock it yourself
 
        .direct_IO              = ext3_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .is_dirty_writeback     = buffer_check_dirty_writeback,
        .error_remove_page      = generic_error_remove_page,
 };
 
 
        })
 #define page_has_buffers(page) PagePrivate(page)
 
+void buffer_check_dirty_writeback(struct page *page,
+                                    bool *dirty, bool *writeback);
+
 /*
  * Declarations
  */
 
        int (*launder_page) (struct page *);
        int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
                                        unsigned long);
+       void (*is_dirty_writeback) (struct page *, bool *, bool *);
        int (*error_remove_page)(struct address_space *, struct page *);
 
        /* swapfile support */
 
 static void page_check_dirty_writeback(struct page *page,
                                       bool *dirty, bool *writeback)
 {
+       struct address_space *mapping;
+
        /*
         * Anonymous pages are not handled by flushers and must be written
         * from reclaim context. Do not stall reclaim based on them
        /* By default assume that the page flags are accurate */
        *dirty = PageDirty(page);
        *writeback = PageWriteback(page);
+
+       /* Verify dirty/writeback state if the filesystem supports it */
+       if (!page_has_private(page))
+               return;
+
+       mapping = page_mapping(page);
+       if (mapping && mapping->a_ops->is_dirty_writeback)
+               mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
 }
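
For context, page_check_dirty_writeback() is consumed by the reclaim loop in
shrink_page_list(), which uses the result to decide how to account pages that
are genuinely dirty or under IO. A simplified sketch of how the flags might
feed that accounting (the nr_dirty/nr_unqueued_dirty counters follow the
upstream naming in this series, but this is an abbreviation, not the full
reclaim logic):

	bool dirty, writeback;

	/*
	 * Ask the filesystem (via ->is_dirty_writeback, when present)
	 * instead of trusting PageDirty/PageWriteback alone.
	 */
	page_check_dirty_writeback(page, &dirty, &writeback);

	if (dirty || writeback)
		nr_dirty++;		/* page is still of interest to flushers */

	if (dirty && !writeback)
		nr_unqueued_dirty++;	/* dirty but no IO queued yet */
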
 
 /*