                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
-               BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
+               BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                                (page_to_pfn(p) >= 0x00100000UL));
        }
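
An aside on this hunk: mapping_gfp_constraint() also works in a plain boolean
context, since it is nonzero exactly when one of the requested bits is set in
the mapping's gfp mask. The magic pfn is the 4 GiB boundary that __GFP_DMA32
enforces; a minimal userspace sketch of the arithmetic, assuming 4 KiB pages
(PAGE_SHIFT == 12):

	#include <assert.h>

	int main(void)
	{
		/* pfn 0x00100000 is the first page frame at or above 4 GiB */
		unsigned long long addr = 0x00100000ULL << 12;
		assert(addr == 0x100000000ULL);	/* exactly 4 GiB */
		return 0;
	}

So the BUG_ON() fires only if the mapping demands DMA32 pages yet one of them
landed at or above the 4 GiB boundary.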
 
 
         * Fail silently without starting the shrinker
         */
        mapping = file_inode(obj->base.filp)->i_mapping;
-       gfp = mapping_gfp_mask(mapping);
+       gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
        gfp |= __GFP_NORETRY | __GFP_NOWARN;
-       gfp &= ~(__GFP_IO | __GFP_RECLAIM);
        sg = st->sgl;
        st->nents = 0;
        for (i = 0; i < page_count; i++) {
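
A note on this hunk: the removed code OR'ed in __GFP_NORETRY | __GFP_NOWARN
before clearing __GFP_IO | __GFP_RECLAIM, while the replacement clears first
via mapping_gfp_constraint(). The flag sets are disjoint, so the conversion
is behavior-preserving. A tiny userspace check, using illustrative stand-in
bit values rather than the real linux/gfp.h definitions:

	#include <assert.h>

	#define __GFP_IO	0x2u
	#define __GFP_RECLAIM	0x4u
	#define __GFP_NORETRY	0x8u
	#define __GFP_NOWARN	0x10u

	int main(void)
	{
		unsigned int mask = 0x1u | __GFP_IO | __GFP_RECLAIM;
		unsigned int or_then_clear =
			(mask | __GFP_NORETRY | __GFP_NOWARN) &
			~(__GFP_IO | __GFP_RECLAIM);
		unsigned int clear_then_or =
			(mask & ~(__GFP_IO | __GFP_RECLAIM)) |
			__GFP_NORETRY | __GFP_NOWARN;

		/* disjoint flag sets make the two orders agree */
		assert(or_then_clear == clear_then_or);
		return 0;
	}

Both sides evaluate to 0x19 here: the residual low bit plus the NORETRY and
NOWARN stand-ins.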
 
                        goto next;
                }
 
-               page = __page_cache_alloc(mapping_gfp_mask(mapping) &
-                                                               ~__GFP_FS);
+               page = __page_cache_alloc(mapping_gfp_constraint(mapping,
+                                                                ~__GFP_FS));
                if (!page)
                        break;
 
-               if (add_to_page_cache_lru(page, mapping, pg_index,
-                                                               GFP_NOFS)) {
+               if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
                        page_cache_release(page);
                        goto next;
                }
 
 
 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
 {
-       return mapping_gfp_mask(mapping) & ~__GFP_FS;
+       return mapping_gfp_constraint(mapping, ~__GFP_FS);
 }
 
 /* extent-tree.c */
 
        }
 
        mapping_set_gfp_mask(inode->i_mapping,
-                       mapping_gfp_mask(inode->i_mapping) &
-                       ~(__GFP_FS | __GFP_HIGHMEM));
+                       mapping_gfp_constraint(inode->i_mapping,
+                       ~(__GFP_FS | __GFP_HIGHMEM)));
 
        return inode;
 }
 
        int ret = 0;            /* Will call free_more_memory() */
        gfp_t gfp_mask;
 
-       gfp_mask = (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp;
+       gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
 
        /*
         * XXX: __getblk_slow() can not really deal with failure and
 
                int ret1;
                struct address_space *mapping = inode->i_mapping;
                struct page *page = find_or_create_page(mapping, 0,
-                                               mapping_gfp_mask(mapping) &
-                                               ~__GFP_FS);
+                                               mapping_gfp_constraint(mapping,
+                                               ~__GFP_FS));
                if (!page) {
                        ret = VM_FAULT_OOM;
                        goto out;

                if (i_size_read(inode) == 0)
                        return;
                page = find_or_create_page(mapping, 0,
-                                          mapping_gfp_mask(mapping) & ~__GFP_FS);
+                                          mapping_gfp_constraint(mapping,
+                                          ~__GFP_FS));
                if (!page)
                        return;
                if (PageUptodate(page)) {
 
        struct page *page, *tpage;
        unsigned int expected_index;
        int rc;
-       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
+       gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 
        INIT_LIST_HEAD(tmplist);
 
 
        int err = 0;
 
        page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
-                                  mapping_gfp_mask(mapping) & ~__GFP_FS);
+                                  mapping_gfp_constraint(mapping, ~__GFP_FS));
        if (!page)
                return -ENOMEM;
 
 
                        page = list_entry(pages->prev, struct page, lru);
                        list_del(&page->lru);
                        if (add_to_page_cache_lru(page, mapping, page->index,
-                                       GFP_KERNEL & mapping_gfp_mask(mapping)))
+                                 mapping_gfp_constraint(mapping, GFP_KERNEL)))
                                goto next_page;
                }
 
 
        filler_t *filler = super->s_devops->readpage;
        struct page *page;
 
-       BUG_ON(mapping_gfp_mask(mapping) & __GFP_FS);
+       BUG_ON(mapping_gfp_constraint(mapping, __GFP_FS));
        if (use_filler)
                page = read_cache_page(mapping, index, filler, sb);
        else {
 
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
-       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);
+       gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
        sector_t last_block_in_bio = 0;
        struct buffer_head map_bh;
        unsigned long first_logical_block = 0;
-       gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping);
+       gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 
        map_bh.b_state = 0;
        map_bh.b_size = 0;
 
 int page_symlink(struct inode *inode, const char *symname, int len)
 {
        return __page_symlink(inode, symname, len,
-                       !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
+                       !mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
 }
 EXPORT_SYMBOL(page_symlink);
 
 
                goto failed;
 
        mapping_set_gfp_mask(inode->i_mapping,
-                            mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+                          mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 
        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        nilfs_set_inode_flags(inode);
        mapping_set_gfp_mask(inode->i_mapping,
-                            mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
+                          mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
        return 0;
 
  failed_unmap:
 
                                }
                        }
                        err = add_to_page_cache_lru(*cached_page, mapping,
-                                       index,
-                                       GFP_KERNEL & mapping_gfp_mask(mapping));
+                                  index,
+                                  mapping_gfp_constraint(mapping, GFP_KERNEL));
                        if (unlikely(err)) {
                                if (err == -EEXIST)
                                        continue;
 
                                break;
 
                        error = add_to_page_cache_lru(page, mapping, index,
-                                       GFP_KERNEL & mapping_gfp_mask(mapping));
+                                  mapping_gfp_constraint(mapping, GFP_KERNEL));
                        if (unlikely(error)) {
                                page_cache_release(page);
                                if (error == -EEXIST)
 
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
 }
 
+/* Restricts the given gfp_mask to what the mapping allows. */
+static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+               gfp_t gfp_mask)
+{
+       return mapping_gfp_mask(mapping) & gfp_mask;
+}
+
 /*
  * This is non-atomic.  Only to be used before the mapping is activated.
  * Probably needs a barrier...
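
To make the new helper's contract concrete, here is a self-contained
userspace sketch of the two patterns this series uses it for: stripping
forbidden flags (e.g. ~__GFP_FS) and capping a full mask such as GFP_KERNEL
at what the mapping allows. The types and bit values are simplified
stand-ins, not the kernel's real definitions:

	#include <assert.h>

	typedef unsigned int gfp_t;

	/* Illustrative stand-in bits; the real ones live in linux/gfp.h. */
	#define __GFP_FS	0x1u
	#define __GFP_IO	0x2u
	#define __GFP_RECLAIM	0x4u
	#define GFP_KERNEL	(__GFP_FS | __GFP_IO | __GFP_RECLAIM)

	struct address_space { gfp_t gfp_mask; };

	static gfp_t mapping_gfp_mask(struct address_space *mapping)
	{
		return mapping->gfp_mask;
	}

	/* Same body as the helper added above. */
	static gfp_t mapping_gfp_constraint(struct address_space *mapping,
					    gfp_t gfp_mask)
	{
		return mapping_gfp_mask(mapping) & gfp_mask;
	}

	int main(void)
	{
		struct address_space m = { .gfp_mask = GFP_KERNEL };

		/* Pattern 1: strip flags the caller must avoid. */
		assert(mapping_gfp_constraint(&m, ~__GFP_FS) ==
		       (__GFP_IO | __GFP_RECLAIM));

		/* Pattern 2: cap a requested mask at the mapping's mask. */
		m.gfp_mask = __GFP_IO | __GFP_RECLAIM;
		assert(mapping_gfp_constraint(&m, GFP_KERNEL) ==
		       (__GFP_IO | __GFP_RECLAIM));
		return 0;
	}

Either way the caller can never gain a flag that the mapping's gfp mask does
not already grant, which is the point of replacing the open-coded "&"
expressions above.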
 
                        goto out;
                }
                error = add_to_page_cache_lru(page, mapping, index,
-                                       GFP_KERNEL & mapping_gfp_mask(mapping));
+                               mapping_gfp_constraint(mapping, GFP_KERNEL));
                if (error) {
                        page_cache_release(page);
                        if (error == -EEXIST) {
                        return -ENOMEM;
 
                ret = add_to_page_cache_lru(page, mapping, offset,
-                               GFP_KERNEL & mapping_gfp_mask(mapping));
+                               mapping_gfp_constraint(mapping, GFP_KERNEL));
                if (ret == 0)
                        ret = mapping->a_ops->readpage(file, page);
                else if (ret == -EEXIST)
 
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
-                               GFP_KERNEL & mapping_gfp_mask(mapping))) {
+                               mapping_gfp_constraint(mapping, GFP_KERNEL))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping, page->index,
-                               GFP_KERNEL & mapping_gfp_mask(mapping))) {
+                               mapping_gfp_constraint(mapping, GFP_KERNEL))) {
                        mapping->a_ops->readpage(filp, page);
                }
                page_cache_release(page);