diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
 void nilfs_palloc_commit_alloc_entry(struct inode *inode,
                                      struct nilfs_palloc_req *req)
 {
-       nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
-       nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
+       mark_buffer_dirty(req->pr_bitmap_bh);
+       mark_buffer_dirty(req->pr_desc_bh);
        nilfs_mdt_mark_dirty(inode);
 
        brelse(req->pr_bitmap_bh);
        kunmap(req->pr_bitmap_bh->b_page);
        kunmap(req->pr_desc_bh->b_page);
 
-       nilfs_mdt_mark_buffer_dirty(req->pr_desc_bh);
-       nilfs_mdt_mark_buffer_dirty(req->pr_bitmap_bh);
+       mark_buffer_dirty(req->pr_desc_bh);
+       mark_buffer_dirty(req->pr_bitmap_bh);
        nilfs_mdt_mark_dirty(inode);
 
        brelse(req->pr_bitmap_bh);
                kunmap(bitmap_bh->b_page);
                kunmap(desc_bh->b_page);
 
-               nilfs_mdt_mark_buffer_dirty(desc_bh);
-               nilfs_mdt_mark_buffer_dirty(bitmap_bh);
+               mark_buffer_dirty(desc_bh);
+               mark_buffer_dirty(bitmap_bh);
                nilfs_mdt_mark_dirty(inode);
 
                brelse(bitmap_bh);
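
For reference, the first hunk above leaves nilfs_palloc_commit_alloc_entry()
in this shape (a reconstruction; the final brelse() on pr_desc_bh falls just
outside the quoted context and is assumed from the function's symmetric
cleanup):

        void nilfs_palloc_commit_alloc_entry(struct inode *inode,
                                             struct nilfs_palloc_req *req)
        {
                mark_buffer_dirty(req->pr_bitmap_bh);
                mark_buffer_dirty(req->pr_desc_bh);
                nilfs_mdt_mark_dirty(inode);

                brelse(req->pr_bitmap_bh);
                brelse(req->pr_desc_bh);        /* assumed; not shown above */
        }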

diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);
-               nilfs_btnode_mark_dirty(obh);
+               mark_buffer_dirty(obh);
 
                spin_lock_irq(&btnc->tree_lock);
                radix_tree_delete(&btnc->page_tree, oldkey);
                unlock_page(opage);
        } else {
                nilfs_copy_buffer(nbh, obh);
-               nilfs_btnode_mark_dirty(nbh);
+               mark_buffer_dirty(nbh);
 
                nbh->b_blocknr = newkey;
                ctxt->bh = nbh;

diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
 void nilfs_btnode_abort_change_key(struct address_space *,
                                   struct nilfs_btnode_chkey_ctxt *);
 
-#define nilfs_btnode_mark_dirty(bh)    nilfs_mark_buffer_dirty(bh)
-
-
 #endif /* _NILFS_BTNODE_H */
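
Both wrappers removed by this patch were one-line macros over the same
private helper, so each call-site conversion is purely mechanical; the
btree.c hunks that follow change nothing but the name:

        /* removed, from btnode.h and mdt.h respectively: */
        #define nilfs_btnode_mark_dirty(bh)     nilfs_mark_buffer_dirty(bh)
        #define nilfs_mdt_mark_buffer_dirty(bh) nilfs_mark_buffer_dirty(bh)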

diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
                                nilfs_btree_get_nonroot_node(path, level),
                                path[level].bp_index, key);
                        if (!buffer_dirty(path[level].bp_bh))
-                               nilfs_btnode_mark_dirty(path[level].bp_bh);
+                               mark_buffer_dirty(path[level].bp_bh);
                } while ((path[level].bp_index == 0) &&
                         (++level < nilfs_btree_height(btree) - 1));
        }
                nilfs_btree_node_insert(node, path[level].bp_index,
                                        *keyp, *ptrp, ncblk);
                if (!buffer_dirty(path[level].bp_bh))
-                       nilfs_btnode_mark_dirty(path[level].bp_bh);
+                       mark_buffer_dirty(path[level].bp_bh);
 
                if (path[level].bp_index == 0)
                        nilfs_btree_promote_key(btree, path, level + 1,
        nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        nilfs_btree_promote_key(btree, path, level + 1,
                                nilfs_btree_node_get_key(node, 0));
        nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        path[level + 1].bp_index++;
        nilfs_btree_promote_key(btree, path, level + 1,
        nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        newkey = nilfs_btree_node_get_key(right, 0);
        newptr = path[level].bp_newreq.bpr_ptr;
        nilfs_btree_node_set_level(root, level + 1);
 
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        path[level].bp_bh = path[level].bp_sib_bh;
        path[level].bp_sib_bh = NULL;
                nilfs_btree_node_delete(node, path[level].bp_index,
                                        keyp, ptrp, ncblk);
                if (!buffer_dirty(path[level].bp_bh))
-                       nilfs_btnode_mark_dirty(path[level].bp_bh);
+                       mark_buffer_dirty(path[level].bp_bh);
                if (path[level].bp_index == 0)
                        nilfs_btree_promote_key(btree, path, level + 1,
                                nilfs_btree_node_get_key(node, 0));
        nilfs_btree_node_move_right(left, node, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        nilfs_btree_promote_key(btree, path, level + 1,
                                nilfs_btree_node_get_key(node, 0));
        nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        path[level + 1].bp_index++;
        nilfs_btree_promote_key(btree, path, level + 1,
        nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_sib_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
+               mark_buffer_dirty(path[level].bp_sib_bh);
 
        nilfs_btnode_delete(path[level].bp_bh);
        path[level].bp_bh = path[level].bp_sib_bh;
        nilfs_btree_node_move_left(node, right, n, ncblk, ncblk);
 
        if (!buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
 
        nilfs_btnode_delete(path[level].bp_sib_bh);
        path[level].bp_sib_bh = NULL;
                nilfs_btree_node_init(node, 0, 1, n, ncblk, keys, ptrs);
                nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
                if (!buffer_dirty(bh))
-                       nilfs_btnode_mark_dirty(bh);
+                       mark_buffer_dirty(bh);
                if (!nilfs_bmap_dirty(btree))
                        nilfs_bmap_set_dirty(btree);
 
 {
        while ((++level < nilfs_btree_height(btree) - 1) &&
               !buffer_dirty(path[level].bp_bh))
-               nilfs_btnode_mark_dirty(path[level].bp_bh);
+               mark_buffer_dirty(path[level].bp_bh);
 
        return 0;
 }
        }
 
        if (!buffer_dirty(bh))
-               nilfs_btnode_mark_dirty(bh);
+               mark_buffer_dirty(bh);
        brelse(bh);
        if (!nilfs_bmap_dirty(btree))
                nilfs_bmap_set_dirty(btree);
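
Note that the btree.c call sites keep their existing buffer_dirty() guard.
The guard is now redundant rather than required, since mark_buffer_dirty()
itself returns early when the dirty bit is already set; keeping it simply
avoids a redundant function call:

        if (!buffer_dirty(path[level].bp_bh))
                mark_buffer_dirty(path[level].bp_bh);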

diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
                if (!nilfs_cpfile_is_in_first(cpfile, cno))
                        nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
                                                                 kaddr, 1);
-               nilfs_mdt_mark_buffer_dirty(cp_bh);
+               mark_buffer_dirty(cp_bh);
 
                kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, 1);
                kunmap_atomic(kaddr, KM_USER0);
-               nilfs_mdt_mark_buffer_dirty(header_bh);
+               mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
        }
 
                }
                if (nicps > 0) {
                        tnicps += nicps;
-                       nilfs_mdt_mark_buffer_dirty(cp_bh);
+                       mark_buffer_dirty(cp_bh);
                        nilfs_mdt_mark_dirty(cpfile);
                        if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
                                count =
                header = nilfs_cpfile_block_get_header(cpfile, header_bh,
                                                       kaddr);
                le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
-               nilfs_mdt_mark_buffer_dirty(header_bh);
+               mark_buffer_dirty(header_bh);
                nilfs_mdt_mark_dirty(cpfile);
                kunmap_atomic(kaddr, KM_USER0);
        }
        le64_add_cpu(&header->ch_nsnapshots, 1);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(prev_bh);
-       nilfs_mdt_mark_buffer_dirty(curr_bh);
-       nilfs_mdt_mark_buffer_dirty(cp_bh);
-       nilfs_mdt_mark_buffer_dirty(header_bh);
+       mark_buffer_dirty(prev_bh);
+       mark_buffer_dirty(curr_bh);
+       mark_buffer_dirty(cp_bh);
+       mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);
 
        brelse(prev_bh);
        le64_add_cpu(&header->ch_nsnapshots, -1);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(next_bh);
-       nilfs_mdt_mark_buffer_dirty(prev_bh);
-       nilfs_mdt_mark_buffer_dirty(cp_bh);
-       nilfs_mdt_mark_buffer_dirty(header_bh);
+       mark_buffer_dirty(next_bh);
+       mark_buffer_dirty(prev_bh);
+       mark_buffer_dirty(cp_bh);
+       mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(cpfile);
 
        brelse(prev_bh);

diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
 static void nilfs_dat_commit_entry(struct inode *dat,
                                   struct nilfs_palloc_req *req)
 {
-       nilfs_mdt_mark_buffer_dirty(req->pr_entry_bh);
+       mark_buffer_dirty(req->pr_entry_bh);
        nilfs_mdt_mark_dirty(dat);
        brelse(req->pr_entry_bh);
 }
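
nilfs_dat_commit_entry() now reads as the canonical commit sequence this
patch leaves behind in every metadata file: mark_buffer_dirty() on the block
buffer, nilfs_mdt_mark_dirty() on the owning inode so the segment constructor
collects it, then brelse() to drop the buffer reference.
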
        entry->de_blocknr = cpu_to_le64(blocknr);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(entry_bh);
+       mark_buffer_dirty(entry_bh);
        nilfs_mdt_mark_dirty(dat);
 
        brelse(entry_bh);

diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
        if (buffer_dirty(bh))
                return -EEXIST;
 
-       if (buffer_nilfs_node(bh)) {
-               if (nilfs_btree_broken_node_block(bh)) {
-                       clear_buffer_uptodate(bh);
-                       return -EIO;
-               }
-               nilfs_btnode_mark_dirty(bh);
-       } else {
-               nilfs_mark_buffer_dirty(bh);
+       if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
+               clear_buffer_uptodate(bh);
+               return -EIO;
        }
+       mark_buffer_dirty(bh);
        return 0;
 }
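
This gcinode.c hunk is the only call-site change that is more than a rename:
since ordinary buffers and btree-node buffers no longer need different
dirty-marking helpers, the old if/else collapses into a single validity check
followed by one mark_buffer_dirty() call, with the -EIO handling for broken
btree node blocks preserved unchanged.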
 
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
                return ret;
        }
        nilfs_palloc_commit_alloc_entry(ifile, &req);
-       nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh);
+       mark_buffer_dirty(req.pr_entry_bh);
        nilfs_mdt_mark_dirty(ifile);
        *out_ino = (ino_t)req.pr_entry_nr;
        *out_bh = req.pr_entry_bh;
        raw_inode->i_flags = 0;
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(req.pr_entry_bh);
+       mark_buffer_dirty(req.pr_entry_bh);
        brelse(req.pr_entry_bh);
 
        nilfs_palloc_commit_free_entry(ifile, &req);

diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
                return err;
        }
        nilfs_update_inode(inode, ibh);
-       nilfs_mdt_mark_buffer_dirty(ibh);
+       mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;

diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
        kunmap_atomic(kaddr, KM_USER0);
 
        set_buffer_uptodate(bh);
-       nilfs_mark_buffer_dirty(bh);
+       mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(inode);
        return 0;
 }
        err = nilfs_mdt_read_block(inode, block, 0, &bh);
        if (unlikely(err))
                return err;
-       nilfs_mark_buffer_dirty(bh);
+       mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(inode);
        brelse(bh);
        return 0;

diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
 struct buffer_head *nilfs_mdt_get_frozen_buffer(struct inode *inode,
                                                struct buffer_head *bh);
 
-#define nilfs_mdt_mark_buffer_dirty(bh)        nilfs_mark_buffer_dirty(bh)
-
 static inline void nilfs_mdt_mark_dirty(struct inode *inode)
 {
        if (!test_bit(NILFS_I_DIRTY, &NILFS_I(inode)->i_state))

diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
        return bh;
 }
 
-/*
- * Since the page cache of B-tree node pages or data page cache of pseudo
- * inodes does not have a valid mapping->host pointer, calling
- * mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
- * it calls __mark_inode_dirty(NULL) through __set_page_dirty().
- * To avoid this problem, the old style mark_buffer_dirty() is used instead.
- */
-void nilfs_mark_buffer_dirty(struct buffer_head *bh)
-{
-       if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-               __set_page_dirty_nobuffers(bh->b_page);
-}
-
 struct buffer_head *nilfs_grab_buffer(struct inode *inode,
                                      struct address_space *mapping,
                                      unsigned long blkoff,
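
The comment deleted above records why the private helper existed at all: with
a NULL mapping->host, the generic path crashed roughly as follows (a
simplified sketch of the call chain named in the removed comment, not code
from this patch):

        mark_buffer_dirty(bh)
          -> __set_page_dirty(page, mapping, ...)
               -> __mark_inode_dirty(mapping->host, I_DIRTY_PAGES)  /* NULL */

With the btnode and metadata page caches now carrying a valid mapping->host
(the precondition this removal relies on), mark_buffer_dirty() is safe for
these buffers and the open-coded __set_page_dirty_nobuffers() workaround can
go.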

diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
 BUFFER_FNS(NILFS_Redirected, nilfs_redirected) /* redirected to a copy */
 
 
-void nilfs_mark_buffer_dirty(struct buffer_head *bh);
 int __nilfs_clear_page_dirty(struct page *);
 
 struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *,

diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
                /* The following code is duplicated with cpfile.  But, it is
                   needed to collect the checkpoint even if it was not newly
                   created */
-               nilfs_mdt_mark_buffer_dirty(bh_cp);
+               mark_buffer_dirty(bh_cp);
                nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
                nilfs_cpfile_put_checkpoint(
                        nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
                                              "failed to get inode block.\n");
                                return err;
                        }
-                       nilfs_mdt_mark_buffer_dirty(ibh);
+                       mark_buffer_dirty(ibh);
                        nilfs_mdt_mark_dirty(ifile);
                        spin_lock(&nilfs->ns_inode_lock);
                        if (likely(!ii->i_bh))

diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(header_bh);
+       mark_buffer_dirty(header_bh);
 }
 
 /**
                        kunmap_atomic(kaddr, KM_USER0);
 
                        sui->ncleansegs--;
-                       nilfs_mdt_mark_buffer_dirty(header_bh);
-                       nilfs_mdt_mark_buffer_dirty(su_bh);
+                       mark_buffer_dirty(header_bh);
+                       mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;
        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;
 
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
 }
 
        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;
 
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
 }
 
        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr, KM_USER0);
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
 
        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        NILFS_SUI(sufile)->ncleansegs++;
 
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (!ret) {
-               nilfs_mdt_mark_buffer_dirty(bh);
+               mark_buffer_dirty(bh);
                nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
        }
        su->su_nblocks = cpu_to_le32(nblocks);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(bh);
+       mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(sufile);
        brelse(bh);
 
                nilfs_sufile_mod_counter(header_bh, -1, 0);
                NILFS_SUI(sufile)->ncleansegs--;
        }
-       nilfs_mdt_mark_buffer_dirty(su_bh);
+       mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
 }
 
                }
                kunmap_atomic(kaddr, KM_USER0);
                if (nc > 0) {
-                       nilfs_mdt_mark_buffer_dirty(su_bh);
+                       mark_buffer_dirty(su_bh);
                        ncleaned += nc;
                }
                brelse(su_bh);
        header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
        kunmap_atomic(kaddr, KM_USER0);
 
-       nilfs_mdt_mark_buffer_dirty(header_bh);
+       mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(sufile);
        nilfs_set_nsegments(nilfs, newnsegs);