struct inode *inode = NILFS_BTNC_I(btnc);
        struct buffer_head *bh;
 
-       bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+       bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
        if (unlikely(!bh))
                return NULL;
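
Every hunk in this patch is the same mechanical conversion: an open-coded
"1 << nr" (or "1UL << nr") becomes BIT(nr). BIT() comes from the kernel's
<linux/bitops.h> and expands to (1UL << (nr)), so the result is always an
unsigned long. A minimal standalone sketch of the equivalence, and of the
one place the types differ:

	#include <assert.h>

	#define BIT(nr)	(1UL << (nr))	/* mirrors <linux/bitops.h> */

	int main(void)
	{
		/* Same value as the open-coded form for small shifts... */
		assert(BIT(7) == (unsigned long)(1 << 7));
		/*
		 * ...but BIT() is unsigned long, so bit 31 is well defined
		 * even where int is 32 bits and 1 << 31 would overflow.
		 */
		assert(BIT(31) == 0x80000000UL);
		return 0;
	}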
 
        struct page *page;
        int err;
 
-       bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+       bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
        if (unlikely(!bh))
                return -ENOMEM;
 
 
 
        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
-       ii->i_state = 1 << NILFS_I_NEW;
+       ii->i_state = BIT(NILFS_I_NEW);
        ii->i_root = root;
 
        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
 
        inode->i_ino = args->ino;
        if (args->for_gc) {
-               NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
+               NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
 
 /*
  * Macros to check inode numbers
  */
-#define NILFS_MDT_INO_BITS   \
-       ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO |    \
-                       1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO |  \
-                       1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
+#define NILFS_MDT_INO_BITS                                             \
+       (BIT(NILFS_DAT_INO) | BIT(NILFS_CPFILE_INO) |                   \
+        BIT(NILFS_SUFILE_INO) | BIT(NILFS_IFILE_INO) |                 \
+        BIT(NILFS_ATIME_INO) | BIT(NILFS_SKETCH_INO))
 
-#define NILFS_SYS_INO_BITS   \
-       ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
+#define NILFS_SYS_INO_BITS (BIT(NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
 
 #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
 
 #define NILFS_MDT_INODE(sb, ino) \
-       ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
+       ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
 #define NILFS_VALID_INODE(sb, ino) \
-       ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
+       ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
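
These macros treat the reserved inode numbers as positions in a bitmask.
Because BIT() already yields an unsigned long, the (unsigned int) casts in
the old definitions can simply be dropped. A userspace sketch of the
membership test, assuming the NILFS_*_INO values from the on-disk header
and a stand-in for ns_first_ino:

	#define BIT(nr)	(1UL << (nr))

	enum {	/* values assumed from the nilfs2 on-disk header */
		NILFS_ROOT_INO = 2, NILFS_DAT_INO, NILFS_CPFILE_INO,
		NILFS_SUFILE_INO, NILFS_IFILE_INO, NILFS_ATIME_INO,
		NILFS_SKETCH_INO = 10,
	};

	#define MDT_INO_BITS	(BIT(NILFS_DAT_INO) | BIT(NILFS_CPFILE_INO) | \
				 BIT(NILFS_SUFILE_INO) | BIT(NILFS_IFILE_INO) | \
				 BIT(NILFS_ATIME_INO) | BIT(NILFS_SKETCH_INO))

	/* stand-in for NILFS_MDT_INODE(); the range check also keeps the
	 * shift amount in BIT(ino) well defined */
	static int is_mdt_inode(unsigned long ino, unsigned long first_ino)
	{
		return ino < first_ino && (MDT_INO_BITS & BIT(ino)) != 0;
	}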
 
 /**
  * struct nilfs_transaction_info: context information for synchronization
 
 #include "mdt.h"
 
 
-#define NILFS_BUFFER_INHERENT_BITS  \
-       ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
-        (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
+#define NILFS_BUFFER_INHERENT_BITS                                     \
+       (BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |       \
+        BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))
 
 static struct buffer_head *
 __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
 {
        struct page *page = bh->b_page;
        const unsigned long clear_bits =
-               (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
-                1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
-                1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
+               (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+                BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+                BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
 
        lock_buffer(bh);
        set_mask_bits(&bh->b_state, clear_bits, 0);
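
set_mask_bits(ptr, mask, bits) atomically rewrites *ptr to
(*ptr & ~mask) | bits; the helper in <linux/bitops.h> does so with a
cmpxchg() loop. Here it is called with bits == 0 purely to clear the whole
clear_bits set in a single update. A non-atomic sketch of the semantics:

	/* what set_mask_bits() computes, minus the cmpxchg() retry loop */
	static void sketch_set_mask_bits(unsigned long *state,
					 unsigned long mask,
					 unsigned long bits)
	{
		*state = (*state & ~mask) | bits;
	}
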
        dbh->b_bdev = sbh->b_bdev;
 
        bh = dbh;
-       bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
+       bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
        while ((bh = bh->b_this_page) != dbh) {
                lock_buffer(bh);
                bits &= bh->b_state;
                unlock_buffer(bh);
        }
-       if (bits & (1UL << BH_Uptodate))
+       if (bits & BIT(BH_Uptodate))
                SetPageUptodate(dpage);
        else
                ClearPageUptodate(dpage);
-       if (bits & (1UL << BH_Mapped))
+       if (bits & BIT(BH_Mapped))
                SetPageMappedToDisk(dpage);
        else
                ClearPageMappedToDisk(dpage);
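
The loop above seeds "bits" with the source buffer's state and then ANDs
in every other buffer on the page, so a page-wide flag survives only if
each buffer carries it. A compact sketch of that accumulation (the flag
values here are illustrative, not copied from enum bh_state_bits):

	#define BIT(nr)	(1UL << (nr))
	enum { BH_Uptodate = 0, BH_Mapped = 5 };	/* illustrative */

	/* the page counts as uptodate only if every buffer is */
	static int page_uptodate(const unsigned long *b_state, int nbufs)
	{
		unsigned long bits = b_state[0];
		int i;

		for (i = 1; i < nbufs; i++)
			bits &= b_state[i];
		return (bits & BIT(BH_Uptodate)) != 0;
	}
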
                create_empty_buffers(dst, sbh->b_size, 0);
 
        if (copy_dirty)
-               mask |= (1UL << BH_Dirty);
+               mask |= BIT(BH_Dirty);
 
        dbh = dbufs = page_buffers(dst);
        do {
        if (page_has_buffers(page)) {
                struct buffer_head *bh, *head;
                const unsigned long clear_bits =
-                       (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
-                        1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
-                        1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
+                       (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+                        BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+                        BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
 
                bh = head = page_buffers(page);
                do {
 
                 */
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
-                       const unsigned long set_bits = (1 << BH_Uptodate);
+                       const unsigned long set_bits = BIT(BH_Uptodate);
                        const unsigned long clear_bits =
-                               (1 << BH_Dirty | 1 << BH_Async_Write |
-                                1 << BH_Delay | 1 << BH_NILFS_Volatile |
-                                1 << BH_NILFS_Redirected);
+                               (BIT(BH_Dirty) | BIT(BH_Async_Write) |
+                                BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
+                                BIT(BH_NILFS_Redirected));
 
                        set_mask_bits(&bh->b_state, clear_bits, set_bits);
                        if (bh == segbuf->sb_super_root) {
 static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
 {
        spin_lock(&sci->sc_state_lock);
-       if (!(sci->sc_flush_request & (1 << bn))) {
+       if (!(sci->sc_flush_request & BIT(bn))) {
                unsigned long prev_req = sci->sc_flush_request;
 
-               sci->sc_flush_request |= (1 << bn);
+               sci->sc_flush_request |= BIT(bn);
                if (!prev_req)
                        wake_up(&sci->sc_wait_daemon);
        }
 }
 
 #define FLUSH_FILE_BIT (0x1) /* data file only */
-#define FLUSH_DAT_BIT  (1 << NILFS_DAT_INO) /* DAT only */
+#define FLUSH_DAT_BIT  BIT(NILFS_DAT_INO) /* DAT only */
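
sc_flush_request is a bitmap keyed by a small bit number: bit 0
(FLUSH_FILE_BIT) stands for ordinary data files, and bit NILFS_DAT_INO
(FLUSH_DAT_BIT) for the DAT metadata file, which is why BIT() is a natural
fit. nilfs_segctor_do_flush() above sets the requested bit and wakes the
segment constructor only on the empty-to-non-empty transition. A sketch of
that pattern, with the spinlock elided and names invented for illustration:

	#define BIT(nr)	(1UL << (nr))

	static unsigned long flush_request;	/* stands in for sc_flush_request */

	/* returns 1 when the caller should wake the flush daemon */
	static int request_flush(int bn)
	{
		if (flush_request & BIT(bn))
			return 0;			/* already pending */
		flush_request |= BIT(bn);
		return flush_request == BIT(bn);	/* previously empty */
	}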
 
 /**
  * nilfs_segctor_accept - record accepted sequence count of log-write requests
 
 
        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
-       if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
+       if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr);
                return;
        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
-       su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
+       su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
        kunmap_atomic(kaddr);
 
        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
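
One subtlety in these sufile hunks: BIT() produces an unsigned long, while
su_flags is a 32-bit little-endian on-disk field, so cpu_to_le32()
truncates the value. That is harmless because every segment-usage flag bit
is well below 32. A userspace sketch of the "dirty and empty" test, with
<endian.h> standing in for cpu_to_le32() and the flag layout assumed from
the on-disk header:

	#include <endian.h>
	#include <stdint.h>

	#define BIT(nr)	(1UL << (nr))
	enum { SEG_USAGE_ACTIVE, SEG_USAGE_DIRTY, SEG_USAGE_ERROR };

	struct segment_usage {		/* assumed on-disk layout */
		uint64_t su_lastmod;
		uint32_t su_nblocks;
		uint32_t su_flags;	/* little-endian */
	};

	/* mirrors the su_flags/su_nblocks check above */
	static int is_empty_dirty(const struct segment_usage *su)
	{
		return su->su_flags == htole32(BIT(SEG_USAGE_DIRTY)) &&
		       su->su_nblocks == htole32(0);
	}
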
                su2 = su;
                for (j = 0; j < n; j++, su = (void *)su + susz) {
                        if ((le32_to_cpu(su->su_flags) &
-                            ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
+                            ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
                            nilfs_segment_is_active(nilfs, segnum + j)) {
                                ret = -EBUSY;
                                kunmap_atomic(kaddr);
                        si->sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si->sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si->sui_flags = le32_to_cpu(su->su_flags) &
-                               ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+                               ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si->sui_flags |=
-                                       (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+                                       BIT(NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr);
                brelse(su_bh);
                         * disk.
                         */
                        sup->sup_sui.sui_flags &=
-                                       ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+                                       ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
 
                        cleansi = nilfs_suinfo_clean(&sup->sup_sui);
                        cleansu = nilfs_segment_usage_clean(su);