Replace all occurrences of:
little_endian_variable = cpu_to_leX(leX_to_cpu(little_endian_variable) +
					expression_in_cpu_byteorder);
with:
	leX_add_cpu(&little_endian_variable, expression_in_cpu_byteorder);
Generated with a semantic patch.
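
The semantic patch itself is not reproduced here; a minimal Coccinelle (SmPL)
rule covering the le32 case could look roughly like the sketch below (the
le16/le64 rules would be analogous, and the metavariable names are
illustrative only):

	// Sketch only; not necessarily the exact rule used for this series.
	@@
	expression x;
	expression v;
	@@
	- x = cpu_to_le32(le32_to_cpu(x) + v);
	+ le32_add_cpu(&x, v);
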
Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Cc: Jeff Mahoney <jeffm@suse.com>
Cc: Chris Mason <chris.mason@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
                if (objectid_to_release == le32_to_cpu(map[i])) {
                        /* This incrementation unallocates the objectid. */
                        //map[i]++;
-                       map[i] = cpu_to_le32(le32_to_cpu(map[i]) + 1);
+                       le32_add_cpu(&map[i], 1);
 
                        /* Did we unallocate the last member of an odd sequence, and can shrink oids? */
                        if (map[i] == map[i + 1]) {
                        /* size of objectid map is not changed */
                        if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
                                //objectid_map[i+1]--;
-                               map[i + 1] =
-                                   cpu_to_le32(le32_to_cpu(map[i + 1]) - 1);
+                               le32_add_cpu(&map[i + 1], -1);
                                return;
                        }
 
 
 
                inode_generation =
                    &REISERFS_SB(th->t_super)->s_rs->s_inode_generation;
-               *inode_generation =
-                   cpu_to_le32(le32_to_cpu(*inode_generation) + 1);
+               le32_add_cpu(inode_generation, 1);
        }
 /* USE_INODE_GENERATION_COUNTER */
 #endif
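
For reference, the leX_add_cpu() helpers in include/linux/byteorder/generic.h
simply wrap the open-coded pattern being removed above, which is why the
conversion is behavior-preserving; the le32 variant is along these lines
(paraphrased, not quoted verbatim from the header):

	/*
	 * Roughly what the generic helper does; shown only to illustrate
	 * that the replacement above does not change behavior.
	 */
	static inline void le32_add_cpu(__le32 *var, u32 val)
	{
		*var = cpu_to_le32(le32_to_cpu(*var) + val);
	}

Taking a pointer lets the helper read and write the variable exactly once and
keeps the cpu_to_le32()/le32_to_cpu() pair behind a single call, which is what
makes the mechanical replacement safe.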