if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                up_write(&sbi->node_write);
+               atomic_inc(&sbi->wb_sync_req[NODE]);
                err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
+               atomic_dec(&sbi->wb_sync_req[NODE]);
                if (err) {
                        up_write(&sbi->node_change);
                        f2fs_unlock_all(sbi);
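
This hunk is in block_operations() (fs/f2fs/checkpoint.c). The checkpoint's
node flush now announces itself through wb_sync_req[NODE], so concurrent
WB_SYNC_NONE node writeback (gated in f2fs_sync_node_pages() below) backs
off instead of competing for the same dirty node pages.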
 
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
+       struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        int nr_pages;
        pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
                        bool submitted = false;
 
                        /* give a priority to WB_SYNC threads */
-                       if (atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) &&
+                       if (atomic_read(&sbi->wb_sync_req[DATA]) &&
                                        wbc->sync_mode == WB_SYNC_NONE) {
                                done = 1;
                                break;
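
These hunks are in f2fs_write_cache_pages() (fs/f2fs/data.c). The yield
check for data pages already existed; it is only switched to the newly
cached sbi pointer and the DATA slot of the per-type counter array.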
 
        /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
        if (wbc->sync_mode == WB_SYNC_ALL)
-               atomic_inc(&sbi->wb_sync_req);
-       else if (atomic_read(&sbi->wb_sync_req))
+               atomic_inc(&sbi->wb_sync_req[DATA]);
+       else if (atomic_read(&sbi->wb_sync_req[DATA]))
                goto skip_write;
 
        blk_start_plug(&plug);
        blk_finish_plug(&plug);
 
        if (wbc->sync_mode == WB_SYNC_ALL)
-               atomic_dec(&sbi->wb_sync_req);
+               atomic_dec(&sbi->wb_sync_req[DATA]);
        /*
         * if some pages were truncated, we cannot guarantee that their
         * mapping->host can detect pending bios.
         */
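
These two hunks appear to be __f2fs_write_data_pages() (fs/f2fs/data.c).
The change here is mechanical: the pre-existing data-writeback gate moves
from the old scalar sbi->wb_sync_req to the DATA slot of the new array.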
 
        struct percpu_counter alloc_valid_block_count;
 
        /* writeback control */
-       atomic_t wb_sync_req;                   /* count # of WB_SYNC threads */
+       atomic_t wb_sync_req[META];     /* count # of WB_SYNC threads */
 
        /* valid inode count */
        struct percpu_counter total_valid_inode_count;
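
This hunk is in struct f2fs_sb_info (fs/f2fs/f2fs.h). The array is sized by
the enum value META, so it provides one slot per gated writeback type:
wb_sync_req[DATA] and wb_sync_req[NODE]. For reference, the head of
enum page_type from the same header (abridged):

    enum page_type {
            DATA,   /* = 0, indexes wb_sync_req[DATA] */
            NODE,   /* = 1, indexes wb_sync_req[NODE] */
            META,   /* = 2, used here only as the array size */
            /* ... remaining entries omitted ... */
    };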
 
                goto out;
        }
 sync_nodes:
+       atomic_inc(&sbi->wb_sync_req[NODE]);
        ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
+       atomic_dec(&sbi->wb_sync_req[NODE]);
        if (ret)
                goto out;
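
This hunk is in the sync_nodes path of f2fs_do_sync_file() (fs/f2fs/file.c).
An fsync-driven node flush is a sync writer too, so it holds
wb_sync_req[NODE] across f2fs_fsync_node_pages() for the same reason as the
checkpoint hunk above.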
 
 
        block_t start_addr;
        int off;
        int phase = 0;
+       bool fggc = (gc_type == FG_GC);
 
        start_addr = START_BLOCK(sbi, segno);
 
 next_step:
        entry = sum;
 
+       if (fggc && phase == 2)
+               atomic_inc(&sbi->wb_sync_req[NODE]);
+
        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
 
        if (++phase < 3)
                goto next_step;
+
+       if (fggc)
+               atomic_dec(&sbi->wb_sync_req[NODE]);
 }
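
This hunk is in gc_node_segment() (fs/f2fs/gc.c). Foreground GC has to move
node blocks synchronously, so when it enters phase 2 (the phase that
actually writes node pages) it registers as a sync writer; background GC
(!fggc) leaves the counter alone and yields like any other async writer.
The matching decrement runs once all phases are done.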
 
 /*
 
        int step = 0;
        int nwritten = 0;
        int ret = 0;
-       int nr_pages;
+       int nr_pages, done = 0;
 
        pagevec_init(&pvec);
 
 next_step:
        index = 0;
 
-       while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
-                               PAGECACHE_TAG_DIRTY))) {
+       while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
+                       NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
                int i;
 
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        bool submitted = false;
 
+                       /* give a priority to WB_SYNC threads */
+                       if (atomic_read(&sbi->wb_sync_req[NODE]) &&
+                                       wbc->sync_mode == WB_SYNC_NONE) {
+                               done = 1;
+                               break;
+                       }
+
                        /*
                         * flushing sequence with step:
                         * 0. indirect nodes
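
This hunk is in f2fs_sync_node_pages() (fs/f2fs/node.c), which serves both
the sync callers above (checkpoint) and async node writeback. The new done
flag mirrors the data path: a WB_SYNC_NONE walk abandons its pagevec scan
as soon as a sync node writer has raised wb_sync_req[NODE].
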
        if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
                goto skip_write;
 
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               atomic_inc(&sbi->wb_sync_req[NODE]);
+       else if (atomic_read(&sbi->wb_sync_req[NODE]))
+               goto skip_write;
+
        trace_f2fs_writepages(mapping->host, wbc, NODE);
 
        diff = nr_pages_to_write(sbi, NODE, wbc);
        f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
        blk_finish_plug(&plug);
        wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               atomic_dec(&sbi->wb_sync_req[NODE]);
        return 0;
 
 skip_write:
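
This hunk is in f2fs_write_node_pages() (fs/f2fs/node.c), gaining the same
entry gate as __f2fs_write_data_pages(): WB_SYNC_ALL holds
wb_sync_req[NODE] for the whole flush, and WB_SYNC_NONE skips writeback
entirely while it is held.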
 
        for (i = 0; i < NR_COUNT_TYPE; i++)
                atomic_set(&sbi->nr_pages[i], 0);
 
-       atomic_set(&sbi->wb_sync_req, 0);
+       for (i = 0; i < META; i++)
+               atomic_set(&sbi->wb_sync_req[i], 0);
 
        INIT_LIST_HEAD(&sbi->s_list);
        mutex_init(&sbi->umount_mutex);
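
This last hunk is in init_sb_info() (fs/f2fs/super.c); the loop zeroes the
DATA and NODE slots (META is only the array bound). Taken together, the
patch lets sync writeback of node pages interrupt async writeback, the same
way it already worked for data pages. A minimal userspace sketch of that
gate pattern, assuming nothing beyond C11 atomics (names invented for
illustration; this is not f2fs code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int sync_writers;     /* stands in for sbi->wb_sync_req[type] */

    static void writeback(bool sync)
    {
            if (sync)
                    atomic_fetch_add(&sync_writers, 1);     /* announce a sync pass */
            else if (atomic_load(&sync_writers))
                    return;     /* an async pass yields to pending sync work */

            /* ... issue and wait for IO here ... */

            if (sync)
                    atomic_fetch_sub(&sync_writers, 1);     /* release the gate */
    }

A counter rather than a flag lets several sync writers overlap; async
writers resume only after the last one decrements it.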