ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
 }
 
-static int f2fs_write_meta_page(struct page *page,
-                               struct writeback_control *wbc)
+static int __f2fs_write_meta_page(struct page *page,
+                               struct writeback_control *wbc,
+                               enum iostat_type io_type)
 {
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;
 
-       write_meta_page(sbi, page);
+       write_meta_page(sbi, page, io_type);
        dec_page_count(sbi, F2FS_DIRTY_META);
 
        if (wbc->for_reclaim)
        return AOP_WRITEPAGE_ACTIVATE;
 }
 
+static int f2fs_write_meta_page(struct page *page,
+                               struct writeback_control *wbc)
+{
+       return __f2fs_write_meta_page(page, wbc, FS_META_IO);
+}
+
 static int f2fs_write_meta_pages(struct address_space *mapping,
                                struct writeback_control *wbc)
 {
 
        trace_f2fs_writepages(mapping->host, wbc, META);
        diff = nr_pages_to_write(sbi, META, wbc);
-       written = sync_meta_pages(sbi, META, wbc->nr_to_write);
+       written = sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
        mutex_unlock(&sbi->cp_mutex);
        wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
        return 0;
 }
 
 long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
-                                               long nr_to_write)
+                               long nr_to_write, enum iostat_type io_type)
 {
        struct address_space *mapping = META_MAPPING(sbi);
        pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
-                       if (mapping->a_ops->writepage(page, &wbc)) {
+                       if (__f2fs_write_meta_page(page, &wbc, io_type)) {
                                unlock_page(page);
                                break;
                        }
        if (inode) {
                unsigned long cur_ino = inode->i_ino;
 
+               if (is_dir)
+                       F2FS_I(inode)->cp_task = current;
+
                filemap_fdatawrite(inode->i_mapping);
+
+               if (is_dir)
+                       F2FS_I(inode)->cp_task = NULL;
+
                iput(inode);
                /* We need to give cpu to another writers. */
                if (ino == cur_ino) {
 
        if (get_pages(sbi, F2FS_DIRTY_NODES)) {
                up_write(&sbi->node_write);
-               err = sync_node_pages(sbi, &wbc, false);
+               err = sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
                if (err) {
                        up_write(&sbi->node_change);
                        f2fs_unlock_all(sbi);
 
        /* Flush all the NAT/SIT pages */
        while (get_pages(sbi, F2FS_DIRTY_META)) {
-               sync_meta_pages(sbi, META, LONG_MAX);
+               sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
                if (unlikely(f2fs_cp_error(sbi)))
                        return -EIO;
        }
 
                /* Flush all the NAT BITS pages */
                while (get_pages(sbi, F2FS_DIRTY_META)) {
-                       sync_meta_pages(sbi, META, LONG_MAX);
+                       sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
                        if (unlikely(f2fs_cp_error(sbi)))
                                return -EIO;
                }
        percpu_counter_set(&sbi->alloc_valid_block_count, 0);
 
        /* Here, we only have one bio having CP pack */
-       sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
+       sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO);
 
        /* wait for previous submitted meta pages writeback */
        wait_on_all_pages_writeback(sbi);
 
 }
 
 static int __write_data_page(struct page *page, bool *submitted,
-                               struct writeback_control *wbc)
+                               struct writeback_control *wbc,
+                               enum iostat_type io_type)
 {
        struct inode *inode = page->mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
                .encrypted_page = NULL,
                .submitted = false,
                .need_lock = LOCK_RETRY,
+               .io_type = io_type,
        };
 
        trace_f2fs_writepage(page, DATA);
 static int f2fs_write_data_page(struct page *page,
                                        struct writeback_control *wbc)
 {
-       return __write_data_page(page, NULL, wbc);
+       return __write_data_page(page, NULL, wbc, FS_DATA_IO);
 }
 
 /*
  * warm/hot data page.
  */
 static int f2fs_write_cache_pages(struct address_space *mapping,
-                                       struct writeback_control *wbc)
+                                       struct writeback_control *wbc,
+                                       enum iostat_type io_type)
 {
        int ret = 0;
        int done = 0;
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
-                       ret = __write_data_page(page, &submitted, wbc);
+                       ret = __write_data_page(page, &submitted, wbc, io_type);
                        if (unlikely(ret)) {
                                /*
                                 * keep nr_to_write, since vfs uses this to
        return ret;
 }
 
-static int f2fs_write_data_pages(struct address_space *mapping,
-                           struct writeback_control *wbc)
+int __f2fs_write_data_pages(struct address_space *mapping,
+                                               struct writeback_control *wbc,
+                                               enum iostat_type io_type)
 {
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
                goto skip_write;
 
        blk_start_plug(&plug);
-       ret = f2fs_write_cache_pages(mapping, wbc);
+       ret = f2fs_write_cache_pages(mapping, wbc, io_type);
        blk_finish_plug(&plug);
 
        if (wbc->sync_mode == WB_SYNC_ALL)
        return 0;
 }
 
+static int f2fs_write_data_pages(struct address_space *mapping,
+                           struct writeback_control *wbc)
+{
+       struct inode *inode = mapping->host;
+
+       return __f2fs_write_data_pages(mapping, wbc,
+                       F2FS_I(inode)->cp_task == current ?
+                       FS_CP_DATA_IO : FS_DATA_IO);
+}
+
 static void f2fs_write_failed(struct address_space *mapping, loff_t to)
 {
        struct inode *inode = mapping->host;
        up_read(&F2FS_I(inode)->dio_rwsem[rw]);
 
        if (rw == WRITE) {
-               if (err > 0)
+               if (err > 0) {
+                       f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
+                                                                       err);
                        set_inode_flag(inode, FI_UPDATE_WRITE);
-               else if (err < 0)
+               } else if (err < 0) {
                        f2fs_write_failed(mapping, offset + count);
+               }
        }
 
        trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
 
        f2fs_hash_t chash;              /* hash value of given file name */
        unsigned int clevel;            /* maximum level of given file name */
        struct task_struct *task;       /* lookup and create consistency */
+       struct task_struct *cp_task;    /* separate cp/wb IO stats */
        nid_t i_xattr_nid;              /* node id that contains xattrs */
        loff_t  last_disk_size;         /* lastly written file size */
 
        LOCK_RETRY,
 };
 
+enum iostat_type {
+       APP_DIRECT_IO,                  /* app direct IOs */
+       APP_BUFFERED_IO,                /* app buffered IOs */
+       APP_WRITE_IO,                   /* app write IOs */
+       APP_MAPPED_IO,                  /* app mapped IOs */
+       FS_DATA_IO,                     /* data IOs from kworker/fsync/reclaimer */
+       FS_NODE_IO,                     /* node IOs from kworker/fsync/reclaimer */
+       FS_META_IO,                     /* meta IOs from kworker/reclaimer */
+       FS_GC_DATA_IO,                  /* data IOs from foreground gc */
+       FS_GC_NODE_IO,                  /* node IOs from foreground gc */
+       FS_CP_DATA_IO,                  /* data IOs from checkpoint */
+       FS_CP_NODE_IO,                  /* node IOs from checkpoint */
+       FS_CP_META_IO,                  /* meta IOs from checkpoint */
+       FS_DISCARD,                     /* discard */
+       NR_IO_TYPE,
+};
+
 struct f2fs_io_info {
        struct f2fs_sb_info *sbi;       /* f2fs_sb_info pointer */
        enum page_type type;    /* contains DATA/NODE/META/META_FLUSH */
        bool submitted;         /* indicate IO submission */
        int need_lock;          /* indicate we need to lock cp_rwsem */
        bool in_list;           /* indicate fio is in io_list */
+       enum iostat_type io_type;       /* io type */
 };
 
 #define is_read_io(rw) ((rw) == READ)
 #endif
        spinlock_t stat_lock;                   /* lock for stat operations */
 
+       /* For app/fs IO statistics */
+       spinlock_t iostat_lock;
+       unsigned long long write_iostat[NR_IO_TYPE];
+       bool iostat_enable;
+
        /* For sysfs suppport */
        struct kobject s_kobj;
        struct completion s_kobj_unregister;
                sizeof((f2fs_inode)->field))                    \
                <= (F2FS_OLD_ATTRIBUTE_SIZE + extra_isize))     \
 
+static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
+{
+       int i;
+
+       spin_lock(&sbi->iostat_lock);
+       for (i = 0; i < NR_IO_TYPE; i++)
+               sbi->write_iostat[i] = 0;
+       spin_unlock(&sbi->iostat_lock);
+}
+
+static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
+                       enum iostat_type type, unsigned long long io_bytes)
+{
+       if (!sbi->iostat_enable)
+               return;
+       spin_lock(&sbi->iostat_lock);
+       sbi->write_iostat[type] += io_bytes;
+
+       if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
+               sbi->write_iostat[APP_BUFFERED_IO] =
+                       sbi->write_iostat[APP_WRITE_IO] -
+                       sbi->write_iostat[APP_DIRECT_IO];
+       spin_unlock(&sbi->iostat_lock);
+}
+
 /*
  * file.c
  */
 int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
                        struct writeback_control *wbc, bool atomic);
 int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
-                       bool do_balance);
+                       bool do_balance, enum iostat_type io_type);
 void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
 bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
 bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc);
 struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
 void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr);
-void write_meta_page(struct f2fs_sb_info *sbi, struct page *page);
+void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+                                               enum iostat_type io_type);
 void write_node_page(unsigned int nid, struct f2fs_io_info *fio);
 void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio);
 int rewrite_data_page(struct f2fs_io_info *fio);
                        int type, bool sync);
 void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
 long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
-                       long nr_to_write);
+                       long nr_to_write, enum iostat_type io_type);
 void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
 void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
 void release_ino_entry(struct f2fs_sb_info *sbi, bool all);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        u64 start, u64 len);
 void f2fs_set_page_dirty_nobuffers(struct page *page);
+int __f2fs_write_data_pages(struct address_space *mapping,
+                                               struct writeback_control *wbc,
+                                               enum iostat_type io_type);
 void f2fs_invalidate_page(struct page *page, unsigned int offset,
                        unsigned int length);
 int f2fs_release_page(struct page *page, gfp_t wait);
 
        if (!PageUptodate(page))
                SetPageUptodate(page);
 
+       f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
+
        trace_f2fs_vm_page_mkwrite(page, DATA);
 mapped:
        /* fill the page */
                f2fs_stop_checkpoint(sbi, false);
                break;
        case F2FS_GOING_DOWN_METAFLUSH:
-               sync_meta_pages(sbi, META, LONG_MAX);
+               sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
                f2fs_stop_checkpoint(sbi, false);
                break;
        default:
                ret = __generic_file_write_iter(iocb, from);
                blk_finish_plug(&plug);
                clear_inode_flag(inode, FI_NO_PREALLOC);
+
+               if (ret > 0)
+                       f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
        }
        inode_unlock(inode);
 
 
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_write(&fio);
 
+       f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
+
        f2fs_update_data_blkaddr(&dn, newaddr);
        set_inode_flag(inode, FI_APPEND_WRITE);
        if (page->index == 0)
                        .page = page,
                        .encrypted_page = NULL,
                        .need_lock = LOCK_REQ,
+                       .io_type = FS_GC_DATA_IO,
                };
                bool is_dirty = PageDirty(page);
                int err;
 
                .op_flags = REQ_SYNC | REQ_PRIO,
                .page = page,
                .encrypted_page = NULL,
+               .io_type = FS_DATA_IO,
        };
        int dirty, err;
 
 
 }
 
 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
-                               struct writeback_control *wbc, bool do_balance)
+                               struct writeback_control *wbc, bool do_balance,
+                               enum iostat_type io_type)
 {
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
        nid_t nid;
                .page = page,
                .encrypted_page = NULL,
                .submitted = false,
+               .io_type = io_type,
        };
 
        trace_f2fs_writepage(page, NODE);
 static int f2fs_write_node_page(struct page *page,
                                struct writeback_control *wbc)
 {
-       return __write_node_page(page, false, NULL, wbc, false);
+       return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
 }
 
 int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
 
                        ret = __write_node_page(page, atomic &&
                                                page == last_page,
-                                               &submitted, wbc, true);
+                                               &submitted, wbc, true,
+                                               FS_NODE_IO);
                        if (ret) {
                                unlock_page(page);
                                f2fs_put_page(last_page, 0);
 }
 
 int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc,
-                                                       bool do_balance)
+                               bool do_balance, enum iostat_type io_type)
 {
        pgoff_t index, end;
        struct pagevec pvec;
                        set_dentry_mark(page, 0);
 
                        ret = __write_node_page(page, false, &submitted,
-                                                       wbc, do_balance);
+                                               wbc, do_balance, io_type);
                        if (ret)
                                unlock_page(page);
                        else if (submitted)
        diff = nr_pages_to_write(sbi, NODE, wbc);
        wbc->sync_mode = WB_SYNC_NONE;
        blk_start_plug(&plug);
-       sync_node_pages(sbi, wbc, true);
+       sync_node_pages(sbi, wbc, true, FS_NODE_IO);
        blk_finish_plug(&plug);
        wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
        return 0;
 
                .type = DATA,
                .op = REQ_OP_WRITE,
                .op_flags = REQ_SYNC | REQ_PRIO,
+               .io_type = FS_DATA_IO,
        };
        pgoff_t last_idx = ULONG_MAX;
        int err = 0;
                        submit_bio(bio);
                        list_move_tail(&dc->list, &dcc->wait_list);
                        __check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+
+                       f2fs_update_iostat(sbi, FS_DISCARD, 1);
                }
        } else {
                __remove_discard_cmd(sbi, dc);
        }
 }
 
-void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
+void write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
+                                       enum iostat_type io_type)
 {
        struct f2fs_io_info fio = {
                .sbi = sbi,
 
        set_page_writeback(page);
        f2fs_submit_page_write(&fio);
+
+       f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
 }
 
 void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
 
        set_summary(&sum, nid, 0, 0);
        do_write_page(&sum, fio);
+
+       f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
 }
 
 void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
        do_write_page(&sum, fio);
        f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
+
+       f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
 }
 
 int rewrite_data_page(struct f2fs_io_info *fio)
 {
+       int err;
+
        fio->new_blkaddr = fio->old_blkaddr;
        stat_inc_inplace_blocks(fio->sbi);
-       return f2fs_submit_page_bio(fio);
+
+       err = f2fs_submit_page_bio(fio);
+
+       f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
+
+       return err;
 }
 
 void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
        set_sbi_flag(sbi, SBI_POR_DOING);
        spin_lock_init(&sbi->stat_lock);
 
+       /* init iostat info */
+       spin_lock_init(&sbi->iostat_lock);
+       sbi->iostat_enable = false;
+
        for (i = 0; i < NR_PAGE_TYPE; i++) {
                int n = (i == META) ? 1: NR_TEMP_TYPE;
                int j;
 
                return count;
        }
        *ui = t;
+
+       if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
+               f2fs_reset_iostat(sbi);
+
        return count;
 }
 
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
 #ifdef CONFIG_F2FS_FAULT_INJECTION
 F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
 F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
        ATTR_LIST(dirty_nats_ratio),
        ATTR_LIST(cp_interval),
        ATTR_LIST(idle_interval),
+       ATTR_LIST(iostat_enable),
 #ifdef CONFIG_F2FS_FAULT_INJECTION
        ATTR_LIST(inject_rate),
        ATTR_LIST(inject_type),
        return 0;
 }
 
+static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+{
+       struct super_block *sb = seq->private;
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       time64_t now = ktime_get_real_seconds();
+
+       if (!sbi->iostat_enable)
+               return 0;
+
+       seq_printf(seq, "time:          %-16llu\n", now);
+
+       /* print app IOs */
+       seq_printf(seq, "app buffered:  %-16llu\n",
+                               sbi->write_iostat[APP_BUFFERED_IO]);
+       seq_printf(seq, "app direct:    %-16llu\n",
+                               sbi->write_iostat[APP_DIRECT_IO]);
+       seq_printf(seq, "app mapped:    %-16llu\n",
+                               sbi->write_iostat[APP_MAPPED_IO]);
+
+       /* print fs IOs */
+       seq_printf(seq, "fs data:       %-16llu\n",
+                               sbi->write_iostat[FS_DATA_IO]);
+       seq_printf(seq, "fs node:       %-16llu\n",
+                               sbi->write_iostat[FS_NODE_IO]);
+       seq_printf(seq, "fs meta:       %-16llu\n",
+                               sbi->write_iostat[FS_META_IO]);
+       seq_printf(seq, "fs gc data:    %-16llu\n",
+                               sbi->write_iostat[FS_GC_DATA_IO]);
+       seq_printf(seq, "fs gc node:    %-16llu\n",
+                               sbi->write_iostat[FS_GC_NODE_IO]);
+       seq_printf(seq, "fs cp data:    %-16llu\n",
+                               sbi->write_iostat[FS_CP_DATA_IO]);
+       seq_printf(seq, "fs cp node:    %-16llu\n",
+                               sbi->write_iostat[FS_CP_NODE_IO]);
+       seq_printf(seq, "fs cp meta:    %-16llu\n",
+                               sbi->write_iostat[FS_CP_META_IO]);
+       seq_printf(seq, "fs discard:    %-16llu\n",
+                               sbi->write_iostat[FS_DISCARD]);
+
+       return 0;
+}
+
 #define F2FS_PROC_FILE_DEF(_name)                                      \
 static int _name##_open_fs(struct inode *inode, struct file *file)     \
 {                                                                      \
 
 F2FS_PROC_FILE_DEF(segment_info);
 F2FS_PROC_FILE_DEF(segment_bits);
+F2FS_PROC_FILE_DEF(iostat_info);
 
 int __init f2fs_init_sysfs(void)
 {
                                 &f2fs_seq_segment_info_fops, sb);
                proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
                                 &f2fs_seq_segment_bits_fops, sb);
+               proc_create_data("iostat_info", S_IRUGO, sbi->s_proc,
+                               &f2fs_seq_iostat_info_fops, sb);
        }
        return 0;
 }
 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
 {
        if (sbi->s_proc) {
+               remove_proc_entry("iostat_info", sbi->s_proc);
                remove_proc_entry("segment_info", sbi->s_proc);
                remove_proc_entry("segment_bits", sbi->s_proc);
                remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);