 ckpt->next_free_nid = cpu_to_le32(last_nid);
 }
 
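+/*
+ * Check whether checkpoint should flush quota: journalled quota is in
+ * use, flushing is neither being skipped nor deferred to repair, and
+ * quota info or quota data pages are dirty.
+ */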
+static bool __need_flush_quota(struct f2fs_sb_info *sbi)
+{
+       if (!is_journalled_quota(sbi))
+               return false;
+       if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
+               return false;
+       if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
+               return false;
+       if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH))
+               return true;
+       if (get_pages(sbi, F2FS_DIRTY_QDATA))
+               return true;
+       return false;
+}
+
 /*
  * Freeze all the FS-operations for checkpoint.
  */
                .for_reclaim = 0,
        };
        struct blk_plug plug;
-       int err = 0;
+       int err = 0, cnt = 0;
 
        blk_start_plug(&plug);
 
-retry_flush_dents:
+retry_flush_quotas:
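+       /*
+        * Flush dirty quota data before freezing all operations; bound
+        * the retries, and past DEFAULT_RETRY_QUOTA_FLUSH_COUNT give up
+        * and defer repair to fsck via CP_QUOTA_NEED_FSCK_FLAG.
+        */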
+       if (__need_flush_quota(sbi)) {
+               int locked;
+
+               if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
+                       set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+                       f2fs_lock_all(sbi);
+                       goto retry_flush_dents;
+               }
+               clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+
+               /* trylock fails only during mount/umount/freeze/quotactl */
+               locked = down_read_trylock(&sbi->sb->s_umount);
+               f2fs_quota_sync(sbi->sb, -1);
+               if (locked)
+                       up_read(&sbi->sb->s_umount);
+       }
+
        f2fs_lock_all(sbi);
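+       /* recheck under lock_all(): quota may have been dirtied again */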
+       if (__need_flush_quota(sbi)) {
+               f2fs_unlock_all(sbi);
+               cond_resched();
+               goto retry_flush_quotas;
+       }
+
+retry_flush_dents:
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
                f2fs_unlock_all(sbi);
                if (err)
                        goto out;
                cond_resched();
-               goto retry_flush_dents;
+               goto retry_flush_quotas;
        }
 
        /*
         */
        down_write(&sbi->node_change);
 
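+       /* quota can still be dirtied here; recheck before freezing nodes */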
+       if (__need_flush_quota(sbi)) {
+               up_write(&sbi->node_change);
+               f2fs_unlock_all(sbi);
+               goto retry_flush_quotas;
+       }
+
        if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
                up_write(&sbi->node_change);
                f2fs_unlock_all(sbi);
                if (err)
                        goto out;
                cond_resched();
-               goto retry_flush_dents;
+               goto retry_flush_quotas;
        }
 
 retry_flush_nodes:
        else
                __clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);
 
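+       /*
+        * If quota flushing was skipped or quota may be corrupted, persist
+        * CP_QUOTA_NEED_FSCK_FLAG so a later mount or fsck can repair it.
+        */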
+       if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
+               __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+       else
+               __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+
+       if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
+               __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
+
        /* set this flag to activate crc|cp_ver for recovery */
        __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
        __clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);
 
        clear_sbi_flag(sbi, SBI_IS_DIRTY);
        clear_sbi_flag(sbi, SBI_NEED_CP);
+       clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
        sbi->unusable_block_count = 0;
        __set_cp_next_pack(sbi);
 
 
                        inode->i_ino ==  F2FS_NODE_INO(sbi) ||
                        S_ISDIR(inode->i_mode) ||
                        (S_ISREG(inode->i_mode) &&
-                       is_inode_flag_set(inode, FI_ATOMIC_FILE)) ||
+                       (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
                        is_cold_data(page))
                return true;
        return false;
                return true;
        if (S_ISDIR(inode->i_mode))
                return true;
+       if (IS_NOQUOTA(inode))
+               return true;
        if (f2fs_is_atomic_file(inode))
                return true;
        if (fio) {
        }
 
        unlock_page(page);
-       if (!S_ISDIR(inode->i_mode))
+       if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode))
                f2fs_balance_fs(sbi, need_balance_fs);
 
        if (unlikely(f2fs_cp_error(sbi))) {
 {
        if (!S_ISREG(inode->i_mode))
                return false;
+       if (IS_NOQUOTA(inode))
+               return false;
        if (wbc->sync_mode != WB_SYNC_ALL)
                return true;
        if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto skip_write;
 
-       if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
+       if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
+                       wbc->sync_mode == WB_SYNC_NONE &&
                        get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
                        f2fs_available_free_memory(sbi, DIRTY_DENTS))
                goto skip_write;
                down_write(&F2FS_I(inode)->i_mmap_sem);
 
                truncate_pagecache(inode, i_size);
-               f2fs_truncate_blocks(inode, i_size, true);
+               f2fs_truncate_blocks(inode, i_size, true, true);
 
                up_write(&F2FS_I(inode)->i_mmap_sem);
                up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
        if (err)
                goto fail;
 
-       if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
+       if (need_balance && !IS_NOQUOTA(inode) &&
+                       has_not_enough_free_secs(sbi, 0, 0)) {
                unlock_page(page);
                f2fs_balance_fs(sbi, true);
                lock_page(page);
 
 
 #define DEFAULT_RETRY_IO_COUNT 8       /* maximum retry read IO count */
 
+/* maximum retry quota flush count */
+#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT                8
+
 #define F2FS_LINK_MAX  0xffffffff      /* maximum link count per file */
 
 #define MAX_DIR_RA_PAGES       4       /* maximum ra pages of dir */
        SBI_IS_SHUTDOWN,                        /* shutdown by ioctl */
        SBI_IS_RECOVERED,                       /* recovered orphan/data */
        SBI_CP_DISABLED,                        /* CP was disabled last mount */
+       SBI_QUOTA_NEED_FLUSH,                   /* need to flush quota info in CP */
+       SBI_QUOTA_SKIP_FLUSH,                   /* skip flushing quota in current CP */
+       SBI_QUOTA_NEED_REPAIR,                  /* quota file may be corrupted */
 };
 
 enum {
 {
        block_t valid_block_count;
        unsigned int valid_node_count;
-       bool quota = inode && !is_inode;
+       int err;
 
-       if (quota) {
-               int ret = dquot_reserve_block(inode, 1);
-               if (ret)
-                       return ret;
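+       /*
+        * Charge quota before f2fs accounting: a new inode charges inode
+        * quota, any other node reserves one block of space quota.
+        */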
+       if (is_inode) {
+               if (inode) {
+                       err = dquot_alloc_inode(inode);
+                       if (err)
+                               return err;
+               }
+       } else {
+               err = dquot_reserve_block(inode, 1);
+               if (err)
+                       return err;
        }
 
        if (time_to_inject(sbi, FAULT_BLOCK)) {
        return 0;
 
 enospc:
-       if (quota)
+       if (is_inode) {
+               if (inode)
+                       dquot_free_inode(inode);
+       } else {
                dquot_release_reservation_block(inode, 1);
+       }
        return -ENOSPC;
 }
 
 
        spin_unlock(&sbi->stat_lock);
 
-       if (!is_inode)
+       if (is_inode)
+               dquot_free_inode(inode);
+       else
                f2fs_i_blocks_write(inode, 1, false, true);
 }
 
  */
 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
+                                                       bool buf_write);
 int f2fs_truncate(struct inode *inode);
 int f2fs_getattr(const struct path *path, struct kstat *stat,
                        u32 request_mask, unsigned int flags);
 int f2fs_inode_dirtied(struct inode *inode, bool sync);
 void f2fs_inode_synced(struct inode *inode);
 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
+int f2fs_quota_sync(struct super_block *sb, int type);
 void f2fs_quota_off_umount(struct super_block *sb);
 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
 int f2fs_sync_fs(struct super_block *sb, int sync);
 #endif
 
 #endif
+
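+/*
+ * Quota updates can be journalled by checkpoint only when the quota
+ * files live inside f2fs: the quota_ino feature or quota files named
+ * via mount options.
+ */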
+static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
+{
+#ifdef CONFIG_QUOTA
+       if (f2fs_sb_has_quota_ino(sbi->sb))
+               return true;
+       if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
+               F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
+               F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
+               return true;
+#endif
+       return false;
+}
 
        return 0;
 }
 
-int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
+int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock,
+                                                       bool buf_write)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        int count = 0, err = 0;
        struct page *ipage;
        bool truncate_page = false;
+       int flag = buf_write ? F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO;
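+       /*
+        * The buffered-write path takes the lighter PRE_AIO mapping lock
+        * instead of lock_op(), avoiding deadlock against checkpoint.
+        */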
 
        trace_f2fs_truncate_blocks_enter(inode, from);
 
                goto free_partial;
 
        if (lock)
-               f2fs_lock_op(sbi);
+               __do_map_lock(sbi, flag, true);
 
        ipage = f2fs_get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
        err = f2fs_truncate_inode_blocks(inode, free_from);
 out:
        if (lock)
-               f2fs_unlock_op(sbi);
+               __do_map_lock(sbi, flag, false);
 free_partial:
        /* lastly zero out the first data page */
        if (!err)
                        return err;
        }
 
-       err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+       err = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
        if (err)
                return err;
 
                !uid_eq(attr->ia_uid, inode->i_uid)) ||
                (attr->ia_valid & ATTR_GID &&
                !gid_eq(attr->ia_gid, inode->i_gid))) {
+               f2fs_lock_op(F2FS_I_SB(inode));
                err = dquot_transfer(inode, attr);
-               if (err)
+               if (err) {
+                       set_sbi_flag(F2FS_I_SB(inode),
+                                       SBI_QUOTA_NEED_REPAIR);
+                       f2fs_unlock_op(F2FS_I_SB(inode));
                        return err;
+               }
+               /*
+                * update uid/gid under lock_op(), so that dquot and inode can
+                * be updated atomically.
+                */
+               if (attr->ia_valid & ATTR_UID)
+                       inode->i_uid = attr->ia_uid;
+               if (attr->ia_valid & ATTR_GID)
+                       inode->i_gid = attr->ia_gid;
+               f2fs_mark_inode_dirty_sync(inode, true);
+               f2fs_unlock_op(F2FS_I_SB(inode));
        }
 
        if (attr->ia_valid & ATTR_SIZE) {
        new_size = i_size_read(inode) - len;
        truncate_pagecache(inode, new_size);
 
-       ret = f2fs_truncate_blocks(inode, new_size, true);
+       ret = f2fs_truncate_blocks(inode, new_size, true, false);
        up_write(&F2FS_I(inode)->i_mmap_sem);
        if (!ret)
                f2fs_i_size_write(inode, new_size);
        f2fs_balance_fs(sbi, true);
 
        down_write(&F2FS_I(inode)->i_mmap_sem);
-       ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+       ret = f2fs_truncate_blocks(inode, i_size_read(inode), true, false);
        up_write(&F2FS_I(inode)->i_mmap_sem);
        if (ret)
                return ret;
 
                clear_inode_flag(inode, FI_INLINE_DATA);
                f2fs_put_page(ipage, 1);
        } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
-               if (f2fs_truncate_blocks(inode, 0, false))
+               if (f2fs_truncate_blocks(inode, 0, false, false))
                        return false;
                goto process_inline;
        }
        return 0;
 punch_dentry_pages:
        truncate_inode_pages(&dir->i_data, 0);
-       f2fs_truncate_blocks(dir, 0, false);
+       f2fs_truncate_blocks(dir, 0, false, false);
        f2fs_remove_dirty_inode(dir);
        return err;
 }
 
        if (inode->i_nlink || is_bad_inode(inode))
                goto no_delete;
 
-       dquot_initialize(inode);
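+       /* on failure, continue eviction but leave quota repair to fsck */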
+       err = dquot_initialize(inode);
+       if (err) {
+               err = 0;
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+       }
 
        f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
        f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
                goto retry;
        }
 
-       if (err)
+       if (err) {
                f2fs_update_inode_page(inode);
-       dquot_free_inode(inode);
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+       }
        sb_end_intwrite(inode->i_sb);
 no_delete:
        dquot_drop(inode);
 
        if (err)
                goto fail_drop;
 
-       err = dquot_alloc_inode(inode);
-       if (err)
-               goto fail_drop;
-
        set_inode_flag(inode, FI_NEW_INODE);
 
        /* If the directory encrypted, then we should encrypt the inode. */
 
        return err;
 }
 
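+/*
+ * Move quota usage to the uid/gid recorded in the recovered inode, so
+ * roll-forward recovery keeps quota accounting consistent.
+ */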
+static int recover_quota_data(struct inode *inode, struct page *page)
+{
+       struct f2fs_inode *raw = F2FS_INODE(page);
+       struct iattr attr;
+       uid_t i_uid = le32_to_cpu(raw->i_uid);
+       gid_t i_gid = le32_to_cpu(raw->i_gid);
+       int err;
+
+       memset(&attr, 0, sizeof(attr));
+
+       attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
+       attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);
+
+       if (!uid_eq(attr.ia_uid, inode->i_uid))
+               attr.ia_valid |= ATTR_UID;
+       if (!gid_eq(attr.ia_gid, inode->i_gid))
+               attr.ia_valid |= ATTR_GID;
+
+       if (!attr.ia_valid)
+               return 0;
+
+       err = dquot_transfer(inode, &attr);
+       if (err)
+               set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
+       return err;
+}
+
 static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
 {
        if (ri->i_inline & F2FS_PIN_FILE)
                clear_inode_flag(inode, FI_DATA_EXIST);
 }
 
-static void recover_inode(struct inode *inode, struct page *page)
+static int recover_inode(struct inode *inode, struct page *page)
 {
        struct f2fs_inode *raw = F2FS_INODE(page);
        char *name;
+       int err;
 
        inode->i_mode = le16_to_cpu(raw->i_mode);
+
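+       /* transfer quota before uid/gid are overwritten below */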
+       err = recover_quota_data(inode, page);
+       if (err)
+               return err;
+
        i_uid_write(inode, le32_to_cpu(raw->i_uid));
        i_gid_write(inode, le32_to_cpu(raw->i_gid));
 
        f2fs_msg(inode->i_sb, KERN_NOTICE,
                "recover_inode: ino = %x, name = %s, inline = %x",
                        ino_of_node(page), name, raw->i_inline);
+       return 0;
 }
 
 static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
                 * In this case, we can lose the latest inode(x).
                 * So, call recover_inode for the inode update.
                 */
-               if (IS_INODE(page))
-                       recover_inode(entry->inode, page);
+               if (IS_INODE(page)) {
+                       err = recover_inode(entry->inode, page);
+                       if (err)
+                               break;
+               }
                if (entry->last_dentry == blkaddr) {
                        err = recover_dentry(entry->inode, page, dir_list);
                        if (err) {
 
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto repeat;
                        }
+                       set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
                        return PTR_ERR(page);
                }
 
                }
                if (unlikely(!PageUptodate(page))) {
                        f2fs_put_page(page, 1);
+                       set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
                        return -EIO;
                }
 
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto retry;
                        }
+                       set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
                        break;
                }
 
 
 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
 {
+       if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
+               f2fs_msg(sbi->sb, KERN_ERR,
+                       "quota sysfile may be corrupted, skip loading it");
+               return 0;
+       }
+
        return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
                                        F2FS_OPTION(sbi).s_jquota_fmt, type);
 }
                test_opt(F2FS_SB(sb), PRJQUOTA),
        };
 
-       sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
+       if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
+               f2fs_msg(sb, KERN_ERR,
+                       "quota file may be corrupted, skip loading it");
+               return 0;
+       }
+
+       sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+
        for (type = 0; type < MAXQUOTAS; type++) {
                qf_inum = f2fs_qf_ino(sb, type);
                if (qf_inum) {
                                        "fsck to fix.", type, err);
                                for (type--; type >= 0; type--)
                                        dquot_quota_off(sb, type);
+                               set_sbi_flag(F2FS_SB(sb),
+                                               SBI_QUOTA_NEED_REPAIR);
                                return err;
                        }
                }
        return 0;
 }
 
-static int f2fs_quota_sync(struct super_block *sb, int type)
+int f2fs_quota_sync(struct super_block *sb, int type)
 {
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct quota_info *dqopt = sb_dqopt(sb);
        int cnt;
        int ret;
 
        ret = dquot_writeback_dquots(sb, type);
        if (ret)
-               return ret;
+               goto out;
 
        /*
         * Now when everything is written we can discard the pagecache so
         * that userspace sees the changes.
         */
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               struct address_space *mapping;
+
                if (type != -1 && cnt != type)
                        continue;
                if (!sb_has_quota_active(sb, cnt))
                        continue;
 
-               ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);
+               mapping = dqopt->files[cnt]->i_mapping;
+
+               ret = filemap_fdatawrite(mapping);
                if (ret)
-                       return ret;
+                       goto out;
+
+               /*
+                * Journalled quota is committed by checkpoint; skip
+                * waiting and page cache truncation here.
+                */
+               if (is_journalled_quota(sbi))
+                       continue;
+
+               ret = filemap_fdatawait(mapping);
+               if (ret)
+                       set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
 
                inode_lock(dqopt->files[cnt]);
                truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
                inode_unlock(dqopt->files[cnt]);
        }
-       return 0;
+out:
+       if (ret)
+               set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+       return ret;
 }
 
 static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
                                "Fail to turn off disk quota "
                                "(type: %d, err: %d, ret:%d), Please "
                                "run fsck to fix it.", type, err, ret);
-                       set_sbi_flag(F2FS_SB(sb), SBI_NEED_FSCK);
+                       set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
                }
        }
 }
        }
 }
 
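+/*
+ * Wrap the generic dquot operations so that any failure to persist
+ * quota state flags the filesystem for quota repair.
+ */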
+static int f2fs_dquot_commit(struct dquot *dquot)
+{
+       int ret;
+
+       ret = dquot_commit(dquot);
+       if (ret < 0)
+               set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+       return ret;
+}
+
+static int f2fs_dquot_acquire(struct dquot *dquot)
+{
+       int ret;
+
+       ret = dquot_acquire(dquot);
+       if (ret < 0)
+               set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+
+       return ret;
+}
+
+static int f2fs_dquot_release(struct dquot *dquot)
+{
+       int ret;
+
+       ret = dquot_release(dquot);
+       if (ret < 0)
+               set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR);
+       return ret;
+}
+
+static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
+{
+       struct super_block *sb = dquot->dq_sb;
+       struct f2fs_sb_info *sbi = F2FS_SB(sb);
+       int ret;
+
+       ret = dquot_mark_dquot_dirty(dquot);
+
+       /* journalled quota must be flushed by the next checkpoint */
+       if (is_journalled_quota(sbi))
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
+
+       return ret;
+}
+
+static int f2fs_dquot_commit_info(struct super_block *sb, int type)
+{
+       int ret;
+
+       ret = dquot_commit_info(sb, type);
+       if (ret < 0)
+               set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
+       return ret;
+}
 
 static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
 {
 
 static const struct dquot_operations f2fs_quota_operations = {
        .get_reserved_space = f2fs_get_reserved_space,
-       .write_dquot    = dquot_commit,
-       .acquire_dquot  = dquot_acquire,
-       .release_dquot  = dquot_release,
-       .mark_dirty     = dquot_mark_dquot_dirty,
-       .write_info     = dquot_commit_info,
+       .write_dquot    = f2fs_dquot_commit,
+       .acquire_dquot  = f2fs_dquot_acquire,
+       .release_dquot  = f2fs_dquot_release,
+       .mark_dirty     = f2fs_dquot_mark_dquot_dirty,
+       .write_info     = f2fs_dquot_commit_info,
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
        .get_projid     = f2fs_get_projid,
        .get_nextdqblk  = dquot_get_next_dqblk,
 };
 #else
+int f2fs_quota_sync(struct super_block *sb, int type)
+{
+       return 0;
+}
+
 void f2fs_quota_off_umount(struct super_block *sb)
 {
 }
                goto free_meta_inode;
        }
 
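+       /* the previous checkpoint flagged quota as possibly corrupted */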
+       if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
+               set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
+
        /* Initialize device list */
        err = f2fs_scan_devices(sbi);
        if (err) {
 
  * For checkpoint
  */
 #define CP_DISABLED_FLAG               0x00001000
+#define CP_QUOTA_NEED_FSCK_FLAG                0x00000800
 #define CP_LARGE_NAT_BITMAP_FLAG       0x00000400
 #define CP_NOCRC_RECOVERY_FLAG 0x00000200
 #define CP_TRIMMED_FLAG                0x00000100