The advantage of mounting with the "volatile" option is that all forms of
sync calls to the upper filesystem are omitted.
+In order to avoid giving a false sense of safety, the syncfs (and fsync)
+semantics of volatile mounts are slightly different from those of the rest
+of the VFS. If any writeback error occurs on the upperdir's filesystem after
+a volatile mount takes place, all sync functions will return an error. Once
+this condition is reached, the filesystem will not recover, and every
+subsequent sync call will return an error, even if the upperdir has not
+experienced a new error since the last sync call.
+
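As an illustration of these semantics from user space (not part of this patch; the mount point "/mnt/ovl" is hypothetical), a program that calls syncfs(2) on a volatile overlay mount is expected to keep seeing EIO once a writeback error has been recorded on the upperdir's filesystem:

  /* Hypothetical user-space sketch; "/mnt/ovl" is an assumed volatile overlay mount. */
  #define _GNU_SOURCE
  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          int fd = open("/mnt/ovl", O_RDONLY | O_DIRECTORY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }

          /*
           * After any writeback error on the upperdir's filesystem, this is
           * expected to fail with EIO on every call until the overlay is
           * unmounted, per the semantics described above.
           */
          if (syncfs(fd) < 0)
                  fprintf(stderr, "syncfs: %s\n", strerror(errno));

          close(fd);
          return 0;
  }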
When overlay is mounted with "volatile" option, the directory
"$workdir/work/incompat/volatile" is created. During next mount, overlay
checks for this directory and refuses to mount if present. This is a strong
const struct cred *old_cred;
int ret;
- if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
- return 0;
+ ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
+ if (ret <= 0)
+ return ret;
ret = ovl_real_fdget_meta(file, &real, !datasync);
if (ret)
bool ovl_is_metacopy_dentry(struct dentry *dentry);
char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
int padding);
+int ovl_sync_status(struct ovl_fs *ofs);
static inline bool ovl_is_impuredir(struct super_block *sb,
struct dentry *dentry)
atomic_long_t last_ino;
/* Whiteout dentry cache */
struct dentry *whiteout;
+	/* r/o snapshot of upperdir sb's errseq, taken only on volatile mounts */
+ errseq_t errseq;
};
static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
struct file *realfile;
int err;
- if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb)))
- return 0;
+ err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
+ if (err <= 0)
+ return err;
realfile = ovl_dir_real_file(file, true);
err = PTR_ERR_OR_ZERO(realfile);
struct super_block *upper_sb;
int ret;
- if (!ovl_upper_mnt(ofs))
- return 0;
+ ret = ovl_sync_status(ofs);
+	/*
+	 * We always have to set the error on the sb, because the return
+	 * value of ->sync_fs() isn't checked by syncfs; instead the error
+	 * is reported indirectly via the sb's writeback errseq, which the
+	 * VFS inspects after this call.
+	 */
+ if (ret < 0) {
+ errseq_set(&sb->s_wb_err, -EIO);
+ return -EIO;
+ }
+
+ if (!ret)
+ return ret;
- if (!ovl_should_sync(ofs))
- return 0;
/*
* Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
* All the super blocks will be iterated, including upper_sb.
sb->s_op = &ovl_super_operations;
if (ofs->config.upperdir) {
+ struct super_block *upper_sb;
+
if (!ofs->config.workdir) {
pr_err("missing 'workdir'\n");
goto out_err;
if (err)
goto out_err;
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+ if (!ovl_should_sync(ofs)) {
+ ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
+ if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
+ err = -EIO;
+ pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
+ goto out_err;
+ }
+ }
+
err = ovl_get_workdir(sb, ofs, &upperpath);
if (err)
goto out_err;
if (!ofs->workdir)
sb->s_flags |= SB_RDONLY;
- sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
- sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
-
+ sb->s_stack_depth = upper_sb->s_stack_depth;
+ sb->s_time_gran = upper_sb->s_time_gran;
}
oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
err = PTR_ERR(oe);
kfree(buf);
return ERR_PTR(res);
}
+
+/*
+ * ovl_sync_status() - Check fs sync status for volatile mounts
+ *
+ * Returns 1 if this is not a volatile mount and a real sync is required.
+ *
+ * Returns 0 if syncing can be skipped because mount is volatile, and no errors
+ * have occurred on the upperdir since the mount.
+ *
+ * Returns -errno if this is a volatile mount and an error has occurred on the
+ * upperdir since the mount. If the error code changes, the latest error code
+ * is returned.
+ */
+
+int ovl_sync_status(struct ovl_fs *ofs)
+{
+ struct vfsmount *mnt;
+
+ if (ovl_should_sync(ofs))
+ return 1;
+
+ mnt = ovl_upper_mnt(ofs);
+ if (!mnt)
+ return 0;
+
+ return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
+}
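For context, below is a minimal conceptual sketch (not part of this patch) of the errseq pattern ovl_sync_status() relies on, using the in-kernel <linux/errseq.h> API: the upper sb's errseq is sampled once at volatile mount time, and later checks against that sample return the latest error recorded since. The helper names are illustrative only.

  /* Conceptual sketch only; builds against in-kernel headers, not standalone. */
  #include <linux/errseq.h>
  #include <linux/fs.h>

  /* At mount time: remember the current state of the upper sb's errseq. */
  static errseq_t sample_upper_errseq(struct super_block *upper_sb)
  {
  	return errseq_sample(&upper_sb->s_wb_err);
  }

  /* At sync time: return 0 if no new error, or the latest -errno seen since. */
  static int check_upper_errseq(struct super_block *upper_sb, errseq_t since)
  {
  	return errseq_check(&upper_sb->s_wb_err, since);
  }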