}
/**
- * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
+ * wbc_attach_inode - associate wbc with target inode
* @wbc: writeback_control of interest
* @inode: target inode
*
* @inode is locked and about to be written back under the control of @wbc.
- * Record @inode's writeback context into @wbc and unlock the i_lock. On
- * writeback completion, wbc_detach_inode() should be called. This is used
- * to track the cgroup writeback context.
+ * Record @inode's writeback context into @wbc. The caller must hold
+ * @inode->i_lock and remains responsible for unlocking it; once the lock is
+ * dropped, wbc_switch_dying_wb() should be called. On writeback completion,
+ * wbc_detach_inode() should be called. This is used to track the cgroup
+ * writeback context.
*/
-static void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
- struct inode *inode)
- __releases(&inode->i_lock)
+static void wbc_attach_inode(struct writeback_control *wbc, struct inode *inode)
{
- if (!inode_cgwb_enabled(inode)) {
- spin_unlock(&inode->i_lock);
+ if (!inode_cgwb_enabled(inode))
return;
- }
+
+ lockdep_assert_held(&inode->i_lock);
wbc->wb = inode_to_wb(inode);
wbc->inode = inode;
wbc->wb_tcand_bytes = 0;
wb_get(wbc->wb);
- spin_unlock(&inode->i_lock);
+}
- /*
- * A dying wb indicates that either the blkcg associated with the
- * memcg changed or the associated memcg is dying. In the first
- * case, a replacement wb should already be available and we should
- * refresh the wb immediately. In the second case, trying to
- * refresh will keep failing.
- */
+
+/*
+ * A dying wb indicates that either the blkcg associated with the memcg changed
+ * or the associated memcg is dying. In the first case, a replacement wb should
+ * already be available and we should refresh the wb immediately. In the second
+ * case, trying to refresh will keep failing.
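+ *
+ * Must be called after @inode->i_lock has been dropped, since
+ * inode_switch_wbs() takes the lock itself.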
+ */
+static void wbc_switch_dying_wb(struct writeback_control *wbc,
+ struct inode *inode)
+{
+ /* Nothing to switch if no wb was attached (cgwb disabled for @inode). */
+ if (!wbc->wb)
+ return;
+
if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
inode_switch_wbs(inode, wbc->wb_id);
}
{
spin_lock(&inode->i_lock);
inode_attach_wb(inode, NULL);
- wbc_attach_and_unlock_inode(wbc, inode);
+ wbc_attach_inode(wbc, inode);
+ spin_unlock(&inode->i_lock);
+
+ wbc_switch_dying_wb(wbc, inode);
}
EXPORT_SYMBOL_GPL(wbc_attach_fdatawrite_inode);
* @wbc: writeback_control of the just finished writeback
*
* To be called after a writeback attempt of an inode finishes and undoes
- * wbc_attach_and_unlock_inode(). Can be called under any context.
+ * wbc_attach_inode(). Can be called under any context.
*
* As concurrent write sharing of an inode is expected to be very rare and
* memcg only tracks page ownership on first-use basis severely confining
}
}
-static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
- struct inode *inode)
- __releases(&inode->i_lock)
+static inline void wbc_attach_inode(struct writeback_control *wbc,
+ struct inode *inode)
+{
+}
+
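+/* Without cgroup writeback there is no wb attached and nothing to switch. */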
+static inline void wbc_switch_dying_wb(struct writeback_control *wbc,
+ struct inode *inode)
{
- spin_unlock(&inode->i_lock);
}
#endif /* CONFIG_CGROUP_WRITEBACK */
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
goto out;
inode->i_state |= I_SYNC;
- wbc_attach_and_unlock_inode(wbc, inode);
+ wbc_attach_inode(wbc, inode);
+ spin_unlock(&inode->i_lock);
+
+ wbc_switch_dying_wb(wbc, inode);
ret = __writeback_single_inode(inode, wbc);
continue;
}
inode->i_state |= I_SYNC;
- wbc_attach_and_unlock_inode(&wbc, inode);
+ wbc_attach_inode(&wbc, inode);
+ spin_unlock(&inode->i_lock);
+
+ wbc_switch_dying_wb(&wbc, inode);
write_chunk = writeback_chunk_size(wb, work);
wbc.nr_to_write = write_chunk;