xfs: reduce context switches for synchronous buffered I/O
author Christoph Hellwig <hch@lst.de>
Sun, 12 Jan 2025 09:33:05 +0000 (10:33 +0100)
committer Christoph Hellwig <hch@lst.de>
Mon, 13 Jan 2025 04:17:38 +0000 (05:17 +0100)
Currently all metadata I/O completions happen in the m_buf_workqueue
workqueue.  But for synchronous I/O (i.e. all buffer reads) there is no
need for that, as there is always a caller in process context waiting
for the I/O.  Factor out the guts of xfs_buf_ioend into a separate
helper and call it from xfs_buf_iowait to avoid an extra context switch
to the workqueue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
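
To make the flow concrete, here is a minimal userspace sketch of the
pattern the patch applies.  All names here (buf_submit, __buf_ioend,
buf_iowait, retries_left) are hypothetical, a pthread condvar/flag pair
stands in for the kernel's struct completion, and the "I/O" completes
inline in a single thread; this illustrates the idea, not the kernel
code.  The completion side only signals the waiter, and the waiter runs
the shared end-of-I/O processing in its own context, looping whenever
the error handler resubmitted the buffer.

    /* sketch.c: hypothetical userspace model of the new completion flow */
    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* condvar/flag pair standing in for the kernel's struct completion */
    struct completion {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            bool            done;
    };

    static void complete(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = true;
            pthread_cond_signal(&c->cond);
            pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            while (!c->done)
                    pthread_cond_wait(&c->cond, &c->lock);
            pthread_mutex_unlock(&c->lock);
    }

    static void reinit_completion(struct completion *c)
    {
            pthread_mutex_lock(&c->lock);
            c->done = false;
            pthread_mutex_unlock(&c->lock);
    }

    struct buf {
            struct completion iowait;       /* b_iowait */
            int               error;        /* b_error */
            int               retries_left;
    };

    static void buf_submit(struct buf *bp);

    /*
     * Completion work shared by the async workqueue path and the
     * synchronous waiter.  Returns false if the I/O was resubmitted
     * and completion processing has to run again for the retry.
     */
    static bool __buf_ioend(struct buf *bp)
    {
            if (bp->error && bp->retries_left-- > 0) {
                    bp->error = 0;
                    reinit_completion(&bp->iowait); /* re-arm before retry */
                    buf_submit(bp);
                    return false;
            }
            return true;
    }

    /*
     * Simulated I/O that fails once.  The "bio end_io" side only
     * signals the waiter; it does no completion processing itself
     * (async I/O would queue work to a workqueue here instead).
     */
    static void buf_submit(struct buf *bp)
    {
            static int attempts;

            bp->error = attempts++ ? 0 : -EIO;
            complete(&bp->iowait);
    }

    /* Synchronous waiter: finish the I/O in our own process context. */
    static int buf_iowait(struct buf *bp)
    {
            do {
                    wait_for_completion(&bp->iowait);
            } while (!__buf_ioend(bp));
            return bp->error;
    }

    int main(void)
    {
            struct buf bp = {
                    .iowait = { PTHREAD_MUTEX_INITIALIZER,
                                PTHREAD_COND_INITIALIZER, false },
                    .retries_left = 1,
            };

            buf_submit(&bp);
            printf("buffer I/O done, error=%d\n", buf_iowait(&bp));
            return 0;
    }

Running the sketch, the first attempt fails with -EIO, __buf_ioend
re-arms the completion and resubmits, and the waiter sleeps again until
the retry succeeds; no worker thread is involved for the synchronous
case, which is exactly the context switch the patch removes.
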
fs/xfs/xfs_buf.c

index 7f0fe2f66dc42b7e53b26805ea8a5751ebc54561..82c3c04523d9f393b000c89317bc09940fccf5ea 100644 (file)
@@ -1343,6 +1343,7 @@ xfs_buf_ioend_handle_error(
 resubmit:
        xfs_buf_ioerror(bp, 0);
        bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
+       reinit_completion(&bp->b_iowait);
        xfs_buf_submit(bp);
        return true;
 out_stale:
@@ -1353,8 +1354,8 @@ out_stale:
        return false;
 }
 
-static void
-xfs_buf_ioend(
+static bool
+__xfs_buf_ioend(
        struct xfs_buf  *bp)
 {
        trace_xfs_buf_iodone(bp, _RET_IP_);
@@ -1374,7 +1375,7 @@ xfs_buf_ioend(
                }
 
                if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
-                       return;
+                       return false;
 
                /* clear the retry state */
                bp->b_last_error = 0;
@@ -1395,7 +1396,15 @@ xfs_buf_ioend(
 
        bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
                         _XBF_LOGRECOVERY);
+       return true;
+}
 
+static void
+xfs_buf_ioend(
+       struct xfs_buf  *bp)
+{
+       if (!__xfs_buf_ioend(bp))
+               return;
        if (bp->b_flags & XBF_ASYNC)
                xfs_buf_relse(bp);
        else
@@ -1409,15 +1418,8 @@ xfs_buf_ioend_work(
        struct xfs_buf          *bp =
                container_of(work, struct xfs_buf, b_ioend_work);
 
-       xfs_buf_ioend(bp);
-}
-
-static void
-xfs_buf_ioend_async(
-       struct xfs_buf  *bp)
-{
-       INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
-       queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+       if (__xfs_buf_ioend(bp))
+               xfs_buf_relse(bp);
 }
 
 void
@@ -1489,7 +1491,13 @@ xfs_buf_bio_end_io(
                 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
                xfs_buf_ioerror(bp, -EIO);
 
-       xfs_buf_ioend_async(bp);
+       if (bp->b_flags & XBF_ASYNC) {
+               INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+               queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
+       } else {
+               complete(&bp->b_iowait);
+       }
+
        bio_put(bio);
 }
 
@@ -1566,9 +1574,11 @@ xfs_buf_iowait(
 {
        ASSERT(!(bp->b_flags & XBF_ASYNC));
 
-       trace_xfs_buf_iowait(bp, _RET_IP_);
-       wait_for_completion(&bp->b_iowait);
-       trace_xfs_buf_iowait_done(bp, _RET_IP_);
+       do {
+               trace_xfs_buf_iowait(bp, _RET_IP_);
+               wait_for_completion(&bp->b_iowait);
+               trace_xfs_buf_iowait_done(bp, _RET_IP_);
+       } while (!__xfs_buf_ioend(bp));
 
        return bp->b_error;
 }
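
A note on the retry interplay above: the do/while loop in
xfs_buf_iowait pairs with the new bool return of __xfs_buf_ioend.  A
false return means xfs_buf_ioend_handle_error resubmitted the buffer,
so the waiter goes back to sleep on b_iowait for the retried I/O.  That
is also why the resubmit path now calls reinit_completion() before
xfs_buf_submit(): it re-arms the completion so the waiter's next
wait_for_completion() blocks until the retry finishes instead of
returning early on the stale signal.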