Just about all callers of xfs_buf_read() and xfs_buf_get() use XBF_DONT_BLOCK.
This is used to make memory allocation use GFP_NOFS rather than GFP_KERNEL to
avoid recursion through memory reclaim back into the filesystem.
All the blocking get calls in growfs occur inside a transaction, even though
they are not part of the transaction, so all allocations will be GFP_NOFS due
to the task flag PF_FSTRANS being set. The blocking read calls occur during log
recovery, so they will probably be unaffected by converting to GFP_NOFS
allocations.
Hence make the XBF_DONT_BLOCK behaviour the default for all buffers and kill the flag.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
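
For context, a minimal sketch of why GFP_NOFS falls out automatically inside a
transaction: XFS converts its KM_* allocation flags to a gfp_t in
kmem_flags_convert() (fs/xfs/kmem.h), and that conversion strips __GFP_FS
whenever the caller passed KM_NOFS *or* the task is running with PF_FSTRANS
set. The body below is an illustrative reconstruction of that helper for this
era of the tree, not a verbatim copy; verify the exact flag checks against the
source.

#include <linux/gfp.h>
#include <linux/sched.h>

static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	if (flags & KM_NOSLEEP) {
		/* atomic callers can never block at all */
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		/*
		 * Inside a transaction (PF_FSTRANS) or with KM_NOFS,
		 * forbid filesystem reclaim so the allocation cannot
		 * recurse back into XFS and deadlock or blow the stack.
		 */
		if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
			lflags &= ~__GFP_FS;
	}
	return lflags;
}
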
                dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock),
                blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
 
-               bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
-                                XBF_DONT_BLOCK);
+               bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, 0);
                if (!bp)
                        return ENOMEM;
 
 
 #endif
 
 #define xb_to_gfp(flags) \
-       ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
-         ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
-
-#define xb_to_km(flags) \
-        (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+       ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
 
 
 static inline int
 {
        struct xfs_buf          *bp;
 
-       bp = kmem_zone_zalloc(xfs_buf_zone, xb_to_km(flags));
+       bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
        if (unlikely(!bp))
                return NULL;
 
        /*
         * We don't want certain flags to appear in b_flags.
         */
-       flags &= ~(XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+       flags &= ~(XBF_MAPPED|XBF_READ_AHEAD);
 
        atomic_set(&bp->b_hold, 1);
        atomic_set(&bp->b_lru_ref, 1);
                        bp->b_pages = bp->b_page_array;
                } else {
                        bp->b_pages = kmem_alloc(sizeof(struct page *) *
-                                       page_count, xb_to_km(flags));
+                                                page_count, KM_NOFS);
                        if (bp->b_pages == NULL)
                                return -ENOMEM;
                }
         */
        size = BBTOB(bp->b_length);
        if (size < PAGE_SIZE) {
-               bp->b_addr = kmem_alloc(size, xb_to_km(flags));
+               bp->b_addr = kmem_alloc(size, KM_NOFS);
                if (!bp->b_addr) {
                        /* low memory - use alloc_page loop instead */
                        goto use_alloc_page;
                return;
 
        xfs_buf_read(target, blkno, numblks,
-                    XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
+                    XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
 }
 
 /*
        bp->b_pages = NULL;
        bp->b_addr = mem;
 
-       rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
+       rval = _xfs_buf_get_pages(bp, page_count, 0);
        if (rval)
                return rval;
 
 
 
 /* flags used only as arguments to access routines */
 #define XBF_TRYLOCK    (1 << 16)/* lock requested, but do not wait */
-#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */
 
 /* flags used only internally */
 #define _XBF_PAGES     (1 << 20)/* backed by refcounted pages */
        { XBF_FUA,              "FUA" }, \
        { XBF_FLUSH,            "FLUSH" }, \
        { XBF_TRYLOCK,          "TRYLOCK" },    /* should never be set */\
-       { XBF_DONT_BLOCK,       "DONT_BLOCK" }, /* ditto */\
        { _XBF_PAGES,           "PAGES" }, \
        { _XBF_KMEM,            "KMEM" }, \
        { _XBF_DELWRI_Q,        "DELWRI_Q" }
 
         * Default to a normal get_buf() call if the tp is NULL.
         */
        if (tp == NULL)
-               return xfs_buf_get(target_dev, blkno, len,
-                                  flags | XBF_DONT_BLOCK);
+               return xfs_buf_get(target_dev, blkno, len, flags);
 
        /*
         * If we find the buffer in the cache with this transaction
                return (bp);
        }
 
-       /*
-        * We always specify the XBF_DONT_BLOCK flag within a transaction
-        * so that get_buf does not try to push out a delayed write buffer
-        * which might cause another transaction to take place (if the
-        * buffer was delayed alloc).  Such recursive transactions can
-        * easily deadlock with our current transaction as well as cause
-        * us to run out of stack space.
-        */
-       bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
+       bp = xfs_buf_get(target_dev, blkno, len, flags);
        if (bp == NULL) {
                return NULL;
        }
         * Default to a normal get_buf() call if the tp is NULL.
         */
        if (tp == NULL) {
-               bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
+               bp = xfs_buf_read(target, blkno, len, flags);
                if (!bp)
                        return (flags & XBF_TRYLOCK) ?
                                        EAGAIN : XFS_ERROR(ENOMEM);
                return 0;
        }
 
-       /*
-        * We always specify the XBF_DONT_BLOCK flag within a transaction
-        * so that get_buf does not try to push out a delayed write buffer
-        * which might cause another transaction to take place (if the
-        * buffer was delayed alloc).  Such recursive transactions can
-        * easily deadlock with our current transaction as well as cause
-        * us to run out of stack space.
-        */
-       bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
+       bp = xfs_buf_read(target, blkno, len, flags);
        if (bp == NULL) {
                *bpp = NULL;
                return (flags & XBF_TRYLOCK) ?
 
                byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
 
                bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
-                                 XBF_MAPPED | XBF_DONT_BLOCK);
+                                 XBF_MAPPED);
                if (!bp)
                        return XFS_ERROR(ENOMEM);
                error = bp->b_error;
 
        bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp,
-                               BTOBB(mp->m_sb.sb_blocksize), XBF_DONT_BLOCK);
+                                 BTOBB(mp->m_sb.sb_blocksize), 0);
        if (!bp)
                return XFS_ERROR(ENOMEM);