                if (percpu_ref_tryget_live(&q->q_usage_counter))
                        return 0;
 
-               if (!(gfp & __GFP_WAIT))
+               if (!gfpflags_allow_blocking(gfp))
                        return -EBUSY;
 
                ret = wait_event_interruptible(q->mq_freeze_wq,
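
This hunk switches from testing the raw flag to the gfpflags_allow_blocking() helper that the reclaim-flag split introduced. A minimal sketch of what that helper checks (the real definition lives in include/linux/gfp.h; shown here only to make the intent of the change explicit):

        /* Sketch: blocking is allowed iff the caller may enter direct reclaim. */
        static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
        {
                return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
        }

In blk_queue_enter() this means callers passing only __GFP_KSWAPD_RECLAIM (for example GFP_NOWAIT) are now refused with -EBUSY rather than allowed to sleep on a frozen queue.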
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+               if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
 
                        q->make_request_fn(q, bio);
 
 
                ctx = blk_mq_get_ctx(q);
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
                blk_mq_set_alloc_data(&alloc_data, q,
-                               __GFP_WAIT|__GFP_HIGH, false, ctx, hctx);
+                               __GFP_RECLAIM|__GFP_HIGH, false, ctx, hctx);
                rq = __blk_mq_alloc_request(&alloc_data, rw);
                ctx = alloc_data.ctx;
                hctx = alloc_data.hctx;
 
 
        }
 
-       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+       rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto error_free_buffer;
                break;
        }
 
-       if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+       if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) {
                err = DRIVER_ERROR << 24;
                goto error;
        }
        struct request *rq;
        int err;
 
-       rq = blk_get_request(q, WRITE, __GFP_WAIT);
+       rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
 
        bm_set_page_unchanged(b->bm_pages[page_nr]);
 
        if (ctx->flags & BM_AIO_COPY_PAGES) {
-               page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
+               page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
                copy_highpage(page, b->bm_pages[page_nr]);
                bm_store_page_idx(page, page_nr);
        } else
 
 {
        struct request *rq;
 
-       rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
+       rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
        return blk_mq_rq_to_pdu(rq);
 }
 
 
        struct request *rq;
        int err = 0;
 
-       rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
 
 
        int ret = 0;
 
        rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-                            WRITE : READ, __GFP_WAIT);
+                            WRITE : READ, __GFP_RECLAIM);
        if (IS_ERR(rq))
                return PTR_ERR(rq);
        blk_rq_set_block_pc(rq);
 
        if (cgc->buflen) {
                ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
-                                     __GFP_WAIT);
+                                     __GFP_RECLAIM);
                if (ret)
                        goto out;
        }
 
        mapping = file_inode(obj->base.filp)->i_mapping;
        gfp = mapping_gfp_mask(mapping);
        gfp |= __GFP_NORETRY | __GFP_NOWARN;
-       gfp &= ~(__GFP_IO | __GFP_WAIT);
+       gfp &= ~(__GFP_IO | __GFP_RECLAIM);
        sg = st->sgl;
        st->nents = 0;
        for (i = 0; i < page_count; i++) {
 
        struct request *rq;
        int error;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->special = (char *)pc;
 
 
                struct request *rq;
                int error;
 
-               rq = blk_get_request(drive->queue, write, __GFP_WAIT);
+               rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
 
                memcpy(rq->cmd, cmd, BLK_MAX_CDB);
                rq->cmd_type = REQ_TYPE_ATA_PC;
 
        struct request *rq;
        int ret;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_flags = REQ_QUIET;
        ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
 
        if (!(setting->flags & DS_SYNC))
                return setting->set(drive, arg);
 
-       rq = blk_get_request(q, READ, __GFP_WAIT);
+       rq = blk_get_request(q, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_len = 5;
        rq->cmd[0] = REQ_DEVSET_EXEC;
 
        if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
                return -EBUSY;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
        drive->mult_req = arg;
 
        if (NULL == (void *) arg) {
                struct request *rq;
 
-               rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+               rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
                rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
                err = blk_execute_rq(drive->queue, NULL, rq, 0);
                blk_put_request(rq);
        struct request *rq;
        int ret = 0;
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd_len = 1;
        rq->cmd[0] = REQ_DRIVE_RESET;
 
        }
        spin_unlock_irq(&hwif->lock);
 
-       rq = blk_get_request(q, READ, __GFP_WAIT);
+       rq = blk_get_request(q, READ, __GFP_RECLAIM);
        rq->cmd[0] = REQ_PARK_HEADS;
        rq->cmd_len = 1;
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
 
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
        rq->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_SUSPEND;
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
        rq->cmd_flags |= REQ_PREEMPT;
        rq->special = &rqpm;
 
        BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
        BUG_ON(size < 0 || size % tape->blk_size);
 
-       rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_DRV_PRIV;
        rq->cmd[13] = cmd;
        rq->rq_disk = tape->disk;
 
        if (size) {
                ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
-                                     __GFP_WAIT);
+                                     __GFP_RECLAIM);
                if (ret)
                        goto out_put;
        }
 
        int error;
        int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
 
-       rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
+       rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 
        /*
         */
        if (nsect) {
                error = blk_rq_map_kern(drive->queue, rq, buf,
-                                       nsect * SECTOR_SIZE, __GFP_WAIT);
+                                       nsect * SECTOR_SIZE, __GFP_RECLAIM);
                if (error)
                        goto put_req;
        }
 
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        egrcnt = rcd->rcvegrcnt;
        egroff = rcd->rcvegr_tid_base;
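
As an aside on the mask chosen above: under the new names __GFP_RECLAIM | __GFP_IO is exactly GFP_NOFS, so the receive-buffer allocation allows reclaim and block I/O but not filesystem recursion. A compile-time restatement of that equivalence (illustrative, not part of the patch; place inside any function):

        /* Fails to build only if the GFP_NOFS equivalence above is broken. */
        BUILD_BUG_ON((__GFP_RECLAIM | __GFP_IO | __GFP_COMP) !=
                     (GFP_NOFS | __GFP_COMP));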
 
 
 /*
  * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * allow reclaim (__GFP_RECLAIM) for NOSLEEP page allocations. Use
  * __GFP_NOWARN, to suppress page allocation failure warnings.
  */
 #define VMW_PAGE_ALLOC_NOSLEEP         (__GFP_HIGHMEM|__GFP_NOWARN)
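
A short sketch (not from the patch, kernel context assumed) of why the NOSLEEP mask stays non-blocking under the new scheme: neither reclaim bit is present, so the allocator may neither sleep nor wake kswapd and can fail quickly.

        /* gfpflags_allow_blocking(VMW_PAGE_ALLOC_NOSLEEP) is false. */
        struct page *page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
        if (!page)
                return -ENOMEM;         /* caller backs off and retries later */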
 
        req->special = (void *)0;
 
        if (buffer && bufflen) {
-               ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_WAIT);
+               ret = blk_rq_map_kern(q, req, buffer, bufflen,
+                                     __GFP_DIRECT_RECLAIM);
                if (ret)
                        goto out;
        } else if (ubuffer && bufflen) {
-               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_WAIT);
+               ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+                                     __GFP_DIRECT_RECLAIM);
                if (ret)
                        goto out;
                bio = req->bio;
 
        struct request *req;
 
        /*
-        * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+        * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
         * request becomes available
         */
        req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
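
For contrast with the comment above, a hedged sketch of the non-blocking variant: a mask without __GFP_DIRECT_RECLAIM makes blk_get_request() fail fast rather than sleep for a free request (illustrative only):

        struct request *req;

        /* GFP_ATOMIC carries no __GFP_DIRECT_RECLAIM, so this cannot sleep. */
        req = blk_get_request(sdev->request_queue, READ, GFP_ATOMIC);
        if (IS_ERR(req))
                return PTR_ERR(req);    /* e.g. -ENOMEM when no request is free */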
 
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;
 
-       req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+       req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
        if (IS_ERR(req))
                return ret;
        blk_rq_set_block_pc(req);
 
        if (bufflen &&  blk_rq_map_kern(sdev->request_queue, req,
-                                       buffer, bufflen, __GFP_WAIT))
+                                       buffer, bufflen, __GFP_RECLAIM))
                goto out;
 
        req->cmd_len = COMMAND_SIZE(cmd[0]);
 
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        /*
         * The minimum size of the eager buffers is a groups of MTU-sized
 
         * heavy filesystem activity makes these fail, and we can
         * use compound pages.
         */
-       gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 
        egrcnt = dd->ipath_rcvegrcnt;
        /* TID number offset for this port */
 
 #define CACHEFILES_DEBUG_KLEAVE        2
 #define CACHEFILES_DEBUG_KDEBUG        4
 
-#define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
+#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /*
  * node records
 
 
        /*
         * bio_alloc() is guaranteed to return a bio when called with
-        * __GFP_WAIT and we request a valid number of vectors.
+        * __GFP_RECLAIM and we request a valid number of vectors.
         */
        bio = bio_alloc(GFP_KERNEL, nr_vecs);
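
The guarantee in that comment rests on GFP_KERNEL carrying __GFP_DIRECT_RECLAIM, which lets bio_alloc() sleep on its backing mempool (fs_bio_set) instead of failing. A hedged compile-time restatement (illustrative; place inside any function):

        /* bio_alloc(GFP_KERNEL, nr_vecs) may block, so it only returns NULL
         * for an invalid nr_vecs. */
        BUILD_BUG_ON(!(GFP_KERNEL & __GFP_DIRECT_RECLAIM));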
 
 
 }
 
 /* Default GFP flags using highmem */
-#define NILFS_MDT_GFP      (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
+#define NILFS_MDT_GFP      (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM)
 
 int nilfs_mdt_get_block(struct inode *, unsigned long, int,
                        void (*init_block)(struct inode *,
 
  * can be cleared when the reclaiming of pages would cause unnecessary
  * disruption.
  */
-#define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
 #define __GFP_DIRECT_RECLAIM   ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
 #define __GFP_KSWAPD_RECLAIM   ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
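
Call sites that used to test the single __GFP_WAIT bit now have to say which question they mean; a conversion sketch (the called functions are illustrative placeholders, not real kernel APIs):

        /* Before:  if (gfp & __GFP_WAIT)  -- "may this allocation block?" */
        if (gfp & __GFP_DIRECT_RECLAIM)         /* the caller itself may sleep    */
                take_blocking_path();
        if (gfp & __GFP_RECLAIM)                /* any reclaim, incl. waking kswapd */
                take_reclaim_aware_path();

gfpflags_allow_blocking(gfp) is the helper form of the first test.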
 
  */
 #define GFP_ATOMIC     (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
 #define GFP_NOWAIT     (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO       (__GFP_WAIT)
-#define GFP_NOFS       (__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL     (__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY  (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+#define GFP_NOIO       (__GFP_RECLAIM)
+#define GFP_NOFS       (__GFP_RECLAIM | __GFP_IO)
+#define GFP_KERNEL     (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY  (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
                         __GFP_RECLAIMABLE)
-#define GFP_USER       (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER       (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER   (GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE   (GFP_HIGHUSER | __GFP_MOVABLE)
 #define GFP_TRANSHUGE  ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
 #define GFP_MOVABLE_SHIFT 3
 
 /* Control page allocator reclaim behavior */
-#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
                        __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
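
One place these masks matter in practice: early in boot the allocator filters every request through gfp_allowed_mask, which starts out as GFP_BOOT_MASK, so __GFP_RECLAIM, __GFP_IO and __GFP_FS are stripped until blocking allocations become safe. A sketch of that filtering step (the real code lives in mm/page_alloc.c):

        gfp_mask &= gfp_allowed_mask;   /* == GFP_BOOT_MASK during early boot */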
 
        struct bio *bio;
        int error = 0;
 
-       bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+       bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
        bio->bi_bdev = hib_resume_bdev;
 
                return -ENOSPC;
 
        if (hb) {
-               src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+               src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
                                              __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
                        ret = hib_wait_io(hb); /* Free pages */
                        if (ret)
                                return ret;
-                       src = (void *)__get_free_page(__GFP_WAIT |
+                       src = (void *)__get_free_page(__GFP_RECLAIM |
                                                      __GFP_NOWARN |
                                                      __GFP_NORETRY);
                        if (src) {
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 
-       page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+       page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
        if (!page) {
                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
                ret = -ENOMEM;
                last = tmp;
 
                tmp->map = (struct swap_map_page *)
-                          __get_free_page(__GFP_WAIT | __GFP_HIGH);
+                          __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
                if (!tmp->map) {
                        release_swap_reader(handle);
                        return -ENOMEM;
 
        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
-                                                 __GFP_WAIT | __GFP_HIGH :
-                                                 __GFP_WAIT | __GFP_NOWARN |
-                                                 __GFP_NORETRY);
+                                                 __GFP_RECLAIM | __GFP_HIGH :
+                                                 __GFP_RECLAIM | __GFP_NOWARN |
+                                                 __GFP_NORETRY);
 
                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
 
  * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
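
The "same semantics as a mempool" remark refers to mempool_alloc()'s contract; a brief reminder, as a sketch detached from this call site (kernel context assumed, grab_element is an illustrative name):

        /* With a mask that allows direct reclaim, mempool_alloc() never fails:
         * it sleeps until another user returns an element.  With a non-blocking
         * mask such as GFP_NOWAIT it may return NULL immediately. */
        static void *grab_element(mempool_t *pool, bool may_block)
        {
                return mempool_alloc(pool, may_block ? GFP_KERNEL : GFP_NOWAIT);
        }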
 
 
 static struct {
        struct fault_attr attr;
-       bool ignore_gfp_wait;
+       bool ignore_gfp_reclaim;
        bool cache_filter;
 } failslab = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = true,
+       .ignore_gfp_reclaim = true,
        .cache_filter = false,
 };
 
        if (gfpflags & __GFP_NOFAIL)
                return false;
 
-        if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+       if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
                return false;
 
        if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
                return PTR_ERR(dir);
 
        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                               &failslab.ignore_gfp_wait))
+                               &failslab.ignore_gfp_reclaim))
                goto fail;
        if (!debugfs_create_bool("cache-filter", mode, dir,
                                &failslab.cache_filter))
 
  * page is known to the local caching routines.
  *
  * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
+ * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
  *
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
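
A hedged caller-side sketch of how those bits are honoured: a mask without __GFP_DIRECT_RECLAIM (and without __GFP_FS) asks the owning filesystem to do only non-blocking, non-FS work when dropping the page's private data:

        /* Sketch: reclaim-like path that must not sleep or recurse into the FS. */
        if (page_has_private(page) && !try_to_release_page(page, GFP_NOWAIT))
                return 0;       /* buffers still pinned; skip this page for now */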
 
 
 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
+       return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
 }
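
To make the masking concrete, the helper above evaluates to (sketch, not part of the patch):

        gfp_t gfp_on  = alloc_hugepage_gfpmask(1, 0);   /* == GFP_TRANSHUGE: may reclaim/compact        */
        gfp_t gfp_off = alloc_hugepage_gfpmask(0, 0);   /* == GFP_TRANSHUGE & ~__GFP_RECLAIM: fail fast */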
 
 /* Caller must hold page table lock. */
 
        /*
         * If the hierarchy is above the normal consumption range, schedule
         * reclaim on returning to userland.  We can perform reclaim here
-        * if __GFP_WAIT but let's always punt for simplicity and so that
+        * if __GFP_RECLAIM but let's always punt for simplicity and so that
         * GFP_KERNEL can consistently be used during reclaim.  @memcg is
         * not recorded as it most likely matches current's and won't
         * change in the meantime.  As high limit is checked again before
 
                goto out_dropref;
 
        new_page = alloc_pages_node(node,
-               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+               (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
                HPAGE_PMD_ORDER);
        if (!new_page)
                goto out_fail;
 
        struct fault_attr attr;
 
        bool ignore_gfp_highmem;
-       bool ignore_gfp_wait;
+       bool ignore_gfp_reclaim;
        u32 min_order;
 } fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = true,
+       .ignore_gfp_reclaim = true,
        .ignore_gfp_highmem = true,
        .min_order = 1,
 };
                return false;
        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                return false;
-       if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_DIRECT_RECLAIM))
+       if (fail_page_alloc.ignore_gfp_reclaim &&
+                       (gfp_mask & __GFP_DIRECT_RECLAIM))
                return false;
 
        return should_fail(&fail_page_alloc.attr, 1 << order);
                return PTR_ERR(dir);
 
        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                               &fail_page_alloc.ignore_gfp_wait))
+                               &fail_page_alloc.ignore_gfp_reclaim))
                goto fail;
        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
                                &fail_page_alloc.ignore_gfp_highmem))
 
 {
        void *ptr;
        int order = ima_maxorder;
-       gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY;
+       gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
 
        if (order)
                order = min(get_order(max_size), order);