kvfree(ptr);
 }
 
-
-static inline void *
-kmem_zalloc(size_t size, xfs_km_flags_t flags)
-{
-       return kmem_alloc(size, flags | KM_ZERO);
-}
-
 /*
  * Zone interfaces
  */
 
                        continue;
                }
 
-               pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
+               pag = kzalloc(sizeof(*pag), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
                if (!pag) {
                        error = -ENOMEM;
                        goto out_unwind_new_pags;
 
                struct xfs_attr_leafblock *tmp_leaf;
                struct xfs_attr3_icleaf_hdr tmphdr;
 
-               tmp_leaf = kmem_zalloc(state->args->geo->blksize, 0);
+               tmp_leaf = kzalloc(state->args->geo->blksize,
+                               GFP_KERNEL | __GFP_NOFAIL);
 
                /*
                 * Copy the header into the temp leaf so that all the stuff
 
 
                /* Allocate a new incore btree root block. */
                new_size = bbl->iroot_size(cur, level, nr_this_block, priv);
-               ifp->if_broot = kmem_zalloc(new_size, 0);
+               ifp->if_broot = kzalloc(new_size, GFP_KERNEL | __GFP_NOFAIL);
                ifp->if_broot_bytes = (int)new_size;
 
                /* Initialize it and send it out. */
 
        int                     error = 0, nirecs, i;
 
        if (nfsb > 1)
-               irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);
+               irecs = kzalloc(sizeof(irec) * nfsb, GFP_NOFS | __GFP_NOFAIL);
 
        nirecs = nfsb;
        error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
         * larger one that needs to be free by the caller.
         */
        if (nirecs > 1) {
-               map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
+               map = kzalloc(nirecs * sizeof(struct xfs_buf_map),
+                               GFP_NOFS | __GFP_NOFAIL);
                if (!map) {
                        error = -ENOMEM;
                        goto out_free_irecs;
 
                return ERR_PTR(error);
 
        /* Create an object to capture the defer ops. */
-       dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
+       dfc = kzalloc(sizeof(*dfc), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&dfc->dfc_list);
        INIT_LIST_HEAD(&dfc->dfc_dfops);
 
 
        ASSERT(mp->m_sb.sb_versionnum & XFS_SB_VERSION_DIRV2BIT);
        ASSERT(xfs_dir2_dirblock_bytes(&mp->m_sb) <= XFS_MAX_BLOCKSIZE);
 
-       mp->m_dir_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
-                                   KM_MAYFAIL);
-       mp->m_attr_geo = kmem_zalloc(sizeof(struct xfs_da_geometry),
-                                    KM_MAYFAIL);
+       mp->m_dir_geo = kzalloc(sizeof(struct xfs_da_geometry),
+                               GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+       mp->m_attr_geo = kzalloc(sizeof(struct xfs_da_geometry),
+                               GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!mp->m_dir_geo || !mp->m_attr_geo) {
                kmem_free(mp->m_dir_geo);
                kmem_free(mp->m_attr_geo);
        if (error)
                return error;
 
-       args = kmem_zalloc(sizeof(*args), KM_NOFS);
+       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
                XFS_STATS_INC(dp->i_mount, xs_dir_create);
        }
 
-       args = kmem_zalloc(sizeof(*args), KM_NOFS);
+       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
         * lockdep Doing this avoids having to add a bunch of lockdep class
         * annotations into the reclaim path for the ilock.
         */
-       args = kmem_zalloc(sizeof(*args), KM_NOFS);
+       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
        args->geo = dp->i_mount->m_dir_geo;
        args->name = name->name;
        args->namelen = name->len;
        ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_remove);
 
-       args = kmem_zalloc(sizeof(*args), KM_NOFS);
+       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
        if (rval)
                return rval;
 
-       args = kmem_zalloc(sizeof(*args), KM_NOFS);
+       args = kzalloc(sizeof(*args), GFP_NOFS | __GFP_NOFAIL);
        if (!args)
                return -ENOMEM;
 
 
 xfs_iext_grow(
        struct xfs_ifork        *ifp)
 {
-       struct xfs_iext_node    *node = kmem_zalloc(NODE_SIZE, KM_NOFS);
+       struct xfs_iext_node    *node = kzalloc(NODE_SIZE,
+                                               GFP_NOFS | __GFP_NOFAIL);
        int                     i;
 
        if (ifp->if_height == 1) {
        int                     *nr_entries)
 {
        struct xfs_iext_node    *node = *nodep;
-       struct xfs_iext_node    *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
+       struct xfs_iext_node    *new = kzalloc(NODE_SIZE,
+                                               GFP_NOFS | __GFP_NOFAIL);
        const int               nr_move = KEYS_PER_NODE / 2;
        int                     nr_keep = nr_move + (KEYS_PER_NODE & 1);
        int                     i = 0;
        int                     *nr_entries)
 {
        struct xfs_iext_leaf    *leaf = cur->leaf;
-       struct xfs_iext_leaf    *new = kmem_zalloc(NODE_SIZE, KM_NOFS);
+       struct xfs_iext_leaf    *new = kzalloc(NODE_SIZE,
+                                               GFP_NOFS | __GFP_NOFAIL);
        const int               nr_move = RECS_PER_LEAF / 2;
        int                     nr_keep = nr_move + (RECS_PER_LEAF & 1);
        int                     i;
 {
        ASSERT(ifp->if_bytes == 0);
 
-       ifp->if_data = kmem_zalloc(sizeof(struct xfs_iext_rec), KM_NOFS);
+       ifp->if_data = kzalloc(sizeof(struct xfs_iext_rec),
+                                       GFP_NOFS | __GFP_NOFAIL);
        ifp->if_height = 1;
 
        /* now that we have a node step into it */
 
        if (error)
                return ERR_PTR(error);
 
-       attr = kmem_zalloc(sizeof(struct xfs_attr_intent) +
-                          sizeof(struct xfs_da_args), KM_NOFS);
+       attr = kzalloc(sizeof(struct xfs_attr_intent) +
+                       sizeof(struct xfs_da_args), GFP_NOFS | __GFP_NOFAIL);
        args = (struct xfs_da_args *)(attr + 1);
 
        attr->xattri_da_args = args;
 
                return 0;
        }
 
-       bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
-                               KM_NOFS);
+       bp->b_maps = kzalloc(map_count * sizeof(struct xfs_buf_map),
+                               GFP_NOFS | __GFP_NOFAIL);
        if (!bp->b_maps)
                return -ENOMEM;
        return 0;
 #if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
        ops = &xfs_dax_holder_operations;
 #endif
-       btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
+       btp = kzalloc(sizeof(*btp), GFP_NOFS | __GFP_NOFAIL);
 
        btp->bt_mount = mp;
        btp->bt_bdev_handle = bdev_handle;
 
                return;
        }
 
-       bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
-                               0);
+       bip->bli_formats = kzalloc(count * sizeof(struct xfs_buf_log_format),
+                               GFP_KERNEL | __GFP_NOFAIL);
 }
 
 STATIC void
 
 {
        int ret;
 
-       mp->m_errortag = kmem_zalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX,
-                       KM_MAYFAIL);
+       mp->m_errortag = kzalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX,
+                               GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!mp->m_errortag)
                return -ENOMEM;
 
 
        struct rb_node          **rbp;
        struct rb_node          *parent = NULL;
 
-       new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
+       new = kzalloc(sizeof(struct xfs_extent_busy),
+                       GFP_KERNEL | __GFP_NOFAIL);
        new->agno = pag->pag_agno;
        new->bno = bno;
        new->length = len;
 
 
        ASSERT(breq->icount == 1);
 
-       bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
-                       KM_MAYFAIL);
+       bc.buf = kzalloc(sizeof(struct xfs_bulkstat),
+                       GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!bc.buf)
                return -ENOMEM;
 
        if (xfs_bulkstat_already_done(breq->mp, breq->startino))
                return 0;
 
-       bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
-                       KM_MAYFAIL);
+       bc.buf = kzalloc(sizeof(struct xfs_bulkstat),
+                       GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!bc.buf)
                return -ENOMEM;
 
 
                if (xfs_pwork_ctl_want_abort(&pctl))
                        break;
 
-               iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
+               iwag = kzalloc(sizeof(struct xfs_iwalk_ag),
+                               GFP_KERNEL | __GFP_NOFAIL);
                iwag->mp = mp;
 
                /*
 
        int                     error = -ENOMEM;
        uint                    log2_size = 0;
 
-       log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
+       log = kzalloc(sizeof(struct xlog), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!log) {
                xfs_warn(mp, "Log allocation failed: No memory!");
                goto out;
                size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
                                sizeof(struct bio_vec);
 
-               iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
+               iclog = kzalloc(sizeof(*iclog) + bvec_size,
+                               GFP_KERNEL | __GFP_RETRY_MAYFAIL);
                if (!iclog)
                        goto out_free_iclog;
 
 
 {
        struct xfs_cil_ctx      *ctx;
 
-       ctx = kmem_zalloc(sizeof(*ctx), KM_NOFS);
+       ctx = kzalloc(sizeof(*ctx), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
        INIT_LIST_HEAD(&ctx->log_items);
        struct xlog_cil_pcp     *cilpcp;
        int                     cpu;
 
-       cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
+       cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!cil)
                return -ENOMEM;
        /*
 
 {
        struct xlog_recover_item *item;
 
-       item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
+       item = kzalloc(sizeof(struct xlog_recover_item),
+                       GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&item->ri_list);
        list_add_tail(&item->ri_list, head);
 }
                }
 
                item->ri_total = in_f->ilf_size;
-               item->ri_buf =
-                       kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
-                                   0);
+               item->ri_buf = kzalloc(item->ri_total * sizeof(xfs_log_iovec_t),
+                               GFP_KERNEL | __GFP_NOFAIL);
        }
 
        if (item->ri_total <= item->ri_cnt) {
         * This is a new transaction so allocate a new recovery container to
         * hold the recovery ops that will follow.
         */
-       trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
+       trans = kzalloc(sizeof(struct xlog_recover), GFP_KERNEL | __GFP_NOFAIL);
        trans->r_log_tid = tid;
        trans->r_lsn = be64_to_cpu(rhead->h_lsn);
        INIT_LIST_HEAD(&trans->r_itemq);
 
        if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count))
                return -EINVAL;
 
-       if (!(mru = kmem_zalloc(sizeof(*mru), 0)))
+       mru = kzalloc(sizeof(*mru), GFP_KERNEL | __GFP_NOFAIL);
+       if (!mru)
                return -ENOMEM;
 
        /* An extra list is needed to avoid reaping up to a grp_time early. */
        mru->grp_count = grp_count + 1;
-       mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), 0);
-
+       mru->lists = kzalloc(mru->grp_count * sizeof(*mru->lists),
+                               GFP_KERNEL | __GFP_NOFAIL);
        if (!mru->lists) {
                err = -ENOMEM;
                goto exit;
 
 
        ASSERT(XFS_IS_QUOTA_ON(mp));
 
-       qinf = mp->m_quotainfo = kmem_zalloc(sizeof(struct xfs_quotainfo), 0);
+       qinf = mp->m_quotainfo = kzalloc(sizeof(struct xfs_quotainfo),
+                                       GFP_KERNEL | __GFP_NOFAIL);
 
        error = list_lru_init(&qinf->qi_lru);
        if (error)
 
 
        ASSERT(nextents > 0);
        if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
-               cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
-                               0);
+               cuip = kzalloc(xfs_cui_log_item_sizeof(nextents),
+                               GFP_KERNEL | __GFP_NOFAIL);
        else
                cuip = kmem_cache_zalloc(xfs_cui_cache,
                                         GFP_KERNEL | __GFP_NOFAIL);
 
 
        ASSERT(nextents > 0);
        if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
-               ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
+               ruip = kzalloc(xfs_rui_log_item_sizeof(nextents),
+                               GFP_KERNEL | __GFP_NOFAIL);
        else
                ruip = kmem_cache_zalloc(xfs_rui_cache,
                                         GFP_KERNEL | __GFP_NOFAIL);
 
 {
        struct xfs_ail  *ailp;
 
-       ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
+       ailp = kzalloc(sizeof(struct xfs_ail),
+                       GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!ailp)
                return -ENOMEM;