null_blk: Convert to XArray
author     Matthew Wilcox <willy@infradead.org>
           Thu, 18 Oct 2018 19:18:13 +0000 (15:18 -0400)
committer  Matthew Wilcox (Oracle) <willy@infradead.org>
           Thu, 8 Aug 2019 02:35:37 +0000 (22:35 -0400)
By changing the locking we could remove the slightly awkward dance in
null_insert_page(), but I'll leave that for someone who's more familiar
with the driver.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
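
The "slightly awkward dance" presumably refers to the reserve-then-populate sequence in the null_insert_page() hunk below: xa_insert() of a NULL entry reserves the index (and may sleep to allocate nodes), so the later xa_cmpxchg() under the IRQ-disabled spinlock can run with GFP_ATOMIC and will not fail for lack of memory. A minimal standalone sketch of that pattern follows; struct foo, foo_insert(), the xarray "pages" and its spinlock are made-up names, not part of the driver.

/*
 * Sketch of the reserve-then-populate pattern: reserve the slot while
 * we may still sleep, then fill it (or lose the race) under the lock.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_FLAGS(pages, XA_FLAGS_LOCK_IRQ);
static DEFINE_SPINLOCK(lock);

struct foo {
	unsigned long index;
};

static struct foo *foo_insert(unsigned long idx)
{
	struct foo *new, *exist;

	new = kzalloc(sizeof(*new), GFP_NOIO);
	if (!new)
		return NULL;

	/* Reserve the index; may allocate xarray nodes and sleep. */
	if (xa_insert_irq(&pages, idx, NULL, GFP_NOIO) == -ENOMEM) {
		kfree(new);
		return NULL;
	}

	spin_lock_irq(&lock);
	new->index = idx;
	/* Replace the reservation; if someone beat us to it, use theirs. */
	exist = xa_cmpxchg(&pages, idx, XA_ZERO_ENTRY, new, GFP_ATOMIC);
	if (exist) {
		kfree(new);
		new = exist;
	}
	spin_unlock_irq(&lock);

	return new;
}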
drivers/block/null_blk.h
drivers/block/null_blk_main.c

drivers/block/null_blk.h
index a1b9929bd911926c94d4c86b886c4da629a5ef58..f42e347d2e1508e4e397cc50a0e58212f949a239 100644
@@ -35,8 +35,8 @@ struct nullb_queue {
 struct nullb_device {
        struct nullb *nullb;
        struct config_item item;
-       struct radix_tree_root data; /* data stored in the disk */
-       struct radix_tree_root cache; /* disk cache data */
+       struct xarray data; /* data stored in the disk */
+       struct xarray cache; /* disk cache data */
        unsigned long flags; /* device flags */
        unsigned int curr_cache;
        struct badblocks badblocks;
drivers/block/null_blk_main.c
index 99328ded60d124695fe7fd307b7420d2e94bb8a6..c3c9ccfa2ecea4eb4786b2f7793b2a6cf3855cc1 100644
@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/init.h>
+#include <linux/xarray.h>
 #include "null_blk.h"
 
 #define PAGE_SECTORS_SHIFT     (PAGE_SHIFT - SECTOR_SHIFT)
@@ -509,8 +510,8 @@ static struct nullb_device *null_alloc_dev(void)
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return NULL;
-       INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
-       INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
+       xa_init_flags(&dev->data, XA_FLAGS_LOCK_IRQ);
+       xa_init_flags(&dev->cache, XA_FLAGS_LOCK_IRQ);
        if (badblocks_init(&dev->badblocks, 0)) {
                kfree(dev);
                return NULL;
@@ -691,18 +692,18 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
        unsigned int sector_bit;
        u64 idx;
        struct nullb_page *t_page, *ret;
-       struct radix_tree_root *root;
+       struct xarray *xa;
 
-       root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+       xa = is_cache ? &nullb->dev->cache : &nullb->dev->data;
        idx = sector >> PAGE_SECTORS_SHIFT;
        sector_bit = (sector & SECTOR_MASK);
 
-       t_page = radix_tree_lookup(root, idx);
+       t_page = xa_load(xa, idx);
        if (t_page) {
                __clear_bit(sector_bit, t_page->bitmap);
 
                if (null_page_empty(t_page)) {
-                       ret = radix_tree_delete_item(root, idx, t_page);
+                       ret = xa_cmpxchg(xa, idx, t_page, NULL, 0);
                        WARN_ON(ret != t_page);
                        null_free_page(ret);
                        if (is_cache)
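
The radix_tree_delete_item() call above, like the ones in the flush hunks further down, becomes xa_cmpxchg(xa, idx, item, NULL, 0): erase the slot only if it still holds the expected item, and return whatever was there. A GFP mask of 0 is enough because storing NULL never allocates. A minimal sketch with made-up names (xarray "pages", pages_remove()):

#include <linux/slab.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pages);

/* Conditional delete: only remove the entry if it is still @expected. */
static void pages_remove(unsigned long idx, void *expected)
{
	void *old = xa_cmpxchg(&pages, idx, expected, NULL, 0);

	if (!WARN_ON(old != expected))
		kfree(old);
}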
@@ -711,47 +712,17 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
        }
 }
 
-static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
-       struct nullb_page *t_page, bool is_cache)
-{
-       struct radix_tree_root *root;
-
-       root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
-
-       if (radix_tree_insert(root, idx, t_page)) {
-               null_free_page(t_page);
-               t_page = radix_tree_lookup(root, idx);
-               WARN_ON(!t_page || t_page->page->index != idx);
-       } else if (is_cache)
-               nullb->dev->curr_cache += PAGE_SIZE;
-
-       return t_page;
-}
-
 static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
 {
-       unsigned long pos = 0;
-       int nr_pages;
-       struct nullb_page *ret, *t_pages[FREE_BATCH];
-       struct radix_tree_root *root;
-
-       root = is_cache ? &dev->cache : &dev->data;
-
-       do {
-               int i;
-
-               nr_pages = radix_tree_gang_lookup(root,
-                               (void **)t_pages, pos, FREE_BATCH);
-
-               for (i = 0; i < nr_pages; i++) {
-                       pos = t_pages[i]->page->index;
-                       ret = radix_tree_delete_item(root, pos, t_pages[i]);
-                       WARN_ON(ret != t_pages[i]);
-                       null_free_page(ret);
-               }
+       struct nullb_page *t_page;
+       XA_STATE(xas, is_cache ? &dev->cache : &dev->data, 0);
 
-               pos++;
-       } while (nr_pages == FREE_BATCH);
+       xas_lock(&xas);
+       xas_for_each(&xas, t_page, ULONG_MAX) {
+               xas_store(&xas, NULL);
+               null_free_page(t_page);
+       }
+       xas_unlock(&xas);
 
        if (is_cache)
                dev->curr_cache = 0;
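
The batched gang-lookup/delete loop above collapses into a single pass with the advanced API: xas_for_each() visits every present entry, xas_store(&xas, NULL) erases the entry the cursor is standing on, and the whole walk can stay under xas_lock() because nothing in the body allocates. A minimal sketch of that teardown pattern; the xarray "pages" and pages_destroy() are made-up names.

#include <linux/slab.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(pages);

/* Erase and free every entry in the array in one locked pass. */
static void pages_destroy(void)
{
	XA_STATE(xas, &pages, 0);
	void *item;

	xas_lock(&xas);
	xas_for_each(&xas, item, ULONG_MAX) {
		xas_store(&xas, NULL);	/* erase at the cursor's position */
		kfree(item);
	}
	xas_unlock(&xas);
}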
@@ -763,13 +734,13 @@ static struct nullb_page *__null_lookup_page(struct nullb *nullb,
        unsigned int sector_bit;
        u64 idx;
        struct nullb_page *t_page;
-       struct radix_tree_root *root;
+       struct xarray *xa;
 
        idx = sector >> PAGE_SECTORS_SHIFT;
        sector_bit = (sector & SECTOR_MASK);
 
-       root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
-       t_page = radix_tree_lookup(root, idx);
+       xa = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+       t_page = xa_load(xa, idx);
        WARN_ON(t_page && t_page->page->index != idx);
 
        if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
@@ -795,8 +766,9 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
        __releases(&nullb->lock)
        __acquires(&nullb->lock)
 {
-       u64 idx;
-       struct nullb_page *t_page;
+       struct xarray *xa;
+       unsigned long idx;
+       struct nullb_page *exist, *t_page;
 
        t_page = null_lookup_page(nullb, sector, true, ignore_cache);
        if (t_page)
@@ -808,14 +780,21 @@ static struct nullb_page *null_insert_page(struct nullb *nullb,
        if (!t_page)
                goto out_lock;
 
-       if (radix_tree_preload(GFP_NOIO))
+       idx = sector >> PAGE_SECTORS_SHIFT;
+       xa = ignore_cache ? &nullb->dev->data : &nullb->dev->cache;
+       if (xa_insert_irq(xa, idx, NULL, GFP_NOIO) == -ENOMEM)
                goto out_freepage;
 
        spin_lock_irq(&nullb->lock);
-       idx = sector >> PAGE_SECTORS_SHIFT;
        t_page->page->index = idx;
-       t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
-       radix_tree_preload_end();
+       exist = xa_cmpxchg(xa, idx, XA_ZERO_ENTRY, t_page, GFP_ATOMIC);
+       if (exist) {
+               null_free_page(t_page);
+               t_page = exist;
+       } else if (!ignore_cache)
+               nullb->dev->curr_cache += PAGE_SIZE;
+
+       WARN_ON(t_page->page->index != idx);
 
        return t_page;
 out_freepage:
@@ -841,8 +820,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
        if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
                null_free_page(c_page);
                if (t_page && null_page_empty(t_page)) {
-                       ret = radix_tree_delete_item(&nullb->dev->data,
-                               idx, t_page);
+                       xa_cmpxchg(&nullb->dev->data, idx, t_page, NULL, 0);
                        null_free_page(t_page);
                }
                return 0;
@@ -867,7 +845,7 @@ static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
        kunmap_atomic(dst);
        kunmap_atomic(src);
 
-       ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
+       ret = xa_cmpxchg(&nullb->dev->cache, idx, c_page, NULL, 0);
        null_free_page(ret);
        nullb->dev->curr_cache -= PAGE_SIZE;
 
@@ -885,8 +863,9 @@ again:
             nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
                return 0;
 
-       nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
-                       (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
+       nr_pages = xa_extract(&nullb->dev->cache, (void **)c_pages,
+                               nullb->cache_flush_pos, ULONG_MAX,
+                               FREE_BATCH, XA_PRESENT);
        /*
         * nullb_flush_cache_page could unlock before using the c_pages. To
         * avoid race, we don't allow page free
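
xa_extract() is the closest XArray analogue of radix_tree_gang_lookup(): it copies up to @n present entries in the range [start, max] into a caller-supplied array and returns how many it found (but not their indices, so the objects themselves must record where they live). A minimal batched-walk sketch with made-up names (struct item, the xarray "items", EXTRACT_BATCH):

#include <linux/xarray.h>

#define EXTRACT_BATCH	16

struct item {
	unsigned long index;	/* each object records its own index */
};

static DEFINE_XARRAY(items);

static void items_walk_batched(void)
{
	struct item *batch[EXTRACT_BATCH];
	unsigned long pos = 0;
	unsigned int i, nr;

	do {
		/* Grab up to EXTRACT_BATCH present entries at or after pos. */
		nr = xa_extract(&items, (void **)batch, pos, ULONG_MAX,
				EXTRACT_BATCH, XA_PRESENT);
		for (i = 0; i < nr; i++) {
			pos = batch[i]->index;	/* remember how far we got */
			/* ... process batch[i] here ... */
		}
		pos++;
	} while (nr == EXTRACT_BATCH);
}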
@@ -1027,7 +1006,7 @@ static int null_handle_flush(struct nullb *nullb)
                        break;
        }
 
-       WARN_ON(!radix_tree_empty(&nullb->dev->cache));
+       WARN_ON(!xa_empty(&nullb->dev->cache));
        spin_unlock_irq(&nullb->lock);
        return err;
 }