www.infradead.org Git - users/hch/misc.git/commitdiff
maple_tree: move up mas_wr_store_setup() and mas_wr_prealloc_setup()
author Sidhartha Kumar <sidhartha.kumar@oracle.com>
Wed, 14 Aug 2024 16:19:30 +0000 (12:19 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 2 Sep 2024 03:26:14 +0000 (20:26 -0700)
Subsequent patches require these definitions to be higher, no functional
changes intended.

Link: https://lkml.kernel.org/r/20240814161944.55347-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lib/maple_tree.c

index 407c0be6e42ff2543a74b5e90754e4f9e44cd3bb..de4a91ced8ca257becf64c03f98284c67f243f5c 100644 (file)
@@ -4227,6 +4227,54 @@ static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
                mas_wr_modify(wr_mas);
 }
 
+static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+{
+       if (!mas_is_active(wr_mas->mas)) {
+               if (mas_is_start(wr_mas->mas))
+                       return;
+
+               if (unlikely(mas_is_paused(wr_mas->mas)))
+                       goto reset;
+
+               if (unlikely(mas_is_none(wr_mas->mas)))
+                       goto reset;
+
+               if (unlikely(mas_is_overflow(wr_mas->mas)))
+                       goto reset;
+
+               if (unlikely(mas_is_underflow(wr_mas->mas)))
+                       goto reset;
+       }
+
+       /*
+        * A less strict version of mas_is_span_wr() where we allow spanning
+        * writes within this node.  This is to stop partial walks in
+        * mas_prealloc() from being reset.
+        */
+       if (wr_mas->mas->last > wr_mas->mas->max)
+               goto reset;
+
+       if (wr_mas->entry)
+               return;
+
+       if (mte_is_leaf(wr_mas->mas->node) &&
+           wr_mas->mas->last == wr_mas->mas->max)
+               goto reset;
+
+       return;
+
+reset:
+       mas_reset(wr_mas->mas);
+}
+
+static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
+{
+       struct ma_state *mas = wr_mas->mas;
+
+       mas_wr_store_setup(wr_mas);
+       wr_mas->content = mas_start(mas);
+}
+
 /**
  * mas_insert() - Internal call to insert a value
  * @mas: The maple state
@@ -5358,54 +5406,6 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
                mt_destroy_walk(enode, mt, true);
        }
 }
-
-static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
-{
-       if (!mas_is_active(wr_mas->mas)) {
-               if (mas_is_start(wr_mas->mas))
-                       return;
-
-               if (unlikely(mas_is_paused(wr_mas->mas)))
-                       goto reset;
-
-               if (unlikely(mas_is_none(wr_mas->mas)))
-                       goto reset;
-
-               if (unlikely(mas_is_overflow(wr_mas->mas)))
-                       goto reset;
-
-               if (unlikely(mas_is_underflow(wr_mas->mas)))
-                       goto reset;
-       }
-
-       /*
-        * A less strict version of mas_is_span_wr() where we allow spanning
-        * writes within this node.  This is to stop partial walks in
-        * mas_prealloc() from being reset.
-        */
-       if (wr_mas->mas->last > wr_mas->mas->max)
-               goto reset;
-
-       if (wr_mas->entry)
-               return;
-
-       if (mte_is_leaf(wr_mas->mas->node) &&
-           wr_mas->mas->last == wr_mas->mas->max)
-               goto reset;
-
-       return;
-
-reset:
-       mas_reset(wr_mas->mas);
-}
-
-static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
-{
-       struct ma_state *mas = wr_mas->mas;
-
-       mas_wr_store_setup(wr_mas);
-       wr_mas->content = mas_start(mas);
-}
 /* Interface */
 
 /**