dm vdo vio-pool: add a pool pointer to pooled_vio
author     Ken Raeburn <raeburn@redhat.com>        Sat, 1 Feb 2025 02:18:03 +0000 (21:18 -0500)
committer  Mikulas Patocka <mpatocka@redhat.com>   Mon, 3 Feb 2025 13:14:45 +0000 (14:14 +0100)
This allows us to simplify the return_vio_to_pool interface.

Also, we don't need to use vdo_forget on local variables or arguments
that are about to go out of scope anyway.
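
In effect, each pooled_vio now carries a back-pointer to the pool it was
allocated from, set once in make_vio_pool(), so callers no longer have to
thread a vio_pool pointer through to the return path. A representative
call site, paraphrasing the block-map.c hunks below (surrounding code elided):

    /* Before: the caller had to know which pool the vio came from. */
    return_vio_to_pool(zone->vio_pool, pooled);

    /* After: the pooled_vio remembers its pool, so only the vio is passed. */
    return_vio_to_pool(pooled);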

Signed-off-by: Ken Raeburn <raeburn@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
drivers/md/dm-vdo/block-map.c
drivers/md/dm-vdo/slab-depot.c
drivers/md/dm-vdo/vio.c
drivers/md/dm-vdo/vio.h

diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c
index 89cb7942ec5cc92b8bc5a06b209f26f6a69d7445..bc836f95f8b51f0884845e127cf81c022a540104 100644
--- a/drivers/md/dm-vdo/block-map.c
+++ b/drivers/md/dm-vdo/block-map.c
@@ -1544,7 +1544,7 @@ static void write_page_if_not_dirtied(struct vdo_waiter *waiter, void *context)
 
 static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
 {
-       return_vio_to_pool(zone->vio_pool, vio);
+       return_vio_to_pool(vio);
        check_for_drain_complete(zone);
 }
 
@@ -1837,7 +1837,7 @@ static void finish_block_map_page_load(struct vdo_completion *completion)
 
        if (!vdo_copy_valid_page(vio->data, nonce, pbn, page))
                vdo_format_block_map_page(page, nonce, pbn, false);
-       return_vio_to_pool(zone->vio_pool, pooled);
+       return_vio_to_pool(pooled);
 
        /* Release our claim to the load and wake any waiters */
        release_page_lock(data_vio, "load");
@@ -1851,10 +1851,9 @@ static void handle_io_error(struct vdo_completion *completion)
        struct vio *vio = as_vio(completion);
        struct pooled_vio *pooled = container_of(vio, struct pooled_vio, vio);
        struct data_vio *data_vio = completion->parent;
-       struct block_map_zone *zone = pooled->context;
 
        vio_record_metadata_io_error(vio);
-       return_vio_to_pool(zone->vio_pool, pooled);
+       return_vio_to_pool(pooled);
        abort_load(data_vio, result);
 }
 
@@ -2499,7 +2498,7 @@ static void finish_cursor(struct cursor *cursor)
        struct cursors *cursors = cursor->parent;
        struct vdo_completion *completion = cursors->completion;
 
-       return_vio_to_pool(cursors->pool, vdo_forget(cursor->vio));
+       return_vio_to_pool(vdo_forget(cursor->vio));
        if (--cursors->active_roots > 0)
                return;
 
diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c
index 8f0a35c63af686df306880d92a4ae3a6b03578e9..7249b281f99f8f06de5b8337fd3281f02c4a74f9 100644
--- a/drivers/md/dm-vdo/slab-depot.c
+++ b/drivers/md/dm-vdo/slab-depot.c
@@ -414,8 +414,7 @@ static void complete_reaping(struct vdo_completion *completion)
 {
        struct slab_journal *journal = completion->parent;
 
-       return_vio_to_pool(journal->slab->allocator->vio_pool,
-                          vio_as_pooled_vio(as_vio(vdo_forget(completion))));
+       return_vio_to_pool(vio_as_pooled_vio(as_vio(completion)));
        finish_reaping(journal);
        reap_slab_journal(journal);
 }
@@ -698,7 +697,7 @@ static void complete_write(struct vdo_completion *completion)
        sequence_number_t committed = get_committing_sequence_number(pooled);
 
        list_del_init(&pooled->list_entry);
-       return_vio_to_pool(journal->slab->allocator->vio_pool, vdo_forget(pooled));
+       return_vio_to_pool(pooled);
 
        if (result != VDO_SUCCESS) {
                vio_record_metadata_io_error(as_vio(completion));
@@ -1076,7 +1075,7 @@ static void finish_reference_block_write(struct vdo_completion *completion)
        /* Release the slab journal lock. */
        adjust_slab_journal_block_reference(&slab->journal,
                                            block->slab_journal_lock_to_release, -1);
-       return_vio_to_pool(slab->allocator->vio_pool, pooled);
+       return_vio_to_pool(pooled);
 
        /*
         * We can't clear the is_writing flag earlier as releasing the slab journal lock may cause
@@ -1170,7 +1169,7 @@ static void handle_io_error(struct vdo_completion *completion)
        struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab;
 
        vio_record_metadata_io_error(vio);
-       return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+       return_vio_to_pool(vio_as_pooled_vio(vio));
        slab->active_count--;
        vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
        check_if_slab_drained(slab);
@@ -2242,7 +2241,7 @@ static void finish_reference_block_load(struct vdo_completion *completion)
        struct vdo_slab *slab = block->slab;
 
        unpack_reference_block((struct packed_reference_block *) vio->data, block);
-       return_vio_to_pool(slab->allocator->vio_pool, pooled);
+       return_vio_to_pool(pooled);
        slab->active_count--;
        clear_provisional_references(block);
 
@@ -2429,7 +2428,7 @@ static void finish_loading_journal(struct vdo_completion *completion)
                initialize_journal_state(journal);
        }
 
-       return_vio_to_pool(slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+       return_vio_to_pool(vio_as_pooled_vio(vio));
        vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab));
 }
 
@@ -2449,7 +2448,7 @@ static void handle_load_error(struct vdo_completion *completion)
        struct vio *vio = as_vio(completion);
 
        vio_record_metadata_io_error(vio);
-       return_vio_to_pool(journal->slab->allocator->vio_pool, vio_as_pooled_vio(vio));
+       return_vio_to_pool(vio_as_pooled_vio(vio));
        vdo_finish_loading_with_result(&journal->slab->state, result);
 }
 
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index e710f3c5a972dd763383f8a3819aca336caf1628..4d96989a716d8b0b532489f22c36d81297396d3c 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -345,6 +345,7 @@ int make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t thread_id,
                }
 
                pooled->context = context;
+               pooled->pool = pool;
                list_add_tail(&pooled->pool_entry, &pool->available);
        }
 
@@ -419,12 +420,13 @@ void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter)
 }
 
 /**
- * return_vio_to_pool() - Return a vio to the pool
- * @pool: The vio pool.
+ * return_vio_to_pool() - Return a vio to its pool
  * @vio: The pooled vio to return.
  */
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
+void return_vio_to_pool(struct pooled_vio *vio)
 {
+       struct vio_pool *pool = vio->pool;
+
        VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
                            "vio pool entry returned on same thread as it was acquired");
 
diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h
index 3490e9f59b04aa5a5d17093a2e5fd0593b979d37..2e3f878e2074d637988dce0d1fa51613cfc407cc 100644
--- a/drivers/md/dm-vdo/vio.h
+++ b/drivers/md/dm-vdo/vio.h
@@ -30,6 +30,8 @@ struct pooled_vio {
        void *context;
        /* The list entry used by the pool */
        struct list_head pool_entry;
+       /* The pool this vio is allocated from */
+       struct vio_pool *pool;
 };
 
 /**
@@ -194,6 +196,6 @@ int __must_check make_vio_pool(struct vdo *vdo, size_t pool_size, thread_id_t th
 void free_vio_pool(struct vio_pool *pool);
 bool __must_check is_vio_pool_busy(struct vio_pool *pool);
 void acquire_vio_from_pool(struct vio_pool *pool, struct vdo_waiter *waiter);
-void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio);
+void return_vio_to_pool(struct pooled_vio *vio);
 
 #endif /* VIO_H */