return cluster_index(si, ci) * SWAPFILE_CLUSTER;
}
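+/*
+ * Allocate a zeroed swap table, either from the dedicated slab cache
+ * or, when SWP_TABLE_USE_PAGE, backed by a freshly allocated folio.
+ */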
+static struct swap_table *swap_table_alloc(gfp_t gfp)
+{
+ struct folio *folio;
+
+ if (!SWP_TABLE_USE_PAGE)
+ return kmem_cache_zalloc(swap_table_cachep, gfp);
+
+ folio = folio_alloc(gfp | __GFP_ZERO, 0);
+ if (folio)
+ return folio_address(folio);
+ return NULL;
+}
+
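+/* RCU callback: release the folio backing a swap table. */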
+static void swap_table_free_folio_rcu_cb(struct rcu_head *head)
+{
+ struct folio *folio;
+
+ folio = page_folio(container_of(head, struct page, rcu_head));
+ folio_put(folio);
+}
+
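+/*
+ * Free a swap table. Slab objects can be freed directly since the
+ * cache is SLAB_TYPESAFE_BY_RCU; a folio-backed table must defer the
+ * folio_put() through call_rcu() so that lockless readers never see
+ * the page recycled under them.
+ */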
+static void swap_table_free(struct swap_table *table)
+{
+ if (!SWP_TABLE_USE_PAGE) {
+ kmem_cache_free(swap_table_cachep, table);
+ return;
+ }
+
+ call_rcu(&(folio_page(virt_to_folio(table), 0)->rcu_head),
+ swap_table_free_folio_rcu_cb);
+}
+
static void swap_cluster_free_table(struct swap_cluster_info *ci)
{
unsigned int ci_off;
table = (void *)rcu_dereference_protected(ci->table, true);
rcu_assign_pointer(ci->table, NULL);
- kmem_cache_free(swap_table_cachep, table);
+ swap_table_free(table);
}
/*
lockdep_assert_held(&ci->lock);
lockdep_assert_held(&this_cpu_ptr(&percpu_swap_cluster)->lock);
- table = kmem_cache_zalloc(swap_table_cachep,
- __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
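+ /*
+ * Both locks are held here, so the allocation must not sleep:
+ * try a high-priority, no-reclaim, no-warn attempt first and
+ * fall back to a sleeping allocation with the locks dropped.
+ */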
+ table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (table) {
rcu_assign_pointer(ci->table, table);
return ci;
if (!(si->flags & SWP_SOLIDSTATE))
spin_unlock(&si->global_cluster_lock);
local_unlock(&percpu_swap_cluster.lock);
- table = kmem_cache_zalloc(swap_table_cachep, __GFP_HIGH | GFP_KERNEL);
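+ /* With all locks dropped, a sleeping GFP_KERNEL allocation is allowed. */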
+ table = swap_table_alloc(__GFP_HIGH | GFP_KERNEL);
local_lock(&percpu_swap_cluster.lock);
if (!(si->flags & SWP_SOLIDSTATE))
free_table:
if (table)
- kmem_cache_free(swap_table_cachep, table);
+ swap_table_free(table);
return ci;
}
ci = cluster_info + idx;
if (!ci->table) {
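+ /* This cluster has no table yet; populate it (GFP_KERNEL may sleep). */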
- table = kmem_cache_zalloc(swap_table_cachep, GFP_KERNEL);
+ table = swap_table_alloc(GFP_KERNEL);
if (!table)
return -ENOMEM;
rcu_assign_pointer(ci->table, table);
* only, and all swap cache readers (swap_cache_*) verify
* the content before use. So it's safe to use RCU slab here.
*/
- swap_table_cachep = kmem_cache_create("swap_table",
- sizeof(struct swap_table),
- 0, SLAB_PANIC | SLAB_TYPESAFE_BY_RCU, NULL);
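+ /* Page-backed tables come straight from the page allocator; no cache needed. */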
+ if (!SWP_TABLE_USE_PAGE)
+ swap_table_cachep = kmem_cache_create("swap_table",
+ sizeof(struct swap_table),
+ 0, SLAB_PANIC | SLAB_TYPESAFE_BY_RCU, NULL);
#ifdef CONFIG_MIGRATION
if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))