folio_put(folio);
}
+/*
+ * Return true if the hstate represents gigantic pages but runtime
+ * support for gigantic pages is unavailable. Callers use this common
+ * check to skip operations that cannot be performed on gigantic pages
+ * without runtime support.
+ */
+static inline bool hstate_is_gigantic_no_runtime(struct hstate *h)
+{
+ return hstate_is_gigantic(h) && !gigantic_page_runtime_supported();
+}
+
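(Aside: for readers following along outside the kernel tree, the refactor folds a two-part guard into a single predicate. Below is a minimal userspace sketch of the same pattern; the struct, the order threshold, and the two stubbed predicates are hypothetical stand-ins for the kernel's hstate, hstate_is_gigantic() and gigantic_page_runtime_supported(), not their real definitions.)

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct hstate. */
struct hstate {
	unsigned int order;
};

/* Crude stub: the real kernel check compares the hstate order against
 * the buddy allocator's maximum order; 18 approximates a 1 GiB page
 * with a 4 KiB base page size. */
static bool hstate_is_gigantic(struct hstate *h)
{
	return h->order >= 18;
}

/* Stub: pretend runtime support for gigantic pages is compiled out. */
static bool gigantic_page_runtime_supported(void)
{
	return false;
}

/* The consolidated guard, mirroring the patch's new helper. */
static inline bool hstate_is_gigantic_no_runtime(struct hstate *h)
{
	return hstate_is_gigantic(h) && !gigantic_page_runtime_supported();
}

int main(void)
{
	struct hstate h = { .order = 18 };

	/* Each converted call site shrinks to one readable predicate. */
	if (hstate_is_gigantic_no_runtime(&h))
		printf("skip: gigantic pages lack runtime support\n");
	return 0;
}
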
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
if (spool->count)
VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
lockdep_assert_held(&hugetlb_lock);
- if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ if (hstate_is_gigantic_no_runtime(h))
return;
list_del(&folio->lru);
{
bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
- if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ if (hstate_is_gigantic_no_runtime(h))
return;
/*
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
- if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ if (hstate_is_gigantic_no_runtime(h))
goto out;
/*
* - If CMA allocation is possible, we can not demote
* HUGETLB_PAGE_ORDER or smaller size pages.
*/
- if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ if (hstate_is_gigantic_no_runtime(h))
continue;
if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
continue;
int err;
nodemask_t nodes_allowed, *n_mask;
- if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ if (hstate_is_gigantic_no_runtime(h))
return -EINVAL;
if (nid == NUMA_NO_NODE) {