 static inline bool
 z_erofs_pagevec_ctor_enqueue(struct z_erofs_pagevec_ctor *ctor,
                              struct page *page,
-                             enum z_erofs_page_type type)
+                             enum z_erofs_page_type type,
+                             bool pvec_safereuse)
 {
-        if (unlikely(ctor->next == NULL && type))
-                if (ctor->index + 1 == ctor->nr)
+        if (!ctor->next) {
+                /* some pages cannot be reused as pvec safely without I/O */
+                if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
+                        type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
+
+                if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
+                    ctor->index + 1 == ctor->nr)
                         return false;
+        }
 
         if (unlikely(ctor->index >= ctor->nr))
                 z_erofs_pagevec_ctor_pagedown(ctor, false);
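As a side note, the gating logic this hunk introduces can be read in isolation. Below is a minimal userspace sketch, not the kernel code: the demo_* enum, struct, and functions are simplified stand-ins for z_erofs_page_type and z_erofs_pagevec_ctor. It shows how an EXCLUSIVE page that cannot be safely reused is demoted to TAIL_SHARED, and why a non-exclusive page may not occupy the last free slot, which must stay available for chaining the next pagevec page.

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for the kernel types (demo only) */
enum demo_page_type {
        DEMO_PAGE_TYPE_EXCLUSIVE,
        DEMO_PAGE_TYPE_TAIL_SHARED,
};

struct demo_pvec_ctor {
        void *next;             /* chained pagevec page, NULL on the last one */
        unsigned int index;     /* next free slot */
        unsigned int nr;        /* slots per pagevec page */
};

/* returns false when the last slot must stay free for chaining */
static bool demo_enqueue(struct demo_pvec_ctor *ctor,
                         enum demo_page_type type, bool pvec_safereuse)
{
        if (!ctor->next) {
                /* demote pages that cannot be reused as pvec safely */
                if (type == DEMO_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
                        type = DEMO_PAGE_TYPE_TAIL_SHARED;

                /* only a safely reusable exclusive page may take the slot */
                if (type != DEMO_PAGE_TYPE_EXCLUSIVE &&
                    ctor->index + 1 == ctor->nr)
                        return false;
        }
        ctor->index++;
        return true;
}

int main(void)
{
        struct demo_pvec_ctor ctor = { .next = NULL, .index = 3, .nr = 4 };

        /* unsafe reuse on the last slot fails; safe reuse succeeds */
        printf("%d\n", demo_enqueue(&ctor, DEMO_PAGE_TYPE_EXCLUSIVE, false));
        printf("%d\n", demo_enqueue(&ctor, DEMO_PAGE_TYPE_EXCLUSIVE, true));
        return 0;
}

Compiled with a plain C compiler, this prints 0 then 1: the unsafe enqueue into the last slot is rejected, forcing the caller down the -EAGAIN path shown further below, while the safe one succeeds.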
 /* callers must be with work->lock held */
-static int z_erofs_vle_work_add_page(
-        struct z_erofs_vle_work_builder *builder,
-        struct page *page,
-        enum z_erofs_page_type type)
+static int z_erofs_vle_work_add_page(struct z_erofs_vle_work_builder *builder,
+                                     struct page *page,
+                                     enum z_erofs_page_type type,
+                                     bool pvec_safereuse)
 {
         int ret;
 
         /* give priority for the compressed data storage */
         if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY &&
             type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
             try_to_reuse_as_compressed_page(builder, page))
                 return 0;
 
-        ret = z_erofs_pagevec_ctor_enqueue(&builder->vector, page, type);
+        ret = z_erofs_pagevec_ctor_enqueue(&builder->vector, page, type,
+                                           pvec_safereuse);
         builder->work->vcnt += (unsigned)ret;
-
         return ret ? 0 : -EAGAIN;
 }
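This hunk keeps the existing bool-to-errno convention: z_erofs_pagevec_ctor_enqueue returns true or false, and the caller counts a success into vcnt while mapping failure to -EAGAIN. A toy illustration of just that convention follows; demo_add_page is a hypothetical name, not a kernel symbol.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* bool -> 0/-EAGAIN convention, reduced to its essentials (demo only) */
static int demo_add_page(bool enqueued, unsigned int *vcnt)
{
        /* a bool cast to unsigned is 0 or 1, so vcnt counts successes */
        *vcnt += (unsigned int)enqueued;
        return enqueued ? 0 : -EAGAIN;
}

int main(void)
{
        unsigned int vcnt = 0;

        if (demo_add_page(true, &vcnt) == 0)
                printf("enqueued, vcnt=%u\n", vcnt);
        if (demo_add_page(false, &vcnt) == -EAGAIN)
                printf("full: allocate a staging page and retry\n");
        return 0;
}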
         tight &= builder_is_followed(builder);
 retry:
-        err = z_erofs_vle_work_add_page(builder, page, page_type);
+        err = z_erofs_vle_work_add_page(builder, page, page_type,
+                                        builder_is_followed(builder));
         /* should allocate an additional staging page for pagevec */
         if (err == -EAGAIN) {
                 struct page *const newpage =
                         __stagingpage_alloc(page_pool, GFP_NOFS);
 
                 err = z_erofs_vle_work_add_page(builder,
-                                                newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+                                                newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
                 if (likely(!err))
                         goto retry;
         }
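Finally, the retry shape of this caller can be reduced to a skeleton. The sketch below uses hypothetical demo_* helpers in place of the builder and page-pool machinery, and only demonstrates the control flow: when an ordinary enqueue fails with -EAGAIN, a staging page (always safe to reuse, hence the literal true above) is enqueued first, then the original page is retried.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* toy pagevec that refuses the last slot unless given a staging page */
struct demo_vec { unsigned int index, nr; };

static int demo_try_add(struct demo_vec *v, bool is_staging)
{
        if (!is_staging && v->index + 1 == v->nr)
                return -EAGAIN; /* full for ordinary pages */
        v->index++;
        return 0;
}

/* the -EAGAIN/goto-retry pattern of the caller above, in isolation */
static int demo_add_with_retry(struct demo_vec *v)
{
        int err;
retry:
        err = demo_try_add(v, false);
        if (err == -EAGAIN) {
                /* chain a freshly allocated staging page, then retry */
                err = demo_try_add(v, true);
                if (!err)
                        goto retry;
        }
        return err;
}

int main(void)
{
        struct demo_vec v = { .index = 3, .nr = 4 };

        printf("err=%d index=%u\n", demo_add_with_retry(&v), v.index);
        return 0;
}

Here the first attempt hits the full vector, the staging page takes the reserved slot, and the retried enqueue then succeeds, mirroring the goto retry loop in the patched caller.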