void __page_pool_put_page(struct page_pool *pool,
                          struct page *page, bool allow_direct);
 
-static inline void page_pool_put_page(struct page_pool *pool, struct page *page)
+static inline void page_pool_put_page(struct page_pool *pool,
+                                     struct page *page, bool allow_direct)
 {
        /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
         * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
         */
 #ifdef CONFIG_PAGE_POOL
-       __page_pool_put_page(pool, page, false);
+       __page_pool_put_page(pool, page, allow_direct);
 #endif
 }
 /* Very limited use-cases allow recycle direct */
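
With the extra allow_direct argument, a caller of page_pool_put_page() now states
whether it runs under the RX NAPI (softirq) context that owns the pool, which is
what makes direct recycling into the pool's internal cache safe. A minimal sketch
of the two call patterns; mydrv_free_rx_page() is a hypothetical helper and the
page is assumed to have been allocated from @pool:

/* Illustrative only: mydrv_free_rx_page() is hypothetical; the page must
 * have been allocated from @pool for either call to be valid.
 */
static void mydrv_free_rx_page(struct page_pool *pool, struct page *page,
			       bool in_napi)
{
	if (in_napi)
		/* Running in the pool's RX NAPI: direct recycle is allowed */
		page_pool_put_page(pool, page, true);
	else
		/* Any other context: take the slower, safe return path */
		page_pool_put_page(pool, page, false);
}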
 
 }
 EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
 
-static void xdp_return(void *data, struct xdp_mem_info *mem)
+/* XDP RX runs under NAPI protection, and in different delivery error
+ * scenarios (e.g. queue full), it is possible to return the xdp_frame
+ * while still leveraging this protection.  The @napi_direct boolean
+ * is used for those call sites, thus allowing for faster recycling
+ * of xdp_frames/pages in those cases.
+ */
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 {
        struct xdp_mem_allocator *xa;
        struct page *page;
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
                if (xa)
-                       page_pool_put_page(xa->page_pool, page);
+                       page_pool_put_page(xa->page_pool, page, napi_direct);
                else
                        put_page(page);
                rcu_read_unlock();
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-       xdp_return(xdpf->data, &xdpf->mem);
+       __xdp_return(xdpf->data, &xdpf->mem, false);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
+{
+       __xdp_return(xdpf->data, &xdpf->mem, true);
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+
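The new xdp_return_frame_rx_napi() is meant for delivery-error paths (e.g. a full
queue) that are hit while still executing under the RX NAPI that produced the
frame. A hedged sketch of such a caller; struct mydrv_txq, mydrv_txq_full() and
mydrv_txq_enqueue() are hypothetical names, not part of this patch:

/* Illustrative transmit path invoked from the RX NAPI poll loop that
 * redirected the frame; the queue handling is purely hypothetical.
 */
static int mydrv_xdp_xmit_one(struct mydrv_txq *txq, struct xdp_frame *xdpf)
{
	if (mydrv_txq_full(txq)) {
		/* Still under the RX NAPI that produced xdpf, so its page
		 * may be recycled directly back into its page_pool.
		 */
		xdp_return_frame_rx_napi(xdpf);
		return -ENOSPC;
	}
	return mydrv_txq_enqueue(txq, xdpf);
}
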
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-       xdp_return(xdp->data, &xdp->rxq->mem);
+       __xdp_return(xdp->data, &xdp->rxq->mem, true);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
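
Note the asymmetry above: xdp_return_buff() passes napi_direct=true because an
xdp_buff is only valid while the driver's RX NAPI handler is processing it,
whereas xdp_return_frame() keeps napi_direct=false since an xdp_frame can be
freed long after it has left that context, for instance from a queue drained on
another CPU. A hypothetical consumer of such a queue, which therefore must use
the plain variant; mydrv_drain_xdp_ring() is illustrative only:

/* Runs outside the originating RX NAPI (e.g. a kthread draining a
 * cross-CPU ptr_ring), so it cannot claim NAPI protection and uses
 * xdp_return_frame() rather than the _rx_napi variant.
 */
static void mydrv_drain_xdp_ring(struct ptr_ring *ring)
{
	struct xdp_frame *xdpf;

	while ((xdpf = ptr_ring_consume(ring)) != NULL)
		xdp_return_frame(xdpf);
}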