struct xdp_mem_info;
 
 #ifdef CONFIG_PAGE_POOL
+void page_pool_unlink_napi(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
                           struct xdp_mem_info *mem);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                             int count);
 #else
+static inline void page_pool_unlink_napi(struct page_pool *pool)
+{
+}
+
 static inline void page_pool_destroy(struct page_pool *pool)
 {
 }
 
@@ ... @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
        pool->xdp_mem_id = mem->id;
 }
 
+void page_pool_unlink_napi(struct page_pool *pool)
+{
+       if (!pool->p.napi)
+               return;
+
+       /* To avoid races with recycling and additional barriers make sure
+        * pool and NAPI are unlinked when NAPI is disabled.
+        */
+       WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
+               READ_ONCE(pool->p.napi->list_owner) != -1);
+
+       WRITE_ONCE(pool->p.napi, NULL);
+}
+EXPORT_SYMBOL(page_pool_unlink_napi);
+
 void page_pool_destroy(struct page_pool *pool)
 {
        if (!pool)
                return;
 
        if (!page_pool_put(pool))
                return;
 
+       page_pool_unlink_napi(pool);
        page_pool_free_frag(pool);
 
        if (!page_pool_release(pool))
                return;
@@ ... @@ bool page_pool_return_skb_page(struct page *page, bool napi_safe)
         * in the same context as the consumer would run, so there's
         * no possible race.
         */
-       napi = pp->p.napi;
+       napi = READ_ONCE(pp->p.napi);
        allow_direct = napi_safe && napi &&
                READ_ONCE(napi->list_owner) == smp_processor_id();
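
The ordering the new WARN_ON() in page_pool_unlink_napi() expects can be sketched with a
minimal, hypothetical driver teardown path. The my_rxq struct, my_rxq_teardown() and their
fields below are illustrative only and not part of this patch; napi_disable(),
netif_napi_del() and page_pool_destroy() are the existing kernel APIs:

#include <linux/netdevice.h>
#include <net/page_pool.h>

/* Hypothetical per-queue state; the struct and field names are illustrative. */
struct my_rxq {
	struct napi_struct napi;
	struct page_pool *page_pool;	/* created with pp_params.napi = &q->napi */
};

static void my_rxq_teardown(struct my_rxq *q)
{
	/* Disable NAPI first: after napi_disable() the instance can no longer
	 * be scheduled, which is the state the WARN_ON() above checks for.
	 */
	napi_disable(&q->napi);
	netif_napi_del(&q->napi);

	/* page_pool_destroy() now calls page_pool_unlink_napi(), clearing
	 * pool->p.napi with WRITE_ONCE().  A late skb free racing with
	 * teardown reads the pointer with READ_ONCE() (the napi_safe path
	 * above) and falls back to normal, non-direct recycling instead of
	 * dereferencing a stale napi_struct.
	 */
	page_pool_destroy(q->page_pool);
}

Because the unlink runs only once the NAPI can no longer be scheduled, the plain
WRITE_ONCE()/READ_ONCE() pairing is sufficient and no additional barriers are needed,
which is what the comment in page_pool_unlink_napi() refers to.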