diff --git a/net/core/dev.h b/net/core/dev.h
index 764e0097ccf2..2e3bb7669984 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -148,18 +148,6 @@ void xdp_do_check_flushed(struct napi_struct *napi);
 static inline void xdp_do_check_flushed(struct napi_struct *napi) { }
 #endif
 
-/* Best effort check that NAPI is not idle (can't be scheduled to run) */
-static inline void napi_assert_will_not_race(const struct napi_struct *napi)
-{
-	/* uninitialized instance, can't race */
-	if (!napi->poll_list.next)
-		return;
-
-	/* SCHED bit is set on disabled instances */
-	WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state));
-	WARN_ON(READ_ONCE(napi->list_owner) != -1);
-}
-
 void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu);
 
 #define XMIT_RECURSION_LIMIT	8
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index c8ce069605c4..7b20f6fcb82c 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -25,7 +25,6 @@
 
 #include <trace/events/page_pool.h>
 
-#include "dev.h"
 #include "mp_dmabuf_devmem.h"
 #include "netmem_priv.h"
 #include "page_pool_priv.h"
@@ -1109,7 +1108,11 @@ void page_pool_disable_direct_recycling(struct page_pool *pool)
 	if (!pool->p.napi)
 		return;
 
-	napi_assert_will_not_race(pool->p.napi);
+	/* To avoid races with recycling and additional barriers make sure
+	 * pool and NAPI are unlinked when NAPI is disabled.
+	 */
+	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state));
+	WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1);
 
 	WRITE_ONCE(pool->p.napi, NULL);
 }