xsk: Fix race condition in AF_XDP generic RX path
[ Upstream commit a1356ac7749cafc4e27aa62c0c4604b5dca4983e ]
Move rx_lock from struct xdp_sock to struct xsk_buff_pool.

This fixes synchronization in the generic RX path for shared umem mode,
where multiple sockets share a single xsk_buff_pool. The RX queue is
exclusive to an xsk_socket, while the FILL queue can be shared between
multiple sockets. This could result in a race condition where two CPU
cores enter the RX paths of two different sockets sharing the same umem
and manipulate the shared FILL queue concurrently.

Protect both queues by acquiring the spinlock in the shared
xsk_buff_pool. Lock contention may be minimized in the future by some
per-thread FQ buffering.
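To make the race concrete, here is a minimal userspace model of the
pre-fix locking (hypothetical names; an illustration, not the kernel
code): each "socket" holds only its own lock while popping from a FILL
queue shared with the other socket, so the shared consumer cursor
suffers lost updates.

/* Build with: cc -O2 -pthread race_model.c */
#include <pthread.h>
#include <stdio.h>

#define FQ_SIZE 64

struct fill_queue {               /* stands in for the shared pool->fq */
	unsigned int cached_cons;     /* consumer cursor, unsynchronized */
	unsigned long addrs[FQ_SIZE];
};

struct sock_model {               /* stands in for struct xdp_sock */
	pthread_mutex_t rx_lock;      /* per-socket lock, as before the fix */
	struct fill_queue *fq;        /* shared with the other socket */
};

static struct fill_queue shared_fq;

/* Mimics __xsk_rcv(): pop one address from the shared FILL queue.
 * Read-modify-write on a shared cursor: racy when two threads run
 * this under two *different* per-socket locks. */
static unsigned long fq_pop(struct fill_queue *fq)
{
	unsigned int idx = fq->cached_cons++;

	return fq->addrs[idx % FQ_SIZE];
}

static void *rx_path(void *arg)
{
	struct sock_model *xs = arg;

	for (int i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&xs->rx_lock);   /* protects only this socket */
		fq_pop(xs->fq);                     /* shared state, unprotected */
		pthread_mutex_unlock(&xs->rx_lock);
	}
	return NULL;
}

int main(void)
{
	struct sock_model xs1 = { PTHREAD_MUTEX_INITIALIZER, &shared_fq };
	struct sock_model xs2 = { PTHREAD_MUTEX_INITIALIZER, &shared_fq };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, rx_path, &xs1);
	pthread_create(&t2, NULL, rx_path, &xs2);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* With the race, the cursor typically ends up below 2000000. */
	printf("cached_cons = %u (expected 2000000)\n", shared_fq.cached_cons);
	return 0;
}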
It's safe and necessary to move spin_lock_bh(rx_lock) after
xsk_rcv_check():
* xs->pool and the spin_lock_init() of pool->rx_lock are synchronized
  by the xsk_bind() -> xsk_is_bound() memory barriers.
* xsk_rcv_check() may succeed at the moment of xsk_release() or
  xsk_unbind_dev(); however, this will not cause any data races or
  race conditions. xsk_unbind_dev() removes the xdp socket from all
  maps and waits for the completion of all outstanding rx operations.
  Packets in the RX path will either complete safely or be dropped.
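For reference, this is the resulting xsk_generic_rcv() (the same code
as in the hunk below), with comments paraphrasing the reasoning above;
the comments are editorial, not part of the patch.

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp_get_buff_len(xdp);
	int err;

	/* Checked before taking the lock: until the socket is bound,
	 * xs->pool (and its initialized rx_lock) may not be visible yet;
	 * xsk_is_bound() inside this check pairs with the xsk_bind()
	 * memory barriers, so the lock is only taken on a bound socket. */
	err = xsk_rcv_check(xs, xdp, len);
	if (!err) {
		/* A single lock in the shared pool serializes both the
		 * per-socket RX queue and the shared FILL queue. */
		spin_lock_bh(&xs->pool->rx_lock);
		err = __xsk_rcv(xs, xdp, len);
		xsk_flush(xs);
		spin_unlock_bh(&xs->pool->rx_lock);
	}

	return err;
}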
Signed-off-by: Eryk Kubanski <e.kubanski@partner.samsung.com>
Fixes: bf0bdd1343 ("xdp: fix race on generic receive path")
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://patch.msgid.link/20250416101908.10919-1-e.kubanski@partner.samsung.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -71,9 +71,6 @@ struct xdp_sock {
 	 */
 	u32 tx_budget_spent;
 
-	/* Protects generic receive. */
-	spinlock_t rx_lock;
-
 	/* Statistics */
 	u64 rx_dropped;
 	u64 rx_queue_full;

--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -55,6 +55,8 @@ struct xsk_buff_pool {
 	refcount_t users;
 	struct xdp_umem *umem;
 	struct work_struct work;
+	/* Protects generic receive in shared and non-shared umem mode. */
+	spinlock_t rx_lock;
 	struct list_head free_list;
 	struct list_head xskb_list;
 	u32 heads_cnt;

--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -338,13 +338,14 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	u32 len = xdp_get_buff_len(xdp);
 	int err;
 
-	spin_lock_bh(&xs->rx_lock);
 	err = xsk_rcv_check(xs, xdp, len);
 	if (!err) {
+		spin_lock_bh(&xs->pool->rx_lock);
 		err = __xsk_rcv(xs, xdp, len);
 		xsk_flush(xs);
+		spin_unlock_bh(&xs->pool->rx_lock);
 	}
-	spin_unlock_bh(&xs->rx_lock);
+
 	return err;
 }
 
@@ -1720,7 +1721,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
 	xs = xdp_sk(sk);
 	xs->state = XSK_READY;
 	mutex_init(&xs->mutex);
-	spin_lock_init(&xs->rx_lock);
 
 	INIT_LIST_HEAD(&xs->map_list);
 	spin_lock_init(&xs->map_list_lock);

--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -87,6 +87,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	pool->addrs = umem->addrs;
 	pool->tx_metadata_len = umem->tx_metadata_len;
 	pool->tx_sw_csum = umem->flags & XDP_UMEM_TX_SW_CSUM;
+	spin_lock_init(&pool->rx_lock);
 	INIT_LIST_HEAD(&pool->free_list);
 	INIT_LIST_HEAD(&pool->xskb_list);
 	INIT_LIST_HEAD(&pool->xsk_tx_list);
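For context, the configuration this fix protects is shared umem mode,
e.g. two sockets bound to the same <ifname, queue> pair via libxdp's
xsk_socket__create_shared(). A minimal sketch follows; the interface
name, queue id, and sizes are placeholders, and error handling plus
the XDP program / XSKMAP that fans packets out to the sockets are
omitted.

/* Build with: cc shared_umem.c -lxdp -lbpf */
#include <stdlib.h>
#include <sys/mman.h>
#include <xdp/xsk.h>	/* libxdp; this API lived in bpf/xsk.h in older libbpf */

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
	struct xsk_ring_prod fq, tx1, tx2;
	struct xsk_ring_cons cq, rx1, rx2;
	struct xsk_socket *xsk1, *xsk2;
	struct xsk_umem *umem;
	void *bufs;

	bufs = mmap(NULL, NUM_FRAMES * FRAME_SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* One umem with a single FILL/COMPLETION ring pair. */
	if (xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
			     &fq, &cq, NULL))
		exit(1);

	/* Both sockets bind to the same <ifname, queue>, so in the kernel
	 * they share one xsk_buff_pool and one FILL queue, while each gets
	 * a private RX ring -- exactly the layout described above. */
	if (xsk_socket__create_shared(&xsk1, "eth0", 0, umem,
				      &rx1, &tx1, &fq, &cq, NULL))
		exit(1);
	if (xsk_socket__create_shared(&xsk2, "eth0", 0, umem,
				      &rx2, &tx2, &fq, &cq, NULL))
		exit(1);

	/* ... run the RX loops of xsk1 and xsk2 on different cores ... */
	return 0;
}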