net: mana: Switch to page pool for jumbo frames
commit fa37a8849634db2dd3545116873da8cf4b1e67c6 upstream.
Frag allocators, such as netdev_alloc_frag(), were not designed to
work for fragsz > PAGE_SIZE.
So, switch to page pool for jumbo frames instead of using page frag
allocators. This driver is using page pool for smaller MTUs already.
Cc: stable@vger.kernel.org
Fixes: 80f6215b45 ("net: mana: Add support for jumbo frame")
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Reviewed-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
Link: https://patch.msgid.link/1742920357-27263-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 34baf1cfd6, parent a2acc67d61, committed by Greg Kroah-Hartman
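
For context on the bug: a frag allocator asked for more than PAGE_SIZE may silently fall back to a single page when no high-order compound page is available, so every caller had to detect that fallback by hand. A minimal sketch of the fragile pattern this patch removes (it mirrors the deleted driver code below; the helper name is illustrative):

#include <linux/mm.h>
#include <linux/skbuff.h>

/* Sketch only: the caller must check whether netdev_alloc_frag() fell
 * back to a single page and fail the allocation itself when the frag
 * is too small for the requested size.
 */
static void *alloc_jumbo_frag(unsigned int alloc_size) /* > PAGE_SIZE */
{
	struct page *page;
	void *va;

	va = netdev_alloc_frag(alloc_size);
	if (!va)
		return NULL;

	page = virt_to_head_page(va);
	/* Frag allocator fell back to a single page: buffer too small */
	if (compound_order(page) < get_order(alloc_size)) {
		put_page(page);
		return NULL;
	}
	return va;
}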
@@ -636,30 +636,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
 
 	mpc->rxbpre_total = 0;
 
 	for (i = 0; i < num_rxb; i++) {
-		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
-			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
-			if (!va)
-				goto error;
+		page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+		if (!page)
+			goto error;
 
-			page = virt_to_head_page(va);
-			/* Check if the frag falls back to single page */
-			if (compound_order(page) <
-			    get_order(mpc->rxbpre_alloc_size)) {
-				put_page(page);
-				goto error;
-			}
-		} else {
-			page = dev_alloc_page();
-			if (!page)
-				goto error;
-
-			va = page_to_virt(page);
-		}
+		va = page_to_virt(page);
 
 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
 		if (dma_mapping_error(dev, da)) {
-			put_page(virt_to_head_page(va));
+			put_page(page);
 			goto error;
 		}
@@ -1618,7 +1604,7 @@ drop:
 }
 
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
-			     dma_addr_t *da, bool *from_pool, bool is_napi)
+			     dma_addr_t *da, bool *from_pool)
 {
 	struct page *page;
 	void *va;
@@ -1629,21 +1615,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 	if (rxq->xdp_save_va) {
 		va = rxq->xdp_save_va;
 		rxq->xdp_save_va = NULL;
-	} else if (rxq->alloc_size > PAGE_SIZE) {
-		if (is_napi)
-			va = napi_alloc_frag(rxq->alloc_size);
-		else
-			va = netdev_alloc_frag(rxq->alloc_size);
-
-		if (!va)
-			return NULL;
-
-		page = virt_to_head_page(va);
-		/* Check if the frag falls back to single page */
-		if (compound_order(page) < get_order(rxq->alloc_size)) {
-			put_page(page);
-			return NULL;
-		}
 	} else {
 		page = page_pool_dev_alloc_pages(rxq->page_pool);
 		if (!page)
@@ -1676,7 +1647,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
 	dma_addr_t da;
 	void *va;
 
-	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+	va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 	if (!va)
 		return;
 
@@ -2083,7 +2054,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
 	if (mpc->rxbufs_pre)
 		va = mana_get_rxbuf_pre(rxq, &da);
 	else
-		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+		va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
 
 	if (!va)
 		return -ENOMEM;
@@ -2169,6 +2140,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
 	pprm.nid = gc->numa_node;
 	pprm.napi = &rxq->rx_cq.napi;
 	pprm.netdev = rxq->ndev;
+	pprm.order = get_order(rxq->alloc_size);
 
 	rxq->page_pool = page_pool_create(&pprm);
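
The last hunk is the heart of the fix: with page_pool_params.order derived from the RX buffer size, the pool hands out compound pages large enough for a jumbo frame, so no fragsz > PAGE_SIZE frag allocation is needed. A minimal sketch of such a pool setup (the helper name and pool_size value are illustrative, not from the driver; the params fields match those the hunk sets):

#include <linux/mm.h>
#include <net/page_pool/helpers.h>

/* Illustrative helper (not driver code): create an RX page pool whose
 * pages hold alloc_size bytes, as the patched mana_create_page_pool()
 * does via pprm.order.
 */
static struct page_pool *create_jumbo_rx_pool(struct napi_struct *napi,
					      struct net_device *ndev,
					      unsigned int alloc_size,
					      int nid)
{
	struct page_pool_params pprm = {
		.order     = get_order(alloc_size), /* > 0 for jumbo MTUs */
		.pool_size = 256,                   /* illustrative size */
		.nid       = nid,
		.napi      = napi,
		.netdev    = ndev,
	};

	/* Returns ERR_PTR() on failure; the hot path then allocates
	 * buffers with page_pool_dev_alloc_pages(pool). */
	return page_pool_create(&pprm);
}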