Skip to content

Commit

Permalink
mlxsw: pci: Sync Rx buffers for CPU
Browse files Browse the repository at this point in the history
When an Rx packet is received, drivers should sync the pages for the CPU, to
ensure the CPU reads the data written by the device and not stale
data from its cache.

Add the missing sync call in the Rx path, syncing the actual length of data
for each fragment.

Cc: Jiri Pirko <[email protected]>
Fixes: b5b60bb ("mlxsw: pci: Use page pool for Rx buffers allocation")
Signed-off-by: Amit Cohen <[email protected]>
Reviewed-by: Ido Schimmel <[email protected]>
Signed-off-by: Petr Machata <[email protected]>
Link: https://patch.msgid.link/461486fac91755ca4e04c2068c102250026dcd0b.1729866134.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <[email protected]>
  • Loading branch information
Amit Cohen authored and kuba-moo committed Oct 31, 2024
1 parent 0a66e55 commit 15f73e6
Showing 1 changed file with 15 additions and 7 deletions.
22 changes: 15 additions & 7 deletions drivers/net/ethernet/mellanox/mlxsw/pci.c
Original file line number Diff line number Diff line change
Expand Up @@ -389,27 +389,34 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
static struct sk_buff *mlxsw_pci_rdq_build_skb(struct mlxsw_pci_queue *q,
struct page *pages[],
u16 byte_count)
{
struct mlxsw_pci_queue *cq = q->u.rdq.cq;
unsigned int linear_data_size;
struct page_pool *page_pool;
struct sk_buff *skb;
int page_index = 0;
bool linear_only;
void *data;

linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
linear_data_size = linear_only ? byte_count :
PAGE_SIZE -
MLXSW_PCI_RX_BUF_SW_OVERHEAD;

page_pool = cq->u.cq.page_pool;
page_pool_dma_sync_for_cpu(page_pool, pages[page_index],
MLXSW_PCI_SKB_HEADROOM, linear_data_size);

data = page_address(pages[page_index]);
net_prefetch(data);

skb = napi_build_skb(data, PAGE_SIZE);
if (unlikely(!skb))
return ERR_PTR(-ENOMEM);

linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
linear_data_size = linear_only ? byte_count :
PAGE_SIZE -
MLXSW_PCI_RX_BUF_SW_OVERHEAD;

skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
skb_put(skb, linear_data_size);

Expand All @@ -425,6 +432,7 @@ static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],

page = pages[page_index];
frag_size = min(byte_count, PAGE_SIZE);
page_pool_dma_sync_for_cpu(page_pool, page, 0, frag_size);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
page, 0, frag_size, PAGE_SIZE);
byte_count -= frag_size;
Expand Down Expand Up @@ -760,7 +768,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (err)
goto out;

skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
skb = mlxsw_pci_rdq_build_skb(q, pages, byte_count);
if (IS_ERR(skb)) {
dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
Expand Down

0 comments on commit 15f73e6

Please sign in to comment.