cxgb3: fix dma mapping regression
Commit 5e68b772e6
cxgb3: map entire Rx page, feed map+offset to Rx ring.
introduced a regression on platforms defining DECLARE_PCI_UNMAP_ADDR()
and related macros as no-ops.
Rx descriptors are fed with a page buffer bus address + page chunk offset.
The page buffer bus address is set and retrieved through
pci_unmap_addr_set(), pci_unmap_addr().
These functions are no-ops on x86 (if CONFIG_DMA_API_DEBUG is not set).
The HW ends up with a bogus bus address.
This patch saves the page buffer bus address on all platforms.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
4d3383d0ad
commit
10b6d95612
2 changed files with 7 additions and 8 deletions
|
@ -85,8 +85,8 @@ struct fl_pg_chunk {
|
||||||
struct page *page;
|
struct page *page;
|
||||||
void *va;
|
void *va;
|
||||||
unsigned int offset;
|
unsigned int offset;
|
||||||
u64 *p_cnt;
|
unsigned long *p_cnt;
|
||||||
DECLARE_PCI_UNMAP_ADDR(mapping);
|
dma_addr_t mapping;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct rx_desc;
|
struct rx_desc;
|
||||||
|
|
|
@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
|
||||||
(*d->pg_chunk.p_cnt)--;
|
(*d->pg_chunk.p_cnt)--;
|
||||||
if (!*d->pg_chunk.p_cnt)
|
if (!*d->pg_chunk.p_cnt)
|
||||||
pci_unmap_page(pdev,
|
pci_unmap_page(pdev,
|
||||||
pci_unmap_addr(&d->pg_chunk, mapping),
|
d->pg_chunk.mapping,
|
||||||
q->alloc_size, PCI_DMA_FROMDEVICE);
|
q->alloc_size, PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
put_page(d->pg_chunk.page);
|
put_page(d->pg_chunk.page);
|
||||||
|
@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
|
||||||
q->pg_chunk.offset = 0;
|
q->pg_chunk.offset = 0;
|
||||||
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
|
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
|
||||||
0, q->alloc_size, PCI_DMA_FROMDEVICE);
|
0, q->alloc_size, PCI_DMA_FROMDEVICE);
|
||||||
pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
|
q->pg_chunk.mapping = mapping;
|
||||||
}
|
}
|
||||||
sd->pg_chunk = q->pg_chunk;
|
sd->pg_chunk = q->pg_chunk;
|
||||||
|
|
||||||
|
@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
|
||||||
nomem: q->alloc_failed++;
|
nomem: q->alloc_failed++;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
|
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
|
||||||
sd->pg_chunk.offset;
|
|
||||||
pci_unmap_addr_set(sd, dma_addr, mapping);
|
pci_unmap_addr_set(sd, dma_addr, mapping);
|
||||||
|
|
||||||
add_one_rx_chunk(mapping, d, q->gen);
|
add_one_rx_chunk(mapping, d, q->gen);
|
||||||
|
@ -881,7 +880,7 @@ recycle:
|
||||||
(*sd->pg_chunk.p_cnt)--;
|
(*sd->pg_chunk.p_cnt)--;
|
||||||
if (!*sd->pg_chunk.p_cnt)
|
if (!*sd->pg_chunk.p_cnt)
|
||||||
pci_unmap_page(adap->pdev,
|
pci_unmap_page(adap->pdev,
|
||||||
pci_unmap_addr(&sd->pg_chunk, mapping),
|
sd->pg_chunk.mapping,
|
||||||
fl->alloc_size,
|
fl->alloc_size,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
if (!skb) {
|
if (!skb) {
|
||||||
|
@ -2096,7 +2095,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
|
||||||
(*sd->pg_chunk.p_cnt)--;
|
(*sd->pg_chunk.p_cnt)--;
|
||||||
if (!*sd->pg_chunk.p_cnt)
|
if (!*sd->pg_chunk.p_cnt)
|
||||||
pci_unmap_page(adap->pdev,
|
pci_unmap_page(adap->pdev,
|
||||||
pci_unmap_addr(&sd->pg_chunk, mapping),
|
sd->pg_chunk.mapping,
|
||||||
fl->alloc_size,
|
fl->alloc_size,
|
||||||
PCI_DMA_FROMDEVICE);
|
PCI_DMA_FROMDEVICE);
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue