spi: Fix cache corruption due to DMA/PIO overlap

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2071848
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2122415

commit 0c17ba73c08ff2690c1eff8df374b6709eed55ce
Author: Vincent Whitchurch <vincent.whitchurch@axis.com>
Date: Tue, 27 Sep 2022 13:21:15 +0200

    The SPI core DMA mapping support performs cache management once for the
    entire message and not between transfers, and this leads to cache
    corruption if a message has two or more RX transfers with both
    transfers targeting the same cache line, and the controller driver
    decides to handle one using DMA and the other using PIO (for example,
    because one is much larger than the other).

    Fix it by syncing before/after the actual transfers.  This also means
    that we can skip the sync during the map/unmap of the message.

    Fixes: 99adef310f ("spi: Provide core support for DMA mapping transfers")
    Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
    Link: https://lore.kernel.org/r/20220927112117.77599-3-vincent.whitchurch@axis.com
    Signed-off-by: Mark Brown <broonie@kernel.org>

Signed-off-by: Mark Salter <msalter@redhat.com>
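
For readers tracing the failure mode, here is a minimal, hypothetical sketch of a message that can trigger it (the struct, the names, and the assumption that the controller's can_dma() picks DMA for the 16-byte transfer but PIO for the 1-byte one are all illustrative, not taken from the patch):

/*
 * Hypothetical trigger: two RX buffers in one message that share a
 * cache line.  If the controller DMAs one transfer and PIOs the other,
 * message-wide cache maintenance can clobber the PIO-written bytes.
 * Assumes <linux/spi/spi.h> and <linux/slab.h>.
 */
static int example_read(struct spi_device *spi)
{
	struct example_readings {
		u8 status;	/* 1 byte: a driver may do this by PIO */
		u8 data[16];	/* larger: a driver may do this by DMA */
	} *readings;		/* both fields fit in one cache line */
	struct spi_transfer xfers[2];
	struct spi_message msg;
	int ret;

	readings = kmalloc(sizeof(*readings), GFP_KERNEL);
	if (!readings)
		return -ENOMEM;

	memset(xfers, 0, sizeof(xfers));
	xfers[0].rx_buf = &readings->status;
	xfers[0].len = 1;
	xfers[1].rx_buf = readings->data;
	xfers[1].len = 16;

	spi_message_init_with_transfers(&msg, xfers, 2);
	ret = spi_sync(spi, &msg);

	kfree(readings);
	return ret;
}

Before this fix, the cache maintenance for the DMA-mapped transfer was done once for the whole message, so invalidating the shared line for the DMA transfer could discard the status byte that the PIO transfer had already written.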
 drivers/spi/spi.c | 111 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 89 insertions(+), 22 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1017,9 +1017,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 }
 
 #ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
-		struct sg_table *sgt, void *buf, size_t len,
-		enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+			     struct sg_table *sgt, void *buf, size_t len,
+			     enum dma_data_direction dir, unsigned long attrs)
 {
 	const bool vmalloced_buf = is_vmalloc_addr(buf);
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -1085,26 +1085,37 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 		sg = sg_next(sg);
 	}
 
-	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
-	if (!ret)
-		ret = -ENOMEM;
+	ret = dma_map_sgtable(dev, sgt, dir, attrs);
 	if (ret < 0) {
 		sg_free_table(sgt);
 		return ret;
 	}
 
-	sgt->nents = ret;
-
 	return 0;
 }
 
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+		struct sg_table *sgt, void *buf, size_t len,
+		enum dma_data_direction dir)
+{
+	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+				struct device *dev, struct sg_table *sgt,
+				enum dma_data_direction dir,
+				unsigned long attrs)
+{
+	if (sgt->orig_nents) {
+		dma_unmap_sgtable(dev, sgt, dir, attrs);
+		sg_free_table(sgt);
+	}
+}
+
 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
 		   struct sg_table *sgt, enum dma_data_direction dir)
 {
-	if (sgt->orig_nents) {
-		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
-		sg_free_table(sgt);
-	}
+	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
 }
 
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
@@ -1131,24 +1142,30 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		rx_dev = ctlr->dev.parent;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync is done before each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
 		if (xfer->tx_buf != NULL) {
-			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
-					  (void *)xfer->tx_buf, xfer->len,
-					  DMA_TO_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+						(void *)xfer->tx_buf,
+						xfer->len, DMA_TO_DEVICE,
+						attrs);
 			if (ret != 0)
 				return ret;
 		}
 
 		if (xfer->rx_buf != NULL) {
-			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
-					  xfer->rx_buf, xfer->len,
-					  DMA_FROM_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+						xfer->rx_buf, xfer->len,
+						DMA_FROM_DEVICE, attrs);
 			if (ret != 0) {
-				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
-					      DMA_TO_DEVICE);
+				spi_unmap_buf_attrs(ctlr, tx_dev,
+						    &xfer->tx_sg, DMA_TO_DEVICE,
+						    attrs);
 				return ret;
 			}
 		}
@@ -1171,17 +1188,52 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		return 0;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync has already been done after each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
-		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+				    DMA_FROM_DEVICE, attrs);
+		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+				    DMA_TO_DEVICE, attrs);
 	}
 
 	ctlr->cur_msg_mapped = false;
 
 	return 0;
 }
 
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+				    struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+				 struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
+
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
@@ -1194,6 +1246,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 {
 	return 0;
 }
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+				    struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+				 struct spi_transfer *xfer)
+{
+}
 #endif /* !CONFIG_HAS_DMA */
 
 static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -1452,8 +1514,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 			reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
+			spi_dma_sync_for_device(ctlr, xfer);
 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
+				spi_dma_sync_for_cpu(ctlr, xfer);
+
 				if (ctlr->cur_msg_mapped &&
 				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
 					__spi_unmap_msg(ctlr, msg);
@@ -1476,6 +1541,8 @@ fallback_pio:
 				if (ret < 0)
 					msg->status = ret;
 			}
+
+			spi_dma_sync_for_cpu(ctlr, xfer);
 		} else {
 			if (xfer->len)
 				dev_err(&msg->spi->dev,
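
For reference, a condensed sketch of the DMA-API pattern the fix adopts ("dev" and "sgt" are placeholders; in the patch the mapping happens in __spi_map_msg() and the syncs bracket each transfer in spi_transfer_one_message()): map once with DMA_ATTR_SKIP_CPU_SYNC, then do the cache maintenance explicitly around each individual transfer.

/*
 * Illustrative only, error handling trimmed.  Skipping the implicit
 * sync at map/unmap time and syncing per transfer keeps cache
 * maintenance scoped to the window in which the device owns the
 * buffer.  Assumes <linux/dma-mapping.h>.
 */
static int rx_one_transfer(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* Map once; no CPU cache maintenance yet. */
	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret < 0)
		return ret;

	/* Hand the buffer to the device just before the DMA runs. */
	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);

	/* ... the controller performs the DMA transfer here ... */

	/* Hand it back to the CPU as soon as the transfer completes. */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);

	/* Unmap without another round of cache maintenance. */
	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

Because the maintenance is now scoped to each transfer, a cache line shared with a PIO transfer is only invalidated while the DMA transfer actually owns it, which is what closes the corruption window described in the commit message.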