Merge: dmaengine: Updates for 9.2
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/1307

Bugzilla: https://bugzilla.redhat.com/2112028

Testing: Tested the ioat driver and legacy support in the idxd driver with the
dmatest module, and scalable-mode support in the idxd driver with
dsa_user_test_runner.sh.

Summary of changes: This brings the dmaengine subsystem, core and supported
drivers, up to date with the 6.0 merge window. There were only very minor
conflicts (one context diff), plus one commit dropped because it touched bits
of drivers we do not support.

Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
Approved-by: John W. Linville <linville@redhat.com>
Approved-by: David Arcari <darcari@redhat.com>
Approved-by: Andrew Halaney <ahalaney@redhat.com>
Approved-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Frantisek Hrbata <fhrbata@redhat.com>
commit d96e57c6c8
@@ -6,6 +6,16 @@ Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 
 This small document introduces how to test DMA drivers using dmatest module.
 
+The dmatest module tests DMA memcpy, memset, XOR and RAID6 P+Q operations using
+various lengths and various offsets into the source and destination buffers. It
+will initialize both buffers with a repeatable pattern and verify that the DMA
+engine copies the requested region and nothing more. It will also verify that
+the bytes aren't swapped around, and that the source buffer isn't modified.
+
+The dmatest module can be configured to test a specific channel. It can also
+test multiple channels at the same time, and it can start multiple threads
+competing for the same channel.
+
 .. note::
   The test suite works only on the channels that have at least one
   capability of the following: DMA_MEMCPY (memory-to-memory), DMA_MEMSET

@@ -143,13 +153,14 @@ Part 5 - Handling channel allocation
 Allocating Channels
 -------------------
 
-Channels are required to be configured prior to starting the test run.
-Attempting to run the test without configuring the channels will fail.
+Channels do not need to be configured prior to starting a test run. Attempting
+to run the test without configuring the channels will result in testing any
+channels that are available.
 
 Example::
 
     % echo 1 > /sys/module/dmatest/parameters/run
-    dmatest: Could not start test, no channels configured
+    dmatest: No channels configured, continue with any
 
 Channels are registered using the "channel" parameter. Channels can be requested by their
 name, once requested, the channel is registered and a pending thread is added to the test list.
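For reference (not part of the diff): with the relaxed channel handling above, a
minimal run needs no channel configuration at all. A sketch, assuming the module
is loaded and the platform exposes at least one capable channel (the channel
name below is hypothetical)::

    % modprobe dmatest iterations=1
    % echo 1 > /sys/module/dmatest/parameters/run
    dmatest: No channels configured, continue with any
    % echo dma0chan0 > /sys/module/dmatest/parameters/channel
    % echo 1 > /sys/module/dmatest/parameters/run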
@@ -162,6 +162,29 @@ Currently, the types available are:
 
   - The device is able to do memory to memory copies
 
+- DMA_MEMCPY_SG
+
+  - The device supports memory to memory scatter-gather transfers.
+
+  - Even though a plain memcpy can look like a particular case of a
+    scatter-gather transfer, with a single chunk to copy, it's a distinct
+    transaction type in the mem2mem transfer case. This is because some very
+    simple devices might be able to do contiguous single-chunk memory copies,
+    but have no support for more complex SG transfers.
+
+  - No matter what the overall size of the combined chunks for source and
+    destination is, only as many bytes as the smallest of the two will be
+    transmitted. That means the number and size of the scatter-gather buffers in
+    both lists need not be the same, and that the operation functionally is
+    equivalent to a ``strncpy`` where the ``count`` argument equals the smallest
+    total size of the two scatter-gather list buffers.
+
+  - It's usually used for copying pixel data between host memory and
+    memory-mapped GPU device memory, such as found on modern PCI video graphics
+    cards. The most immediate example is the OpenGL API function
+    ``glReadPielx()``, which might require a verbatim copy of a huge framebuffer
+    from local device memory onto host memory.
+
 - DMA_XOR
 
   - The device is able to perform XOR operations on memory areas

@@ -183,6 +206,12 @@ Currently, the types available are:
   - The device is able to perform parity check using RAID6 P+Q
     algorithm against a memory buffer.
 
+- DMA_MEMSET
+
+  - The device is able to fill memory with the provided pattern
+
+  - The pattern is treated as a single byte signed value.
+
 - DMA_INTERRUPT
 
   - The device is able to trigger a dummy transfer that will
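For reference (not part of the diff), a consumer-side sketch of the new
DMA_MEMCPY_SG capability. It assumes the dmaengine_prep_dma_memcpy_sg() wrapper
that accompanied this capability in the same development cycle; names and error
handling are trimmed for illustration:

	/* Sketch: queue one SG-to-SG copy on a channel advertising
	 * DMA_MEMCPY_SG. Per the text above, the engine copies only
	 * min(total src bytes, total dst bytes). */
	static int example_memcpy_sg(struct dma_chan *chan,
				     struct scatterlist *dst_sg, unsigned int dst_nents,
				     struct scatterlist *src_sg, unsigned int src_nents)
	{
		struct dma_async_tx_descriptor *tx;

		tx = dmaengine_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
						  src_sg, src_nents,
						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -ENOMEM;

		dmaengine_submit(tx);		/* queue the descriptor */
		dma_async_issue_pending(chan);	/* kick the engine */
		return 0;
	}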
@@ -824,7 +824,7 @@ S: Maintained
 F:	drivers/mailbox/mailbox-altera.c
 
 ALTERA MSGDMA IP CORE DRIVER
-M:	Olivier Dautricourt <olivier.dautricourt@orolia.com>
+M:	Olivier Dautricourt <olivierdautricourt@gmail.com>
 R:	Stefan Roese <sr@denx.de>
 L:	dmaengine@vger.kernel.org
 S:	Odd Fixes

@@ -9431,7 +9431,8 @@ S: Supported
 Q:	https://patchwork.kernel.org/project/linux-dmaengine/list/
 F:	drivers/dma/ioat*
 
-INTEL IADX DRIVER
+INTEL IDXD DRIVER
+M:	Fenghua Yu <fenghua.yu@intel.com>
 M:	Dave Jiang <dave.jiang@intel.com>
 L:	dmaengine@vger.kernel.org
 S:	Supported
@@ -283,7 +283,7 @@ config INTEL_IDXD_BUS
 
 config INTEL_IDXD
 	tristate "Intel Data Accelerators support"
-	depends on PCI && X86_64
+	depends on PCI && X86_64 && !UML
 	depends on PCI_MSI
 	depends on PCI_PASID
 	depends on SBITMAP

@@ -337,7 +337,7 @@ config INTEL_IDXD_PERFMON
 
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
-	depends on PCI && X86_64
+	depends on PCI && X86_64 && !UML
 	select DMA_ENGINE
 	select DMA_ENGINE_RAID
 	select DCA
@@ -695,13 +695,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,
  */
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 {
-	int err = -EBUSY;
-
 	/* lock against __dma_request_channel */
 	mutex_lock(&dma_list_mutex);
 
 	if (chan->client_count == 0) {
 		struct dma_device *device = chan->device;
+		int err;
 
 		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 		device->privatecnt++;

@@ -1054,9 +1053,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	 * When the chan_id is a negative value, we are dynamically adding
 	 * the channel. Otherwise we are static enumerating.
 	 */
-	mutex_lock(&device->chan_mutex);
 	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
-	mutex_unlock(&device->chan_mutex);
 	if (chan->chan_id < 0) {
 		pr_err("%s: unable to alloc ida for chan: %d\n",
 		       __func__, chan->chan_id);

@@ -1079,9 +1076,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
 	return 0;
 
 err_out_ida:
-	mutex_lock(&device->chan_mutex);
 	ida_free(&device->chan_ida, chan->chan_id);
-	mutex_unlock(&device->chan_mutex);
 err_free_dev:
 	kfree(chan->dev);
 err_free_local:

@@ -1114,9 +1109,7 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
 	device->chancnt--;
 	chan->dev->chan = NULL;
 	mutex_unlock(&dma_list_mutex);
-	mutex_lock(&device->chan_mutex);
 	ida_free(&device->chan_ida, chan->chan_id);
-	mutex_unlock(&device->chan_mutex);
 	device_unregister(&chan->dev->device);
 	free_percpu(chan->local);
 }

@@ -1244,7 +1237,6 @@ int dma_async_device_register(struct dma_device *device)
 	if (rc != 0)
 		return rc;
 
-	mutex_init(&device->chan_mutex);
 	ida_init(&device->chan_ida);
 
 	/* represent channels in sysfs. Probably want devs too */
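For reference (not part of the diff): the chan_mutex removals above rely on
ida_alloc()/ida_free() being internally synchronized (the IDA takes its own
spinlock), so no caller-side lock is needed. A minimal sketch of the same
pattern:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	/* ida_alloc()/ida_free() serialize internally; callers need no
	 * extra mutex around them. */
	int example_get_id(void)
	{
		return ida_alloc(&example_ida, GFP_KERNEL); /* id >= 0, or -errno */
	}

	void example_put_id(int id)
	{
		ida_free(&example_ida, id);
	}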
@@ -176,7 +176,7 @@ dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
 static inline bool
 dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
 {
-	return (cb->callback) ? true : false;
+	return cb->callback || cb->callback_result;
 }
 
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
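For reference (not part of the diff): the fix above matters for clients that set
only the result-aware callback. A hedged consumer sketch (names are
illustrative):

	/* A completion handler using the result-aware callback. With the
	 * change above, a descriptor that sets only callback_result is
	 * still treated as having a valid callback. */
	static void example_done(void *param, const struct dmaengine_result *result)
	{
		if (result->result != DMA_TRANS_NOERROR)
			pr_err("DMA error, residue %u\n", result->residue);
		complete(param);
	}

	/* ...after preparing a descriptor tx: */
	tx->callback_result = example_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);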
@@ -22,51 +22,50 @@
 #include <linux/wait.h>
 
 static unsigned int test_buf_size = 16384;
-module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
+module_param(test_buf_size, uint, 0644);
 MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
 
 static char test_device[32];
-module_param_string(device, test_device, sizeof(test_device),
-		S_IRUGO | S_IWUSR);
+module_param_string(device, test_device, sizeof(test_device), 0644);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
 
 static unsigned int threads_per_chan = 1;
-module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
+module_param(threads_per_chan, uint, 0644);
 MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");
 
 static unsigned int max_channels;
-module_param(max_channels, uint, S_IRUGO | S_IWUSR);
+module_param(max_channels, uint, 0644);
 MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");
 
 static unsigned int iterations;
-module_param(iterations, uint, S_IRUGO | S_IWUSR);
+module_param(iterations, uint, 0644);
 MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");
 
 static unsigned int dmatest;
-module_param(dmatest, uint, S_IRUGO | S_IWUSR);
+module_param(dmatest, uint, 0644);
 MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)");
 
 static unsigned int xor_sources = 3;
-module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
+module_param(xor_sources, uint, 0644);
 MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");
 
 static unsigned int pq_sources = 3;
-module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
+module_param(pq_sources, uint, 0644);
 MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");
 
 static int timeout = 3000;
-module_param(timeout, int, S_IRUGO | S_IWUSR);
+module_param(timeout, int, 0644);
 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");
 
 static bool noverify;
-module_param(noverify, bool, S_IRUGO | S_IWUSR);
+module_param(noverify, bool, 0644);
 MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");
 
 static bool norandom;

@@ -74,7 +73,7 @@ module_param(norandom, bool, 0644);
 MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");
 
 static bool verbose;
-module_param(verbose, bool, S_IRUGO | S_IWUSR);
+module_param(verbose, bool, 0644);
 MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
 
 static int alignment = -1;

@@ -86,7 +85,7 @@ module_param(transfer_size, uint, 0644);
 MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");
 
 static bool polled;
-module_param(polled, bool, S_IRUGO | S_IWUSR);
+module_param(polled, bool, 0644);
 MODULE_PARM_DESC(polled, "Use polling for completion instead of interrupts");
 
 /**

@@ -154,7 +153,7 @@ static const struct kernel_param_ops run_ops = {
	.get = dmatest_run_get,
 };
 static bool dmatest_run;
-module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
+module_param_cb(run, &run_ops, &dmatest_run, 0644);
 MODULE_PARM_DESC(run, "Run the test (default: false)");
 
 static int dmatest_chan_set(const char *val, const struct kernel_param *kp);

@@ -290,7 +289,7 @@ static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
 };
-module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
+module_param_cb(wait, &wait_ops, &wait, 0444);
 MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
 
 static bool dmatest_match_channel(struct dmatest_params *params,

@@ -579,10 +578,10 @@ static int dmatest_func(void *data)
	unsigned int total_tests = 0;
	dma_cookie_t cookie;
	enum dma_status status;
-	enum dma_ctrl_flags flags;
+	enum dma_ctrl_flags flags;
	u8 *pq_coefs = NULL;
	int ret;
-	unsigned int buf_size;
+	unsigned int buf_size;
	struct dmatest_data *src;
	struct dmatest_data *dst;
	int i;

@@ -675,16 +674,10 @@ static int dmatest_func(void *data)
	/*
	 * src and dst buffers are freed by ourselves below
	 */
-	if (params->polled) {
+	if (params->polled)
		flags = DMA_CTRL_ACK;
-	} else {
-		if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) {
-			flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-		} else {
-			pr_err("Channel does not support interrupt!\n");
-			goto err_pq_array;
-		}
-	}
+	else
+		flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
	ktime = ktime_get();
	while (!(kthread_should_stop() ||

@@ -912,7 +905,6 @@ error_unmap_continue:
	runtime = ktime_to_us(ktime);
 
	ret = 0;
-err_pq_array:
	kfree(dma_pq);
 err_srcs_array:
	kfree(srcs);

@@ -1102,8 +1094,8 @@ static void add_threaded_test(struct dmatest_info *info)
 
	/* Copy test parameters */
	params->buf_size = test_buf_size;
-	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
-	strlcpy(params->device, strim(test_device), sizeof(params->device));
+	strscpy(params->channel, strim(test_channel), sizeof(params->channel));
+	strscpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;

@@ -1247,7 +1239,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
		dtc = list_last_entry(&info->channels,
				      struct dmatest_chan,
				      node);
-		strlcpy(chan_reset_val,
+		strscpy(chan_reset_val,
			dma_chan_name(dtc->chan),
			sizeof(chan_reset_val));
		ret = -EBUSY;

@@ -1270,14 +1262,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
		if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
		    && (strcmp("", strim(test_channel)) != 0)) {
			ret = -EINVAL;
-			strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
+			strscpy(chan_reset_val, dma_chan_name(dtc->chan),
				sizeof(chan_reset_val));
			goto add_chan_err;
		}
 
	} else {
		/* Clear test_channel if no channels were added successfully */
-		strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
+		strscpy(chan_reset_val, "", sizeof(chan_reset_val));
		ret = -EBUSY;
		goto add_chan_err;
	}

@@ -1302,7 +1294,7 @@ static int dmatest_chan_get(char *val, const struct kernel_param *kp)
	mutex_lock(&info->lock);
	if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
		stop_threaded_test(info);
-		strlcpy(test_channel, "", sizeof(test_channel));
+		strscpy(test_channel, "", sizeof(test_channel));
	}
	mutex_unlock(&info->lock);
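For reference (not part of the diff): the strlcpy() to strscpy() conversions are
mechanical, but the semantics differ usefully. strlcpy() returns strlen(src),
which requires reading the entire source, while strscpy() always NUL-terminates
and returns the number of characters copied or -E2BIG on truncation, so the
result can be checked directly:

	char buf[8];
	ssize_t n;

	n = strscpy(buf, "dma0chan0", sizeof(buf)); /* returns -E2BIG here */
	if (n == -E2BIG)
		pr_warn("name truncated\n");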
@@ -29,9 +29,6 @@
  * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
  * of which use ARM any more). See the "Databook" from Synopsys for
  * information beyond what licensees probably provide.
- *
- * The driver has been tested with the Atmel AT32AP7000, which does not
- * support descriptor writeback.
  */
 
 /* The set of bus widths supported by the DMA controller */
@@ -32,11 +32,7 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
 
@@ -65,11 +65,7 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
 
-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
-	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
 
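For reference (not part of the diff): these conversions collapse the legacy
pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into one call. A minimal
probe-time sketch of the modern idiom (function name is illustrative):

	/* One call sets both the streaming and the coherent DMA mask. */
	static int example_probe(struct pci_dev *pdev)
	{
		int ret;

		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;

		/* ... rest of probe ... */
		return 0;
	}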
@@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
	ctx->wq = wq;
	filp->private_data = ctx;
 
-	if (device_pasid_enabled(idxd)) {
+	if (device_user_pasid_enabled(idxd)) {
		sva = iommu_sva_bind_device(dev, current->mm, NULL);
		if (IS_ERR(sva)) {
			rc = PTR_ERR(sva);

@@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
	if (wq_shared(wq)) {
		idxd_device_drain_pasid(idxd, ctx->pasid);
	} else {
-		if (device_pasid_enabled(idxd)) {
+		if (device_user_pasid_enabled(idxd)) {
			/* The wq disable in the disable pasid function will drain the wq */
			rc = idxd_wq_disable_pasid(wq);
			if (rc < 0)
@@ -716,10 +716,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
		struct idxd_wq *wq = idxd->wqs[i];
 
		mutex_lock(&wq->wq_lock);
-		if (wq->state == IDXD_WQ_ENABLED) {
-			idxd_wq_disable_cleanup(wq);
-			wq->state = IDXD_WQ_DISABLED;
-		}
+		idxd_wq_disable_cleanup(wq);
		idxd_wq_device_reset_cleanup(wq);
		mutex_unlock(&wq->wq_lock);
	}

@@ -968,7 +965,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
		if (!wq->group)
			continue;
 
-		if (wq_shared(wq) && !device_swq_supported(idxd)) {
+		if (wq_shared(wq) && !wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;

@@ -1268,7 +1265,7 @@ int drv_enable_wq(struct idxd_wq *wq)
 
	/* Shared WQ checks */
	if (wq_shared(wq)) {
-		if (!device_swq_supported(idxd)) {
+		if (!wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			dev_dbg(dev, "PASID not enabled and shared wq.\n");
			goto err;

@@ -1298,7 +1295,7 @@ int drv_enable_wq(struct idxd_wq *wq)
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		int priv = 0;
 
-		if (device_pasid_enabled(idxd)) {
+		if (wq_pasid_enabled(wq)) {
			if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
				u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
 
@@ -239,6 +239,7 @@ enum idxd_device_flag {
	IDXD_FLAG_CONFIGURABLE = 0,
	IDXD_FLAG_CMD_RUNNING,
	IDXD_FLAG_PASID_ENABLED,
+	IDXD_FLAG_USER_PASID_ENABLED,
 };
 
 struct idxd_dma_dev {

@@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd)
	return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
 }
 
-static inline bool device_swq_supported(struct idxd_device *idxd)
+static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
 {
-	return (support_enqcmd && device_pasid_enabled(idxd));
+	return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+}
+
+static inline bool wq_pasid_enabled(struct idxd_wq *wq)
+{
+	return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||
+	       (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));
+}
+
+static inline bool wq_shared_supported(struct idxd_wq *wq)
+{
+	return (support_enqcmd && wq_pasid_enabled(wq));
 }
 
 enum idxd_portal_prot {
@@ -512,17 +512,15 @@ static int idxd_probe(struct idxd_device *idxd)
	dev_dbg(dev, "IDXD reset complete\n");
 
	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
-		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
-		if (rc == 0) {
-			rc = idxd_enable_system_pasid(idxd);
-			if (rc < 0) {
-				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
-				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
-			} else {
-				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
-			}
+		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
+			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
-			dev_warn(dev, "Unable to turn on SVA feature.\n");
+			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
+
+			if (idxd_enable_system_pasid(idxd))
+				dev_warn(dev, "No in-kernel DMA with PASID.\n");
+			else
+				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");

@@ -561,7 +559,8 @@ static int idxd_probe(struct idxd_device *idxd)
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
-	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+	if (device_user_pasid_enabled(idxd))
+		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
 }
 

@@ -574,7 +573,8 @@ static void idxd_cleanup(struct idxd_device *idxd)
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
-	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
+	if (device_user_pasid_enabled(idxd))
+		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 }
 
 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)

@@ -691,7 +691,8 @@ static void idxd_remove(struct pci_dev *pdev)
	free_irq(irq_entry->vector, irq_entry);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
-	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+	if (device_user_pasid_enabled(idxd))
+		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
@@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev,
	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
-	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
+	} else if (sysfs_streq(buf, "shared")) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
@@ -25,7 +25,7 @@
 #include <linux/of_dma.h>
 
 #include <asm/irq.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 
 #include "dmaengine.h"
 #define IMXDMA_MAX_CHAN_DESCRIPTORS	16
@@ -14,6 +14,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>

@@ -35,7 +36,7 @@
 #include <linux/workqueue.h>
 
 #include <asm/irq.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

@@ -73,6 +74,7 @@
 #define SDMA_CHNENBL0_IMX35	0x200
 #define SDMA_CHNENBL0_IMX31	0x080
 #define SDMA_CHNPRI_0		0x100
+#define SDMA_DONE0_CONFIG	0x1000
 
 /*
  * Buffer descriptor status values.

@@ -180,7 +182,15 @@
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))
 
-/**
+#define SDMA_WATERMARK_LEVEL_N_FIFOS	GENMASK(15, 12)
+#define SDMA_WATERMARK_LEVEL_OFF_FIFOS	GENMASK(19, 16)
+#define SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO	GENMASK(31, 28)
+#define SDMA_WATERMARK_LEVEL_SW_DONE	BIT(23)
+
+#define SDMA_DONE0_CONFIG_DONE_SEL	BIT(7)
+#define SDMA_DONE0_CONFIG_DONE_DIS	BIT(6)
+
+/*
  * struct sdma_script_start_addrs - SDMA script start pointers
  *
  * start addresses of the different functions in the physical
@@ -230,6 +240,10 @@ struct sdma_script_start_addrs {
	s32 zcanfd_2_mcu_addr;
	s32 zqspi_2_mcu_addr;
	s32 mcu_2_ecspi_addr;
+	s32 mcu_2_sai_addr;
+	s32 sai_2_mcu_addr;
+	s32 uart_2_mcu_rom_addr;
+	s32 uartsh_2_mcu_rom_addr;
	/* End of v3 array */
	s32 mcu_2_zqspi_addr;
	/* End of v4 array */

@@ -412,6 +426,14 @@ struct sdma_desc {
 * @data: specific sdma interface structure
 * @bd_pool: dma_pool for bd
 * @terminate_worker: used to call back into terminate work function
+ * @terminated: terminated list
+ * @is_ram_script: flag for script in ram
+ * @n_fifos_src: number of source device fifos
+ * @n_fifos_dst: number of destination device fifos
+ * @sw_done: software done flag
+ * @stride_fifos_src: stride for source device FIFOs
+ * @stride_fifos_dst: stride for destination device FIFOs
+ * @words_per_fifo: copy number of words one time for one FIFO
 */
 struct sdma_channel {
	struct virt_dma_chan		vc;

@@ -433,9 +455,16 @@ struct sdma_channel {
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	enum dma_status			status;
-	bool				context_loaded;
	struct imx_dma_data		data;
	struct work_struct		terminate_worker;
+	struct list_head		terminated;
+	bool				is_ram_script;
+	unsigned int			n_fifos_src;
+	unsigned int			n_fifos_dst;
+	unsigned int			stride_fifos_src;
+	unsigned int			stride_fifos_dst;
+	unsigned int			words_per_fifo;
+	bool				sw_done;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -476,6 +505,13 @@ struct sdma_driver_data {
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
	bool check_ratio;
+	/*
+	 * ecspi ERR009165 fixed should be done in sdma script
+	 * and it has been fixed in soc from i.mx6ul.
+	 * please get more information from the below link:
+	 * https://www.nxp.com/docs/en/errata/IMX6DQCE.pdf
+	 */
+	bool ecspi_fixed;
 };
 
 struct sdma_engine {

@@ -499,6 +535,7 @@ struct sdma_engine {
	struct sdma_buffer_descriptor	*bd0;
	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
	bool				clk_ratio;
+	bool				fw_loaded;
 };
 
 static int sdma_config_write(struct dma_chan *chan,

@@ -595,6 +632,13 @@ static struct sdma_driver_data sdma_imx6q = {
	.script_addrs = &sdma_script_imx6q,
 };
 
+static struct sdma_driver_data sdma_imx6ul = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx6q,
+	.ecspi_fixed = true,
+};
+
 static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,

@@ -628,6 +672,7 @@ static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+	{ .compatible = "fsl,imx6ul-sdma", .data = &sdma_imx6ul, },
	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
	{ /* sentinel */ }
 };
@@ -680,6 +725,11 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
	return 0;
 }
 
+static int is_sdma_channel_enabled(struct sdma_engine *sdma, int channel)
+{
+	return !!(readl(sdma->regs + SDMA_H_STATSTOP) & BIT(channel));
+}
+
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
	writel(BIT(channel), sdma->regs + SDMA_H_START);

@@ -720,9 +770,8 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
	unsigned long flags;
 
	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
-	if (!buf_virt) {
+	if (!buf_virt)
		return -ENOMEM;
-	}
 
	spin_lock_irqsave(&sdma->channel_0_lock, flags);
 

@@ -753,6 +802,14 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
+
+	/* Set SDMA_DONEx_CONFIG is sw_done enabled */
+	if (sdmac->sw_done) {
+		val = readl_relaxed(sdma->regs + SDMA_DONE0_CONFIG);
+		val |= SDMA_DONE0_CONFIG_DONE_SEL;
+		val &= ~SDMA_DONE0_CONFIG_DONE_DIS;
+		writel_relaxed(val, sdma->regs + SDMA_DONE0_CONFIG);
+	}
 }
 
 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
@@ -822,7 +879,6 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
		 */
 
		desc->chn_real_count = bd->mode.count;
-		bd->mode.status |= BD_DONE;
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

@@ -837,9 +893,21 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
			dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
			spin_lock(&sdmac->vc.lock);
 
+			/* Assign buffer ownership to SDMA */
+			bd->mode.status |= BD_DONE;
+
			if (error)
				sdmac->status = old_status;
		}
+
+	/*
+	 * SDMA stops cyclic channel when DMA request triggers a channel and no SDMA
+	 * owned buffer is available (i.e. BD_DONE was set too late).
+	 */
+	if (sdmac->desc && !is_sdma_channel_enabled(sdmac->sdma, sdmac->channel)) {
+		dev_warn(sdmac->sdma->dev, "restart cyclic channel %d\n", sdmac->channel);
+		sdma_enable_channel(sdmac->sdma, sdmac->channel);
+	}
 }
 
 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)

@@ -856,9 +924,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
	for (i = 0; i < sdmac->desc->num_bd; i++) {
		bd = &sdmac->desc->bd[i];
 
-		 if (bd->mode.status & (BD_DONE | BD_RROR))
+		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
-		 sdmac->desc->chn_real_count += bd->mode.count;
+		sdmac->desc->chn_real_count += bd->mode.count;
	}
 
	if (error)
@@ -904,7 +972,7 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 /*
  * sets the pc of SDMA script according to the peripheral type
  */
-static void sdma_get_pc(struct sdma_channel *sdmac,
+static int sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
 {
	struct sdma_engine *sdma = sdmac->sdma;

@@ -919,6 +987,7 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;
	sdmac->pc_to_pc = 0;
+	sdmac->is_ram_script = false;
 
	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:

@@ -945,6 +1014,17 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
+		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+
+		/* Use rom script mcu_2_app if ERR009165 fixed */
+		if (sdmac->sdma->drvdata->ecspi_fixed) {
+			emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+		} else {
+			emi_2_per = sdma->script_addrs->mcu_2_ecspi_addr;
+			sdmac->is_ram_script = true;
+		}
+
+		break;
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:

@@ -954,6 +1034,7 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+		sdmac->is_ram_script = true;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:

@@ -968,6 +1049,7 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
+		sdmac->is_ram_script = true;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;

@@ -988,14 +1070,22 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
-	default:
+	case IMX_DMATYPE_MULTI_SAI:
+		per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
+		break;
+	default:
		dev_err(sdma->dev, "Unsupported transfer type %d\n",
			peripheral_type);
+		return -EINVAL;
	}
 
	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
	sdmac->pc_to_pc = emi_2_emi;
+
+	return 0;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -1008,9 +1098,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
	int ret;
	unsigned long flags;
 
-	if (sdmac->context_loaded)
-		return 0;
-
	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)

@@ -1053,8 +1140,6 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 
	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-	sdmac->context_loaded = true;
-
	return ret;
 }
 

@@ -1078,9 +1163,6 @@ static void sdma_channel_terminate_work(struct work_struct *work)
 {
	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
						  terminate_worker);
-	unsigned long flags;
-	LIST_HEAD(head);
-
	/*
	 * According to NXP R&D team a delay of one BD SDMA cost time
	 * (maximum is 1ms) should be added after disable of the channel

@@ -1089,11 +1171,7 @@ static void sdma_channel_terminate_work(struct work_struct *work)
	 */
	usleep_range(1000, 2000);
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
-	vchan_get_all_descriptors(&sdmac->vc, &head);
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-	vchan_dma_desc_free_list(&sdmac->vc, &head);
-	sdmac->context_loaded = false;
+	vchan_dma_desc_free_list(&sdmac->vc, &sdmac->terminated);
 }
 
 static int sdma_terminate_all(struct dma_chan *chan)

@@ -1107,6 +1185,13 @@ static int sdma_terminate_all(struct dma_chan *chan)
 
	if (sdmac->desc) {
		vchan_terminate_vdesc(&sdmac->desc->vd);
+		/*
+		 * move out current descriptor into terminated list so that
+		 * it could be free in sdma_channel_terminate_work alone
+		 * later without potential involving next descriptor raised
+		 * up before the last descriptor terminated.
+		 */
+		vchan_get_all_descriptors(&sdmac->vc, &sdmac->terminated);
		sdmac->desc = NULL;
		schedule_work(&sdmac->terminate_worker);
	}
@@ -1165,6 +1250,34 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
 }
 
+static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
+{
+	unsigned int n_fifos;
+	unsigned int stride_fifos;
+	unsigned int words_per_fifo;
+
+	if (sdmac->sw_done)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SW_DONE;
+
+	if (sdmac->direction == DMA_DEV_TO_MEM) {
+		n_fifos = sdmac->n_fifos_src;
+		stride_fifos = sdmac->stride_fifos_src;
+	} else {
+		n_fifos = sdmac->n_fifos_dst;
+		stride_fifos = sdmac->stride_fifos_dst;
+	}
+
+	words_per_fifo = sdmac->words_per_fifo;
+
+	sdmac->watermark_level |=
+			FIELD_PREP(SDMA_WATERMARK_LEVEL_N_FIFOS, n_fifos);
+	sdmac->watermark_level |=
+			FIELD_PREP(SDMA_WATERMARK_LEVEL_OFF_FIFOS, stride_fifos);
+	if (words_per_fifo)
+		sdmac->watermark_level |=
+			FIELD_PREP(SDMA_WATERMARK_LEVEL_WORDS_PER_FIFO, (words_per_fifo - 1));
+}
+
 static int sdma_config_channel(struct dma_chan *chan)
 {
	struct sdma_channel *sdmac = to_sdma_chan(chan);

@@ -1189,7 +1302,9 @@ static int sdma_config_channel(struct dma_chan *chan)
		break;
	}
 
-	sdma_get_pc(sdmac, sdmac->peripheral_type);
+	ret = sdma_get_pc(sdmac, sdmac->peripheral_type);
+	if (ret)
+		return ret;
 
	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
	    (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {

@@ -1198,8 +1313,13 @@ static int sdma_config_channel(struct dma_chan *chan)
		if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
		    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
			sdma_set_watermarklevel_for_p2p(sdmac);
-	} else
+	} else {
+		if (sdmac->peripheral_type ==
+				IMX_DMATYPE_MULTI_SAI)
+			sdma_set_watermarklevel_for_sais(sdmac);
+
		__set_bit(sdmac->event_id0, sdmac->event_mask);
+	}
 
	/* Address */
	sdmac->shp_addr = sdmac->per_address;

@@ -1208,13 +1328,11 @@ static int sdma_config_channel(struct dma_chan *chan)
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}
 
-	ret = sdma_load_context(sdmac);
-
-	return ret;
+	return 0;
 }
 
 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
-		unsigned int priority)
+				     unsigned int priority)
 {
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
@@ -1234,7 +1352,7 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
	int ret = -EBUSY;
 
	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
-					GFP_NOWAIT);
+				       GFP_NOWAIT);
	if (!sdma->bd0) {
		ret = -ENOMEM;
		goto out;

@@ -1257,7 +1375,7 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
	int ret = 0;
 
	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
-					&desc->bd_phys, GFP_NOWAIT);
+				      &desc->bd_phys, GFP_NOWAIT);
	if (!desc->bd) {
		ret = -ENOMEM;
		goto out;

@@ -1306,7 +1424,9 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
		mem_data.dma_request2 = 0;
		data = &mem_data;
 
-		sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+		ret = sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+		if (ret)
+			return ret;
	}
 
	switch (data->priority) {

@@ -1361,7 +1481,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;
-	sdmac->context_loaded = false;
 
	sdma_set_channel_priority(sdmac, 0);
 

@@ -1374,6 +1493,11 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
 {
	struct sdma_desc *desc;
 
+	if (!sdmac->sdma->fw_loaded && sdmac->is_ram_script) {
+		dev_warn_once(sdmac->sdma->dev, "sdma firmware not ready!\n");
+		goto err_out;
+	}
+
	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
	if (!desc)
		goto err_out;
@@ -1651,9 +1775,26 @@ static int sdma_config(struct dma_chan *chan,
		       struct dma_slave_config *dmaengine_cfg)
 {
	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
 
	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
 
+	if (dmaengine_cfg->peripheral_config) {
+		struct sdma_peripheral_config *sdmacfg = dmaengine_cfg->peripheral_config;
+		if (dmaengine_cfg->peripheral_size != sizeof(struct sdma_peripheral_config)) {
+			dev_err(sdma->dev, "Invalid peripheral size %zu, expected %zu\n",
+				dmaengine_cfg->peripheral_size,
+				sizeof(struct sdma_peripheral_config));
+			return -EINVAL;
+		}
+		sdmac->n_fifos_src = sdmacfg->n_fifos_src;
+		sdmac->n_fifos_dst = sdmacfg->n_fifos_dst;
+		sdmac->stride_fifos_src = sdmacfg->stride_fifos_src;
+		sdmac->stride_fifos_dst = sdmacfg->stride_fifos_dst;
+		sdmac->words_per_fifo = sdmacfg->words_per_fifo;
+		sdmac->sw_done = sdmacfg->sw_done;
+	}
+
	/* Set ENBLn earlier to make sure dma request triggered after that */
	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
		return -EINVAL;

@@ -1722,11 +1863,11 @@ static void sdma_issue_pending(struct dma_chan *chan)
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
-#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	45
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	46
 
 static void sdma_add_scripts(struct sdma_engine *sdma,
-		const struct sdma_script_start_addrs *addr)
+			     const struct sdma_script_start_addrs *addr)
 {
	s32 *addr_arr = (u32 *)addr;
	s32 *saddr_arr = (u32 *)sdma->script_addrs;

@@ -1747,6 +1888,19 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
+
+	/*
+	 * For compatibility with NXP internal legacy kernel before 4.19 which
+	 * is based on uart ram script and mainline kernel based on uart rom
+	 * script, both uart ram/rom scripts are present in newer sdma
+	 * firmware. Use the rom versions if they are present (V3 or newer).
+	 */
+	if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
+		if (addr->uart_2_mcu_rom_addr)
+			sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
+		if (addr->uartsh_2_mcu_rom_addr)
+			sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
+	}
 }
 
 static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1796,16 +1950,18 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
-			header->ram_code_size,
-			addr->ram_code_start_addr);
+			 header->ram_code_size,
+			 addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
 
	sdma_add_scripts(sdma, addr);
 
+	sdma->fw_loaded = true;
+
	dev_info(sdma->dev, "loaded firmware %d.%d\n",
-		header->version_major,
-		header->version_minor);
+		 header->version_major,
+		 header->version_minor);
 
 err_firmware:
	release_firmware(fw);

@@ -1823,7 +1979,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
	u32 reg, val, shift, num_map, i;
	int ret = 0;
 
-	if (IS_ERR(np) || IS_ERR(gpr_np))
+	if (IS_ERR(np) || !gpr_np)
		goto out;
 
	event_remap = of_find_property(np, propname, NULL);

@@ -1871,7 +2027,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
	}
 
 out:
-	if (!IS_ERR(gpr_np))
+	if (gpr_np)
		of_node_put(gpr_np);
 
	return ret;

@@ -1909,7 +2065,7 @@ static int sdma_init(struct sdma_engine *sdma)
	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
 
	sdma->channel_control = dma_alloc_coherent(sdma->dev,
-			MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);
 

@@ -1919,9 +2075,9 @@ static int sdma_init(struct sdma_engine *sdma)
	}
 
	sdma->context = (void *)sdma->channel_control +
-		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
-		MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
+		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
 
	/* disable all channels */
	for (i = 0; i < sdma->drvdata->num_events; i++)

@@ -2055,8 +2211,8 @@ static int sdma_probe(struct platform_device *pdev)
	if (ret)
		goto err_clk;
 
-	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
-			       sdma);
+	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0,
+			       dev_name(&pdev->dev), sdma);
	if (ret)
		goto err_irq;
 

@@ -2086,6 +2242,7 @@ static int sdma_probe(struct platform_device *pdev)
 
		sdmac->channel = i;
		sdmac->vc.desc_free = sdma_desc_free;
+		INIT_LIST_HEAD(&sdmac->terminated);
		INIT_WORK(&sdmac->terminate_worker,
			  sdma_channel_terminate_work);
		/*

@@ -2217,7 +2374,7 @@ MODULE_DESCRIPTION("i.MX SDMA driver");
 #if IS_ENABLED(CONFIG_SOC_IMX6Q)
 MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
 #endif
-#if IS_ENABLED(CONFIG_SOC_IMX7D)
+#if IS_ENABLED(CONFIG_SOC_IMX7D) || IS_ENABLED(CONFIG_SOC_IMX8M)
 MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
 #endif
 MODULE_LICENSE("GPL");
@@ -1363,15 +1363,7 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (!iomap)
		return -ENOMEM;
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		return err;
 
@@ -158,8 +158,9 @@ static struct attribute *ioat_attrs[] = {
	&intr_coalesce_attr.attr,
	NULL,
 };
+ATTRIBUTE_GROUPS(ioat);
 
 struct kobj_type ioat_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat_attrs,
+	.default_groups = ioat_groups,
 };
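For reference (not part of the diff): kobj_type's .default_attrs was removed
upstream in favor of .default_groups. Per include/linux/sysfs.h,
ATTRIBUTE_GROUPS(ioat) expands to roughly the following, which is what the
new .default_groups member points at:

	static const struct attribute_group ioat_group = {
		.attrs = ioat_attrs,
	};

	static const struct attribute_group *ioat_groups[] = {
		&ioat_group,
		NULL,
	};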
@@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
			     struct pt_passthru_engine *pt_engine)
 {
	struct ptdma_desc desc;
+	struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q);
 
	cmd_q->cmd_error = 0;
	cmd_q->total_pt_ops++;

@@ -111,19 +112,14 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
	desc.dst_lo = lower_32_bits(pt_engine->dst_dma);
	desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma);
 
+	if (cmd_q->int_en)
+		pt_core_enable_queue_interrupts(pt);
+	else
+		pt_core_disable_queue_interrupts(pt);
+
	return pt_core_execute_cmd(&desc, cmd_q);
 }
 
-static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
-{
-	iowrite32(0, pt->cmd_q.reg_control + 0x000C);
-}
-
-static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
-{
-	iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
-}
-
 static void pt_do_cmd_complete(unsigned long data)
 {
	struct pt_tasklet_data *tdata = (struct pt_tasklet_data *)data;

@@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data)
		cmd->pt_cmd_callback(cmd->data, cmd->ret);
 }
 
-static irqreturn_t pt_core_irq_handler(int irq, void *data)
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
 {
-	struct pt_device *pt = data;
-	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
	u32 status;
 
-	pt_core_disable_queue_interrupts(pt);
-	pt->total_interrupts++;
	status = ioread32(cmd_q->reg_control + 0x0010);
	if (status) {
		cmd_q->int_status = status;

@@ -162,11 +154,21 @@ void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
		if ((status & INT_ERROR) && !cmd_q->cmd_error)
			cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);
 
-		/* Acknowledge the interrupt */
+		/* Acknowledge the completion */
		iowrite32(status, cmd_q->reg_control + 0x0010);
-		pt_core_enable_queue_interrupts(pt);
		pt_do_cmd_complete((ulong)&pt->tdata);
	}
 }
+
+static irqreturn_t pt_core_irq_handler(int irq, void *data)
+{
+	struct pt_device *pt = data;
+	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+	pt_core_disable_queue_interrupts(pt);
+	pt->total_interrupts++;
+	pt_check_status_trans(pt, cmd_q);
+	pt_core_enable_queue_interrupts(pt);
+	return IRQ_HANDLED;
+}

@@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
	vchan_tx_prep(&chan->vc, &desc->vd, flags);
 
	desc->pt = chan->pt;
+	desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
	desc->issued_to_hw = 0;
	desc->status = DMA_IN_PROGRESS;
 

@@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
		pt_cmd_callback(desc, 0);
 }
 
+static enum dma_status
+pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	struct pt_device *pt = to_pt_chan(c)->pt;
+	struct pt_cmd_queue *cmd_q = &pt->cmd_q;
+
+	pt_check_status_trans(pt, cmd_q);
+	return dma_cookie_status(c, cookie, txstate);
+}
+
 static int pt_pause(struct dma_chan *dma_chan)
 {
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

@@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan)
 {
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	unsigned long flags;
+	struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
	LIST_HEAD(head);
 
+	iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

@@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt)
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
-	dma_dev->device_tx_status = dma_cookie_status;
+	dma_dev->device_tx_status = pt_tx_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;

@@ -206,6 +206,9 @@ struct pt_cmd_queue {
	unsigned int active;
	unsigned int suspended;
 
+	/* Interrupt flag */
+	bool int_en;
+
	/* Register addresses for queue */
	void __iomem *reg_control;
	u32 qcontrol; /* Cached control register */

@@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt);
 int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q,
			     struct pt_passthru_engine *pt_engine);
 
+void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q);
 void pt_start_queue(struct pt_cmd_queue *cmd_q);
 void pt_stop_queue(struct pt_cmd_queue *cmd_q);
 
+static inline void pt_core_disable_queue_interrupts(struct pt_device *pt)
+{
+	iowrite32(0, pt->cmd_q.reg_control + 0x000C);
+}
+
+static inline void pt_core_enable_queue_interrupts(struct pt_device *pt)
+{
+	iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C);
+}
 #endif
@@ -844,9 +844,7 @@ static int hidma_probe(struct platform_device *pdev)
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
-		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-		if (rc)
-			goto dmafree;
+		goto dmafree;
	}
 
	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
@@ -39,7 +39,7 @@
 #include <asm/irq.h>
 #include <linux/platform_data/mmc-mxcmmc.h>
 
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 
 #define DRIVER_NAME "mxc-mmc"
 #define MXCMCI_TIMEOUT_MS 10000
@@ -20,7 +20,7 @@
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
@@ -24,7 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/property.h>
 
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 
 #define DRIVER_NAME "spi_imx"
 
@@ -30,7 +30,7 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/irq.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 
 #include "serial_mctrl_gpio.h"
 
@@ -26,7 +26,7 @@
 #include <linux/dma/ipu-dma.h>
 #include <linux/backlight.h>
 
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 #include <linux/platform_data/video-mx3fb.h>
 
 #include <asm/io.h>
@@ -3,8 +3,8 @@
  * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
  */
 
-#ifndef __ASM_ARCH_MXC_DMA_H__
-#define __ASM_ARCH_MXC_DMA_H__
+#ifndef __LINUX_DMA_IMX_H
+#define __LINUX_DMA_IMX_H
 
 #include <linux/scatterlist.h>
 #include <linux/device.h>

@@ -39,6 +39,7 @@ enum sdma_peripheral_type {
	IMX_DMATYPE_SSI_DUAL,	/* SSI Dual FIFO */
	IMX_DMATYPE_ASRC_SP,	/* Shared ASRC */
	IMX_DMATYPE_SAI,	/* SAI */
+	IMX_DMATYPE_MULTI_SAI,	/* MULTI FIFOs For Audio */
 };
 
 enum imx_dma_prio {

@@ -65,4 +66,36 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
	       !strcmp(chan->device->dev->driver->name, "imx-dma");
 }
 
-#endif
+/**
+ * struct sdma_peripheral_config - SDMA config for audio
+ * @n_fifos_src: Number of FIFOs for recording
+ * @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording, 0 means all FIFOs are
+ *                    continuous, 1 means 1 word stride between FIFOs. All stride
+ *                    between FIFOs should be same.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: numbers of words per FIFO fetch/fill, 1 means
+ *                  one channel per FIFO, 2 means 2 channels per FIFO..
+ *                  If 'n_fifos_src = 4' and 'words_per_fifo = 2', it
+ *                  means the first two words(channels) fetch from FIFO0
+ *                  and then jump to FIFO1 for next two words, and so on
+ *                  after the last FIFO3 fetched, roll back to FIFO0.
+ * @sw_done: Use software done. Needed for PDM (micfil)
+ *
+ * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
+ * registers. For multichannel recording/playback the SAI/micfil have
+ * one FIFO register per channel and the SDMA engine has to read/write
+ * the next channel from/to the next register and wrap around to the
+ * first register when all channels are handled. The number of active
+ * channels must be communicated to the SDMA engine using this struct.
+ */
+struct sdma_peripheral_config {
+	int n_fifos_src;
+	int n_fifos_dst;
+	int stride_fifos_src;
+	int stride_fifos_dst;
+	int words_per_fifo;
+	bool sw_done;
+};
+
+#endif /* __LINUX_DMA_IMX_H */
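For reference (not part of the diff), a hedged sketch of how an audio client
might pass this struct through dma_slave_config; all values and the FIFO
address are illustrative:

	struct sdma_peripheral_config sdmacfg = {
		.n_fifos_dst = 4,	/* four successive TX FIFO registers */
		.stride_fifos_dst = 0,	/* FIFO registers are contiguous */
		.words_per_fifo = 1,	/* one channel per FIFO */
	};
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_phys,	/* assumed device FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,
		.peripheral_config = &sdmacfg,
		.peripheral_size = sizeof(sdmacfg),
	};
	int ret = dmaengine_slave_config(chan, &cfg);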
@@ -380,6 +380,7 @@ enum dma_slave_buswidth {
	DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
	DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
	DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+	DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
 };
 
 /**

@@ -398,7 +399,7 @@ enum dma_slave_buswidth {
 * @src_addr_width: this is the width in bytes of the source (RX)
 * register where DMA data shall be read. If the source
 * is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
 * @dst_addr_width: same as src_addr_width but for destination
 * target (TX) mutatis mutandis.
 * @src_maxburst: the maximum number of words (note: words, as in

@@ -417,9 +418,6 @@ enum dma_slave_buswidth {
 * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
 * with 'true' if peripheral should be flow controller. Direction will be
 * selected at Runtime.
- * @slave_id: Slave requester id. Only valid for slave channels. The dma
- * slave peripheral will have unique id as dma requester which need to be
- * pass as slave config.
 * @peripheral_config: peripheral configuration for programming peripheral
 * for dmaengine transfer
 * @peripheral_size: peripheral configuration buffer size

@@ -447,7 +445,6 @@ struct dma_slave_config {
	u32 src_port_window_size;
	u32 dst_port_window_size;
	bool device_fc;
-	unsigned int slave_id;
	void *peripheral_config;
	size_t peripheral_size;
 };

@@ -872,7 +869,6 @@ struct dma_device {
	struct device *dev;
	struct module *owner;
	struct ida chan_ida;
-	struct mutex chan_mutex;	/* to protect chan_ida */
 
	u32 src_addr_widths;
	u32 dst_addr_widths;

@@ -1028,6 +1024,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
	return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
 static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags)
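For reference (not part of the diff), a usage sketch for the documented helper;
per the kernel-doc above, only the low byte of 'value' is used as the fill
pattern, and the destination is assumed to be already DMA-mapped:

	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_memset(chan, dma_dest, 0xA5, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);		/* queue the fill */
	dma_async_issue_pending(chan);	/* start execution */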
|
|
@ -53,6 +53,11 @@ enum idxd_scmd_stat {
|
|||
|
||||
/* IAX */
|
||||
#define IDXD_OP_FLAG_RD_SRC2_AECS 0x010000
|
||||
#define IDXD_OP_FLAG_RD_SRC2_2ND 0x020000
|
||||
#define IDXD_OP_FLAG_WR_SRC2_AECS_COMP 0x040000
|
||||
#define IDXD_OP_FLAG_WR_SRC2_AECS_OVFL 0x080000
|
||||
#define IDXD_OP_FLAG_SRC2_STS 0x100000
|
||||
#define IDXD_OP_FLAG_CRC_RFC3720 0x200000
|
||||
|
||||
/* Opcode */
|
||||
enum dsa_opcode {
|
||||
|
@ -81,6 +86,18 @@ enum iax_opcode {
|
|||
IAX_OPCODE_MEMMOVE,
|
||||
IAX_OPCODE_DECOMPRESS = 0x42,
|
||||
IAX_OPCODE_COMPRESS,
|
||||
IAX_OPCODE_CRC64,
|
||||
IAX_OPCODE_ZERO_DECOMP_32 = 0x48,
|
||||
IAX_OPCODE_ZERO_DECOMP_16,
|
||||
IAX_OPCODE_ZERO_COMP_32 = 0x4c,
|
||||
IAX_OPCODE_ZERO_COMP_16,
|
||||
IAX_OPCODE_SCAN = 0x50,
|
||||
IAX_OPCODE_SET_MEMBER,
|
||||
IAX_OPCODE_EXTRACT,
|
||||
IAX_OPCODE_SELECT,
|
||||
IAX_OPCODE_RLE_BURST,
|
||||
IAX_OPCODE_FIND_UNIQUE,
|
||||
IAX_OPCODE_EXPAND,
|
||||
};
|
||||
|
||||
/* Completion record status */
|
||||
|
@ -120,6 +137,7 @@ enum iax_completion_status {
|
|||
IAX_COMP_NONE = 0,
|
||||
IAX_COMP_SUCCESS,
|
||||
IAX_COMP_PAGE_FAULT_IR = 0x04,
|
||||
IAX_COMP_ANALYTICS_ERROR = 0x0a,
|
||||
IAX_COMP_OUTBUF_OVERFLOW,
|
||||
IAX_COMP_BAD_OPCODE = 0x10,
|
||||
IAX_COMP_INVALID_FLAGS,
|
||||
|
@ -140,7 +158,10 @@ enum iax_completion_status {
|
|||
IAX_COMP_WATCHDOG,
|
||||
IAX_COMP_INVALID_COMP_FLAG = 0x30,
|
||||
IAX_COMP_INVALID_FILTER_FLAG,
|
||||
IAX_COMP_INVALID_NUM_ELEMS = 0x33,
|
||||
IAX_COMP_INVALID_INPUT_SIZE,
|
||||
IAX_COMP_INVALID_NUM_ELEMS,
|
||||
IAX_COMP_INVALID_SRC1_WIDTH,
|
||||
IAX_COMP_INVALID_INVERT_OUT,
|
||||
};
|
||||
|
||||
#define DSA_COMP_STATUS_MASK 0x7f
|
||||
|
@ -319,8 +340,12 @@ struct iax_completion_record {
|
|||
uint32_t output_size;
|
||||
uint8_t output_bits;
|
||||
uint8_t rsvd3;
|
||||
uint16_t rsvd4;
|
||||
uint64_t rsvd5[4];
|
||||
uint16_t xor_csum;
|
||||
uint32_t crc;
|
||||
uint32_t min;
|
||||
uint32_t max;
|
||||
uint32_t sum;
|
||||
uint64_t rsvd4[2];
|
||||
} __attribute__((packed));
|
||||
|
||||
struct iax_raw_completion_record {
|
||||
|
|
|
@@ -11,7 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 #include <linux/pm_runtime.h>
 #include <sound/dmaengine_pcm.h>
 #include <sound/pcm_params.h>

@@ -8,7 +8,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 #include <sound/dmaengine_pcm.h>
 #include <sound/pcm_params.h>
 

@@ -7,7 +7,7 @@
 #define _FSL_EASRC_H
 
 #include <sound/asound.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 
 #include "fsl_asrc_common.h"
 

@@ -9,7 +9,7 @@
 #ifndef _IMX_PCM_H
 #define _IMX_PCM_H
 
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 
 /*
  * Do not change this as the FIQ handler depends on this size
 

@@ -182,7 +182,7 @@
 #define DRV_NAME "imx-ssi"
 
 #include <linux/dmaengine.h>
-#include <linux/platform_data/dma-imx.h>
+#include <linux/dma/imx-dma.h>
 #include <sound/dmaengine_pcm.h>
 #include "imx-pcm.h"
 