2023-03-03 20:21:21 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
|
|
|
/*
|
|
|
|
|
* Copyright (C) 2021 Intel Corporation
|
|
|
|
|
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
|
|
|
|
|
*/
|
|
|
|
|
#ifndef __LINUX_IOMMUFD_H
|
|
|
|
|
#define __LINUX_IOMMUFD_H
|
|
|
|
|
|
|
|
|
|
#include <linux/err.h>
|
2024-10-15 22:22:40 +00:00
|
|
|
#include <linux/errno.h>
|
2025-02-17 21:09:13 +00:00
|
|
|
#include <linux/refcount.h>
|
2024-10-15 22:22:40 +00:00
|
|
|
#include <linux/types.h>
|
2025-02-17 21:14:28 +00:00
|
|
|
#include <linux/xarray.h>
|
2023-03-03 20:21:21 +00:00
|
|
|
|
2023-03-03 20:26:02 +00:00
|
|
|
struct device;
|
2023-03-03 20:21:21 +00:00
|
|
|
struct file;
|
2023-10-19 17:44:06 +00:00
|
|
|
struct iommu_group;
|
2025-02-17 21:11:39 +00:00
|
|
|
struct iommu_user_data;
|
2025-02-17 21:15:13 +00:00
|
|
|
struct iommu_user_data_array;
|
2024-10-15 22:23:51 +00:00
|
|
|
struct iommufd_access;
|
|
|
|
|
struct iommufd_ctx;
|
|
|
|
|
struct iommufd_device;
|
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
JIRA: https://issues.redhat.com/browse/RHEL-55217
Upstream Status: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09
Author: Nicolin Chen <nicolinc@nvidia.com>
Date: Tue Nov 5 12:04:19 2024 -0800
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
Add a new IOMMUFD_OBJ_VIOMMU with an iommufd_viommu structure to represent
a slice of physical IOMMU device passed to or shared with a user space VM.
This slice, now a vIOMMU object, is a group of virtualization resources of
a physical IOMMU's, such as:
- Security namespace for guest owned ID, e.g. guest-controlled cache tags
- Non-device-affiliated event reporting, e.g. invalidation queue errors
- Access to a sharable nesting parent pagetable across physical IOMMUs
- Virtualization of various platforms IDs, e.g. RIDs and others
- Delivery of paravirtualized invalidation
- Direct assigned invalidation queues
- Direct assigned interrupts
Add a new viommu_alloc op in iommu_ops, for drivers to allocate their own
vIOMMU structures. And this allocation also needs a free(), so add struct
iommufd_viommu_ops.
To simplify a vIOMMU allocation, provide a iommufd_viommu_alloc() helper.
It's suggested that a driver should embed a core-level viommu structure in
its driver-level viommu struct and call the iommufd_viommu_alloc() helper,
meanwhile the driver can also implement a viommu ops:
struct my_driver_viommu {
struct iommufd_viommu core;
/* driver-owned properties/features */
....
};
static const struct iommufd_viommu_ops my_driver_viommu_ops = {
.free = my_driver_viommu_free,
/* future ops for virtualization features */
....
};
static struct iommufd_viommu my_driver_viommu_alloc(...)
{
struct my_driver_viommu *my_viommu =
iommufd_viommu_alloc(ictx, my_driver_viommu, core,
my_driver_viommu_ops);
/* Init my_viommu and related HW feature */
....
return &my_viommu->core;
}
static struct iommu_domain_ops my_driver_domain_ops = {
....
.viommu_alloc = my_driver_viommu_alloc,
};
Link: https://patch.msgid.link/r/64685e2b79dea0f1dc56f6ede04809b72d578935.1730836219.git.nicolinc@nvidia.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
(cherry picked from commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09)
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
2025-02-17 21:10:14 +00:00
|
|
|
struct iommufd_viommu_ops;
|
2024-10-15 22:23:51 +00:00
|
|
|
struct page;
|
2023-03-03 20:21:21 +00:00
|
|
|
|
2025-02-17 21:09:13 +00:00
|
|
|
/*
 * Object type tags for all iommufd objects with a userspace ID handle.
 * IOMMUFD_OBJ_ANY aliases IOMMUFD_OBJ_NONE so lookups can match any type.
 */
enum iommufd_object_type {
	IOMMUFD_OBJ_NONE,
	IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
	IOMMUFD_OBJ_DEVICE,
	IOMMUFD_OBJ_HWPT_PAGING,
	IOMMUFD_OBJ_HWPT_NESTED,
	IOMMUFD_OBJ_IOAS,
	IOMMUFD_OBJ_ACCESS,
	IOMMUFD_OBJ_FAULT,
	IOMMUFD_OBJ_VIOMMU,
	IOMMUFD_OBJ_VDEVICE,
#ifdef CONFIG_IOMMUFD_TEST
	/* Only present in selftest builds */
	IOMMUFD_OBJ_SELFTEST,
#endif
	IOMMUFD_OBJ_MAX,
};
|
|
|
|
|
|
|
|
|
|
/* Base struct for all objects with a userspace ID handle. */
|
|
|
|
|
struct iommufd_object {
|
|
|
|
|
refcount_t shortterm_users;
|
|
|
|
|
refcount_t users;
|
|
|
|
|
enum iommufd_object_type type;
|
|
|
|
|
unsigned int id;
|
|
|
|
|
};
|
|
|
|
|
|
2023-03-03 20:25:53 +00:00
|
|
|
/* Bind/unbind a physical device to/from an iommufd context. */
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
					   struct device *dev, u32 *id);
void iommufd_device_unbind(struct iommufd_device *idev);

/* Attach, replace, or detach the page table identified by @pt_id. */
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
void iommufd_device_detach(struct iommufd_device *idev);

/* Accessors for the owning context and the userspace object ID. */
struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
u32 iommufd_device_to_id(struct iommufd_device *idev);
|
|
|
|
|
|
2023-03-03 20:26:02 +00:00
|
|
|
struct iommufd_access_ops {
|
|
|
|
|
u8 needs_pin_pages : 1;
|
|
|
|
|
void (*unmap)(void *data, unsigned long iova, unsigned long length);
|
|
|
|
|
};
|
|
|
|
|
|
2023-03-03 20:23:38 +00:00
|
|
|
/* Flags for iommufd_access_rw() and iommufd_access_pin_pages(). */
enum {
	IOMMUFD_ACCESS_RW_READ = 0,
	IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
	/* Set if the caller is in a kthread then rw will use kthread_use_mm() */
	IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,

	/* Only for use by selftest */
	__IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
};
|
|
|
|
|
|
2023-03-03 20:26:02 +00:00
|
|
|
struct iommufd_access *
|
2023-10-18 22:19:56 +00:00
|
|
|
iommufd_access_create(struct iommufd_ctx *ictx,
|
2023-10-18 22:20:10 +00:00
|
|
|
const struct iommufd_access_ops *ops, void *data, u32 *id);
|
2023-03-03 20:26:02 +00:00
|
|
|
void iommufd_access_destroy(struct iommufd_access *access);
|
2023-10-18 22:19:56 +00:00
|
|
|
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
|
2023-07-28 06:33:27 +00:00
|
|
|
int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
|
iommufd/device: Add iommufd_access_detach() API
JIRA: https://issues.redhat.com/browse/RHEL-14318
commit e23a6217f3bb4f6f205d4517782ad49e3533fc1c
Author: Nicolin Chen <nicolinc@nvidia.com>
Date: Tue Jul 18 06:55:39 2023 -0700
iommufd/device: Add iommufd_access_detach() API
Previously, the detach routine is only done by the destroy(). And it was
called by vfio_iommufd_emulated_unbind() when the device runs close(), so
all the mappings in iopt were cleaned in that setup, when the call trace
reaches this detach() routine.
Now, there's a need of a detach uAPI, meaning that it does not only need
a new iommufd_access_detach() API, but also requires access->ops->unmap()
call as a cleanup. So add one.
However, leaving that unprotected can introduce some potential of a race
condition during the pin_/unpin_pages() call, where access->ioas->iopt is
getting referenced. So, add an ioas_lock to protect the context of iopt
referencings.
Also, to allow the iommufd_access_unpin_pages() callback to happen via
this unmap() call, add an ioas_unpin pointer, so the unpin routine won't
be affected by the "access->ioas = NULL" trick.
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Terrence Xu <terrence.xu@intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
Tested-by: Yanting Jiang <yanting.jiang@intel.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Tested-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Link: https://lore.kernel.org/r/20230718135551.6592-15-yi.l.liu@intel.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
2023-10-19 18:18:42 +00:00
|
|
|
void iommufd_access_detach(struct iommufd_access *access);
|
2023-03-03 20:26:02 +00:00
|
|
|
|
2023-03-03 20:21:21 +00:00
|
|
|
void iommufd_ctx_get(struct iommufd_ctx *ictx);
|
|
|
|
|
|
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
JIRA: https://issues.redhat.com/browse/RHEL-55217
Upstream Status: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09
Author: Nicolin Chen <nicolinc@nvidia.com>
Date: Tue Nov 5 12:04:19 2024 -0800
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
Add a new IOMMUFD_OBJ_VIOMMU with an iommufd_viommu structure to represent
a slice of physical IOMMU device passed to or shared with a user space VM.
This slice, now a vIOMMU object, is a group of virtualization resources of
a physical IOMMU's, such as:
- Security namespace for guest owned ID, e.g. guest-controlled cache tags
- Non-device-affiliated event reporting, e.g. invalidation queue errors
- Access to a sharable nesting parent pagetable across physical IOMMUs
- Virtualization of various platforms IDs, e.g. RIDs and others
- Delivery of paravirtualized invalidation
- Direct assigned invalidation queues
- Direct assigned interrupts
Add a new viommu_alloc op in iommu_ops, for drivers to allocate their own
vIOMMU structures. And this allocation also needs a free(), so add struct
iommufd_viommu_ops.
To simplify a vIOMMU allocation, provide a iommufd_viommu_alloc() helper.
It's suggested that a driver should embed a core-level viommu structure in
its driver-level viommu struct and call the iommufd_viommu_alloc() helper,
meanwhile the driver can also implement a viommu ops:
struct my_driver_viommu {
struct iommufd_viommu core;
/* driver-owned properties/features */
....
};
static const struct iommufd_viommu_ops my_driver_viommu_ops = {
.free = my_driver_viommu_free,
/* future ops for virtualization features */
....
};
static struct iommufd_viommu my_driver_viommu_alloc(...)
{
struct my_driver_viommu *my_viommu =
iommufd_viommu_alloc(ictx, my_driver_viommu, core,
my_driver_viommu_ops);
/* Init my_viommu and related HW feature */
....
return &my_viommu->core;
}
static struct iommu_domain_ops my_driver_domain_ops = {
....
.viommu_alloc = my_driver_viommu_alloc,
};
Link: https://patch.msgid.link/r/64685e2b79dea0f1dc56f6ede04809b72d578935.1730836219.git.nicolinc@nvidia.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
(cherry picked from commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09)
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
2025-02-17 21:10:14 +00:00
|
|
|
struct iommufd_viommu {
|
|
|
|
|
struct iommufd_object obj;
|
|
|
|
|
struct iommufd_ctx *ictx;
|
|
|
|
|
struct iommu_device *iommu_dev;
|
|
|
|
|
struct iommufd_hwpt_paging *hwpt;
|
|
|
|
|
|
|
|
|
|
const struct iommufd_viommu_ops *ops;
|
|
|
|
|
|
2025-02-17 21:14:28 +00:00
|
|
|
struct xarray vdevs;
|
|
|
|
|
|
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
JIRA: https://issues.redhat.com/browse/RHEL-55217
Upstream Status: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09
Author: Nicolin Chen <nicolinc@nvidia.com>
Date: Tue Nov 5 12:04:19 2024 -0800
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
Add a new IOMMUFD_OBJ_VIOMMU with an iommufd_viommu structure to represent
a slice of physical IOMMU device passed to or shared with a user space VM.
This slice, now a vIOMMU object, is a group of virtualization resources of
a physical IOMMU's, such as:
- Security namespace for guest owned ID, e.g. guest-controlled cache tags
- Non-device-affiliated event reporting, e.g. invalidation queue errors
- Access to a sharable nesting parent pagetable across physical IOMMUs
- Virtualization of various platforms IDs, e.g. RIDs and others
- Delivery of paravirtualized invalidation
- Direct assigned invalidation queues
- Direct assigned interrupts
Add a new viommu_alloc op in iommu_ops, for drivers to allocate their own
vIOMMU structures. And this allocation also needs a free(), so add struct
iommufd_viommu_ops.
To simplify a vIOMMU allocation, provide a iommufd_viommu_alloc() helper.
It's suggested that a driver should embed a core-level viommu structure in
its driver-level viommu struct and call the iommufd_viommu_alloc() helper,
meanwhile the driver can also implement a viommu ops:
struct my_driver_viommu {
struct iommufd_viommu core;
/* driver-owned properties/features */
....
};
static const struct iommufd_viommu_ops my_driver_viommu_ops = {
.free = my_driver_viommu_free,
/* future ops for virtualization features */
....
};
static struct iommufd_viommu my_driver_viommu_alloc(...)
{
struct my_driver_viommu *my_viommu =
iommufd_viommu_alloc(ictx, my_driver_viommu, core,
my_driver_viommu_ops);
/* Init my_viommu and related HW feature */
....
return &my_viommu->core;
}
static struct iommu_domain_ops my_driver_domain_ops = {
....
.viommu_alloc = my_driver_viommu_alloc,
};
Link: https://patch.msgid.link/r/64685e2b79dea0f1dc56f6ede04809b72d578935.1730836219.git.nicolinc@nvidia.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
(cherry picked from commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09)
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
2025-02-17 21:10:14 +00:00
|
|
|
unsigned int type;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* struct iommufd_viommu_ops - vIOMMU specific operations
|
|
|
|
|
* @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
|
|
|
|
|
* of the vIOMMU will be free-ed by iommufd core after calling this op
|
2025-02-17 21:11:39 +00:00
|
|
|
* @alloc_domain_nested: Allocate a IOMMU_DOMAIN_NESTED on a vIOMMU that holds a
|
|
|
|
|
* nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data
|
|
|
|
|
* must be defined in include/uapi/linux/iommufd.h.
|
|
|
|
|
* It must fully initialize the new iommu_domain before
|
|
|
|
|
* returning. Upon failure, ERR_PTR must be returned.
|
2025-02-17 21:15:13 +00:00
|
|
|
* @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
|
|
|
|
|
* any IOMMU hardware specific cache: TLB and device cache.
|
|
|
|
|
* The @array passes in the cache invalidation requests, in
|
|
|
|
|
* form of a driver data structure. A driver must update the
|
|
|
|
|
* array->entry_num to report the number of handled requests.
|
|
|
|
|
* The data structure of the array entry must be defined in
|
|
|
|
|
* include/uapi/linux/iommufd.h
|
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
JIRA: https://issues.redhat.com/browse/RHEL-55217
Upstream Status: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09
Author: Nicolin Chen <nicolinc@nvidia.com>
Date: Tue Nov 5 12:04:19 2024 -0800
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
Add a new IOMMUFD_OBJ_VIOMMU with an iommufd_viommu structure to represent
a slice of physical IOMMU device passed to or shared with a user space VM.
This slice, now a vIOMMU object, is a group of virtualization resources of
a physical IOMMU's, such as:
- Security namespace for guest owned ID, e.g. guest-controlled cache tags
- Non-device-affiliated event reporting, e.g. invalidation queue errors
- Access to a sharable nesting parent pagetable across physical IOMMUs
- Virtualization of various platforms IDs, e.g. RIDs and others
- Delivery of paravirtualized invalidation
- Direct assigned invalidation queues
- Direct assigned interrupts
Add a new viommu_alloc op in iommu_ops, for drivers to allocate their own
vIOMMU structures. And this allocation also needs a free(), so add struct
iommufd_viommu_ops.
To simplify a vIOMMU allocation, provide a iommufd_viommu_alloc() helper.
It's suggested that a driver should embed a core-level viommu structure in
its driver-level viommu struct and call the iommufd_viommu_alloc() helper,
meanwhile the driver can also implement a viommu ops:
struct my_driver_viommu {
struct iommufd_viommu core;
/* driver-owned properties/features */
....
};
static const struct iommufd_viommu_ops my_driver_viommu_ops = {
.free = my_driver_viommu_free,
/* future ops for virtualization features */
....
};
static struct iommufd_viommu my_driver_viommu_alloc(...)
{
struct my_driver_viommu *my_viommu =
iommufd_viommu_alloc(ictx, my_driver_viommu, core,
my_driver_viommu_ops);
/* Init my_viommu and related HW feature */
....
return &my_viommu->core;
}
static struct iommu_domain_ops my_driver_domain_ops = {
....
.viommu_alloc = my_driver_viommu_alloc,
};
Link: https://patch.msgid.link/r/64685e2b79dea0f1dc56f6ede04809b72d578935.1730836219.git.nicolinc@nvidia.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
(cherry picked from commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09)
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
2025-02-17 21:10:14 +00:00
|
|
|
*/
|
|
|
|
|
struct iommufd_viommu_ops {
|
|
|
|
|
void (*destroy)(struct iommufd_viommu *viommu);
|
2025-02-17 21:11:39 +00:00
|
|
|
struct iommu_domain *(*alloc_domain_nested)(
|
|
|
|
|
struct iommufd_viommu *viommu, u32 flags,
|
|
|
|
|
const struct iommu_user_data *user_data);
|
2025-02-17 21:15:13 +00:00
|
|
|
int (*cache_invalidate)(struct iommufd_viommu *viommu,
|
|
|
|
|
struct iommu_user_data_array *array);
|
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
JIRA: https://issues.redhat.com/browse/RHEL-55217
Upstream Status: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09
Author: Nicolin Chen <nicolinc@nvidia.com>
Date: Tue Nov 5 12:04:19 2024 -0800
iommufd: Introduce IOMMUFD_OBJ_VIOMMU and its related struct
Add a new IOMMUFD_OBJ_VIOMMU with an iommufd_viommu structure to represent
a slice of physical IOMMU device passed to or shared with a user space VM.
This slice, now a vIOMMU object, is a group of virtualization resources of
a physical IOMMU's, such as:
- Security namespace for guest owned ID, e.g. guest-controlled cache tags
- Non-device-affiliated event reporting, e.g. invalidation queue errors
- Access to a sharable nesting parent pagetable across physical IOMMUs
- Virtualization of various platforms IDs, e.g. RIDs and others
- Delivery of paravirtualized invalidation
- Direct assigned invalidation queues
- Direct assigned interrupts
Add a new viommu_alloc op in iommu_ops, for drivers to allocate their own
vIOMMU structures. And this allocation also needs a free(), so add struct
iommufd_viommu_ops.
To simplify a vIOMMU allocation, provide a iommufd_viommu_alloc() helper.
It's suggested that a driver should embed a core-level viommu structure in
its driver-level viommu struct and call the iommufd_viommu_alloc() helper,
meanwhile the driver can also implement a viommu ops:
struct my_driver_viommu {
struct iommufd_viommu core;
/* driver-owned properties/features */
....
};
static const struct iommufd_viommu_ops my_driver_viommu_ops = {
.free = my_driver_viommu_free,
/* future ops for virtualization features */
....
};
static struct iommufd_viommu my_driver_viommu_alloc(...)
{
struct my_driver_viommu *my_viommu =
iommufd_viommu_alloc(ictx, my_driver_viommu, core,
my_driver_viommu_ops);
/* Init my_viommu and related HW feature */
....
return &my_viommu->core;
}
static struct iommu_domain_ops my_driver_domain_ops = {
....
.viommu_alloc = my_driver_viommu_alloc,
};
Link: https://patch.msgid.link/r/64685e2b79dea0f1dc56f6ede04809b72d578935.1730836219.git.nicolinc@nvidia.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
(cherry picked from commit 6b22d562fcd6e3d1cc1c265b0596840946d16a09)
Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
2025-02-17 21:10:14 +00:00
|
|
|
};
|
|
|
|
|
|
2023-03-03 20:21:21 +00:00
|
|
|
#if IS_ENABLED(CONFIG_IOMMUFD)
|
|
|
|
|
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
|
2023-10-19 18:19:31 +00:00
|
|
|
struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
|
2023-03-03 20:21:21 +00:00
|
|
|
void iommufd_ctx_put(struct iommufd_ctx *ictx);
|
2023-10-19 17:44:06 +00:00
|
|
|
bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
|
2023-03-03 20:26:02 +00:00
|
|
|
|
|
|
|
|
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
|
|
|
|
|
unsigned long length, struct page **out_pages,
|
|
|
|
|
unsigned int flags);
|
|
|
|
|
void iommufd_access_unpin_pages(struct iommufd_access *access,
|
|
|
|
|
unsigned long iova, unsigned long length);
|
|
|
|
|
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
|
|
|
|
|
void *data, size_t len, unsigned int flags);
|
2023-01-18 17:50:28 +00:00
|
|
|
int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
|
|
|
|
|
int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
|
|
|
|
|
int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
|
2023-03-03 20:21:21 +00:00
|
|
|
#else /* !CONFIG_IOMMUFD */
|
|
|
|
|
static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
|
|
|
|
|
{
|
|
|
|
|
return ERR_PTR(-EOPNOTSUPP);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
|
|
|
|
|
{
|
|
|
|
|
}
|
2023-03-03 20:26:02 +00:00
|
|
|
|
|
|
|
|
static inline int iommufd_access_pin_pages(struct iommufd_access *access,
|
|
|
|
|
unsigned long iova,
|
|
|
|
|
unsigned long length,
|
|
|
|
|
struct page **out_pages,
|
|
|
|
|
unsigned int flags)
|
|
|
|
|
{
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
|
|
|
|
|
unsigned long iova,
|
|
|
|
|
unsigned long length)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
|
|
|
|
|
void *data, size_t len, unsigned int flags)
|
|
|
|
|
{
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
}
|
2023-03-03 20:26:13 +00:00
|
|
|
|
2023-01-18 17:50:28 +00:00
|
|
|
static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
|
|
|
|
|
{
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
|
2023-03-03 20:26:13 +00:00
|
|
|
{
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
}
|
2023-03-03 20:21:21 +00:00
|
|
|
#endif /* CONFIG_IOMMUFD */
|
2025-02-17 21:09:40 +00:00
|
|
|
|
|
|
|
|
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
|
|
|
|
|
struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
|
|
|
|
|
size_t size,
|
|
|
|
|
enum iommufd_object_type type);
|
2025-02-17 21:16:49 +00:00
|
|
|
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
|
|
|
|
|
unsigned long vdev_id);
|
2025-02-17 21:09:40 +00:00
|
|
|
#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
|
|
|
|
|
static inline struct iommufd_object *
|
|
|
|
|
_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size,
|
|
|
|
|
enum iommufd_object_type type)
|
|
|
|
|
{
|
|
|
|
|
return ERR_PTR(-EOPNOTSUPP);
|
|
|
|
|
}
|
2025-02-17 21:16:49 +00:00
|
|
|
|
|
|
|
|
static inline struct device *
|
|
|
|
|
iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
|
|
|
|
|
{
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2025-02-17 21:09:40 +00:00
|
|
|
#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
|
/*
 * Helpers for IOMMU driver to allocate driver structures that will be freed by
 * the iommufd core. The free op will be called prior to freeing the memory.
 *
 * @drv_struct must embed a struct iommufd_viommu named @member at offset 0;
 * both constraints are enforced at compile time by the static_asserts below.
 * Evaluates to a drv_struct pointer or an ERR_PTR value.
 */
#define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops)             \
	({                                                                     \
		drv_struct *ret;                                               \
									       \
		static_assert(__same_type(struct iommufd_viommu,               \
					  ((drv_struct *)NULL)->member));      \
		static_assert(offsetof(drv_struct, member.obj) == 0);          \
		ret = (drv_struct *)_iommufd_object_alloc(                     \
			ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU);         \
		if (!IS_ERR(ret))                                              \
			ret->member.ops = viommu_ops;                          \
		ret;                                                           \
	})
|
2023-03-03 20:21:21 +00:00
|
|
|
#endif
|