* [PATCH v3 1/2] iommu/vt-d: Introduce __mapping_notify_one()
From: Peter Xu @ 2018-05-04 2:34 UTC
To: Linux IOMMU Mailing List, linux-kernel
Cc: Joerg Roedel, David Woodhouse, Alex Williamson, Jintack Lim,
Alexander Witte, peterx
Introduce a new helper to notify about a newly created mapping on a
single IOMMU. The next patch will further leverage this helper.
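As a usage sketch (hypothetical variable names; the real call sites are
converted in the hunks below), a mapping path publishes a newly written
mapping like this:

	/* 'iommu' backs 'domain'; a non-present -> present mapping was
	 * just written for 'pages' pages at IOVA pfn 'pfn'.  Under
	 * caching mode this sends an IOTLB PSI, otherwise it flushes
	 * the write buffer. */
	__mapping_notify_one(iommu, domain, pfn, pages);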
Signed-off-by: Peter Xu <peterx@redhat.com>
---
drivers/iommu/intel-iommu.c | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 749d8f235346..13190a54aba2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1606,6 +1606,18 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
iommu_flush_dev_iotlb(domain, addr, mask);
}
+/* Notification for newly created mappings */
+static inline void __mapping_notify_one(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages)
+{
+ /* It's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
+ else
+ iommu_flush_write_buffer(iommu);
+}
+
static void iommu_flush_iova(struct iova_domain *iovad)
{
struct dmar_domain *domain;
@@ -3625,13 +3637,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
if (ret)
goto error;
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova_pfn),
- size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
+ __mapping_notify_one(iommu, domain, mm_to_dma_pfn(iova_pfn), size);
start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
@@ -3819,11 +3825,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return 0;
}
- /* it's a non-present to present mapping. Only flush if caching mode */
- if (cap_caching_mode(iommu->cap))
- iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
- else
- iommu_flush_write_buffer(iommu);
+ __mapping_notify_one(iommu, domain, start_vpfn, size);
return nelems;
}
--
2.17.0
* [PATCH v3 2/2] iommu/vt-d: Fix iotlb psi missing for mappings
From: Peter Xu @ 2018-05-04 2:34 UTC
To: Linux IOMMU Mailing List, linux-kernel
Cc: Joerg Roedel, David Woodhouse, Alex Williamson, Jintack Lim,
Alexander Witte, peterx
When caching mode is enabled for an IOMMU, we should send explicit IOTLB
PSIs (page selective invalidations) even for newly created mappings.
However, these invalidations are missing for all intel_iommu_map()
callers, e.g., iommu_map(). One direct user is the vfio-pci driver.
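(For reference, caching mode is the CM bit, bit 7, of the VT-d
capability register, read via the cap_caching_mode() macro; quoting
from memory, so double-check against the tree:

	#define cap_caching_mode(c)	(((c) >> 7) & 1)

When CM is set, which is typical for emulated IOMMUs such as QEMU's
vIOMMU with caching-mode=on, even non-present-to-present transitions
must be invalidated explicitly.)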
To make sure we always send the PSIs when necessary, this patch first
introduces a domain_mapping() helper for page mappings, then fixes the
problem by generalizing the explicit map IOTLB PSI logic into that new
helper. With that, iommu_domain_identity_map() can use the simplified
version to avoid sending the notifications, while all the other callers
always send them.
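The resulting call structure, sketched from the hunks below:

	domain_sg_mapping() / domain_pfn_mapping()
	  -> domain_mapping()             /* map, then notify */
	       -> __domain_mapping()      /* write the page tables */
	       -> __mapping_notify_one()  /* PSI or write-buffer flush */

	iommu_domain_identity_map()
	  -> __domain_mapping()           /* map only, no notification */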
For the VM case, we send the PSIs to all the IOMMUs backing the domain.
This patch allows nested device assignment to work with QEMU (assign a
device first to the L1 guest, then assign it again to the L2 guest).
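A hypothetical L1 invocation that exercises this path (exact flags
depend on the QEMU version, but caching-mode=on is what makes the L1
kernel take the PSI path fixed here):

	qemu-system-x86_64 -machine q35,kernel-irqchip=split \
	    -device intel-iommu,intremap=on,caching-mode=on \
	    -device vfio-pci,host=0000:01:00.0 ...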
Signed-off-by: Peter Xu <peterx@redhat.com>
---
drivers/iommu/intel-iommu.c | 43 +++++++++++++++++++++++++++++--------
1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 13190a54aba2..601d3789211f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2352,18 +2352,47 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
return 0;
}
+static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ struct scatterlist *sg, unsigned long phys_pfn,
+ unsigned long nr_pages, int prot)
+{
+ int ret;
+ struct intel_iommu *iommu;
+
+ /* Do the real mapping first */
+ ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ if (ret)
+ return ret;
+
+ /* Notify about the new mapping */
+ if (domain_type_is_vm(domain)) {
+ /* VM typed domains can have more than one IOMMU */
+ int iommu_id;
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+ } else {
+ /* General domains only have one IOMMU */
+ iommu = domain_get_iommu(domain);
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+
+ return 0;
+}
+
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long nr_pages,
int prot)
{
- return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
+ return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
}
static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages,
int prot)
{
- return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
+ return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
}
static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
@@ -2668,9 +2697,9 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
*/
dma_pte_clear_range(domain, first_vpfn, last_vpfn);
- return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
- last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE);
+ return __domain_mapping(domain, first_vpfn, NULL,
+ first_vpfn, last_vpfn - first_vpfn + 1,
+ DMA_PTE_READ|DMA_PTE_WRITE);
}
static int domain_prepare_identity_map(struct device *dev,
@@ -3637,8 +3666,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
if (ret)
goto error;
- __mapping_notify_one(iommu, domain, mm_to_dma_pfn(iova_pfn), size);
-
start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
return start_paddr;
@@ -3825,8 +3852,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
return 0;
}
- __mapping_notify_one(iommu, domain, start_vpfn, size);
-
return nelems;
}
--
2.17.0