LKML Archive on lore.kernel.org
From: Christoph Hellwig <hch@lst.de>
To: Robin Murphy <robin.murphy@arm.com>
Cc: Tom Murphy <tmurphy@arista.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will.deacon@arm.com>,
linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH 06/23] iommu/dma: Move domain lookup into __iommu_dma_{map,unmap}
Date: Thu, 23 May 2019 09:00:11 +0200
Message-ID: <20190523070028.7435-7-hch@lst.de>
In-Reply-To: <20190523070028.7435-1-hch@lst.de>

From: Robin Murphy <robin.murphy@arm.com>

Most of the callers don't care, and the couple that do already have the
domain to hand for other reasons are in slow paths where the (trivial)
overhead of a repeated lookup will be utterly immaterial.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
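For reviewers, the calling-convention change in a nutshell: a minimal sketch
distilled from the hunks below (call-site fragments only, not compilable on
their own):

	/* Before: every caller resolved the domain and passed it down. */
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
	iova = __iommu_dma_map(dev, phys, size, prot,
			iommu_get_dma_domain(dev));

	/*
	 * After: the helpers call iommu_get_dma_domain(dev) internally,
	 * so call sites shrink to just:
	 */
	__iommu_dma_unmap(dev, handle, size);
	iova = __iommu_dma_map(dev, phys, size, prot);

The MSI path is the one caller that keeps extra context: rather than passing
a domain for iommu_dma_get_msi_page() to re-derive the cookie from, it now
passes the iommu_dma_cookie directly.
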
drivers/iommu/dma-iommu.c | 34 ++++++++++++++++------------------
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c406abe3be01..c04450a4adec 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -448,9 +448,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 			size >> iova_shift(iovad));
 }
 
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 		size_t size)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	size_t iova_off = iova_offset(iovad, dma_addr);
@@ -465,8 +466,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot, struct iommu_domain *domain)
+		size_t size, int prot)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	size_t iova_off = 0;
 	dma_addr_t iova;
@@ -565,7 +567,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 static void __iommu_dma_free(struct device *dev, struct page **pages,
 		size_t size, dma_addr_t *handle)
 {
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
+	__iommu_dma_unmap(dev, *handle, size);
 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 	*handle = DMA_MAPPING_ERROR;
 }
@@ -718,14 +720,13 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, int prot)
 {
-	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
-			iommu_get_dma_domain(dev));
+	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
 }
 
 static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+	__iommu_dma_unmap(dev, handle, size);
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -734,11 +735,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle =__iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, coherent, attrs),
-			iommu_get_dma_domain(dev));
+	dma_handle =__iommu_dma_map(dev, phys, size, prot);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(dev, phys, size, dir);
@@ -750,7 +750,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), dma_handle, size);
+	__iommu_dma_unmap(dev, dma_handle, size);
 }
 
 /*
@@ -931,21 +931,20 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		sg = tmp;
 	}
 	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
+	__iommu_dma_unmap(dev, start, end - start);
 }
 
 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
-			iommu_get_dma_domain(dev));
+			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+	__iommu_dma_unmap(dev, handle, size);
 }
 
 static void *iommu_dma_alloc(struct device *dev, size_t size,
@@ -1205,9 +1204,8 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 }
 
 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
-		phys_addr_t msi_addr, struct iommu_domain *domain)
+		phys_addr_t msi_addr, struct iommu_dma_cookie *cookie)
 {
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iommu_dma_msi_page *msi_page;
 	dma_addr_t iova;
 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
@@ -1222,7 +1220,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+	iova = __iommu_dma_map(dev, msi_addr, size, prot);
 	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_page;
 
@@ -1258,7 +1256,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
 	 * of an MSI from within an IPI handler.
 	 */
 	spin_lock_irqsave(&cookie->msi_lock, flags);
-	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
+	msi_page = iommu_dma_get_msi_page(dev, msi_addr, cookie);
 	spin_unlock_irqrestore(&cookie->msi_lock, flags);
 
 	msi_desc_set_iommu_cookie(desc, msi_page);
--
2.20.1
Thread overview: 25+ messages
2019-05-23 7:00 implement generic dma_map_ops for IOMMUs v6 Christoph Hellwig
2019-05-23 7:00 ` [PATCH 01/23] iommu/dma: Cleanup dma-iommu.h Christoph Hellwig
2019-05-23 7:00 ` [PATCH 02/23] iommu/dma: Remove the flush_page callback Christoph Hellwig
2019-05-23 7:00 ` [PATCH 03/23] iommu/dma: Use for_each_sg in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 04/23] iommu/dma: move the arm64 wrappers to common code Christoph Hellwig
2019-05-23 7:00 ` [PATCH 05/23] iommu/dma: Move __iommu_dma_map Christoph Hellwig
2019-05-23 7:00 ` Christoph Hellwig [this message]
2019-05-23 7:00 ` [PATCH 07/23] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers Christoph Hellwig
2019-05-23 7:00 ` [PATCH 08/23] iommu/dma: Factor out remapped pages lookup Christoph Hellwig
2019-05-23 7:00 ` [PATCH 09/23] iommu/dma: Refactor the page array remapping allocator Christoph Hellwig
2019-05-23 7:00 ` [PATCH 10/23] iommu/dma: Remove __iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 11/23] iommu/dma: Refactor iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 12/23] iommu/dma: Refactor iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 13/23] iommu/dma: Don't remap CMA unnecessarily Christoph Hellwig
2019-05-23 7:00 ` [PATCH 14/23] iommu/dma: Merge the CMA and alloc_pages allocation paths Christoph Hellwig
2019-05-23 7:00 ` [PATCH 15/23] iommu/dma: Split iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 16/23] iommu/dma: Cleanup variable naming in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 17/23] iommu/dma: Refactor iommu_dma_alloc, part 2 Christoph Hellwig
2019-05-23 7:00 ` [PATCH 18/23] iommu/dma: Refactor iommu_dma_get_sgtable Christoph Hellwig
2019-05-23 7:00 ` [PATCH 19/23] iommu/dma: Refactor iommu_dma_mmap Christoph Hellwig
2019-05-23 7:00 ` [PATCH 20/23] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP Christoph Hellwig
2019-05-23 7:00 ` [PATCH 21/23] iommu/dma: Switch copyright boilerplace to SPDX Christoph Hellwig
2019-05-23 7:00 ` [PATCH 22/23] arm64: switch copyright boilerplace to SPDX in dma-mapping.c Christoph Hellwig
2019-05-23 7:00 ` [PATCH 23/23] arm64: trim includes " Christoph Hellwig
2019-05-23 12:33 ` implement generic dma_map_ops for IOMMUs v6 Robin Murphy