LKML Archive on lore.kernel.org
From: Christoph Hellwig <hch@lst.de>
To: Robin Murphy <robin.murphy@arm.com>
Cc: Tom Murphy <tmurphy@arista.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will.deacon@arm.com>,
linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH 09/23] iommu/dma: Refactor the page array remapping allocator
Date: Thu, 23 May 2019 09:00:14 +0200
Message-ID: <20190523070028.7435-10-hch@lst.de>
In-Reply-To: <20190523070028.7435-1-hch@lst.de>

Move the call to dma_common_pages_remap into __iommu_dma_alloc and
rename it to iommu_dma_alloc_remap.  This creates a self-contained
helper for allocating and mapping remapped pages.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
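A quick note on the caller side: with this change the non-contiguous
branch of iommu_dma_alloc() collapses from an allocate + remap + unwind
sequence into a single call.  Condensed from the hunks below (a sketch,
not the literal code):

	/* before: the caller had to remap the pages and unwind on failure */
	pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot, handle);
	if (!pages)
		return NULL;
	addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
			__builtin_return_address(0));
	if (!addr)
		__iommu_dma_free(dev, pages, iosize, handle);

	/* after: one self-contained helper allocates, maps and remaps */
	addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);

The new helper also only publishes the DMA handle once the remap has
succeeded: if dma_common_pages_remap() fails it unmaps the IOVA range
itself via the new out_unmap label, so *dma_handle is left at
DMA_MAPPING_ERROR on every error path.
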
drivers/iommu/dma-iommu.c | 54 +++++++++++++++++++--------------------
1 file changed, 26 insertions(+), 28 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index e870ea59a34d..0ccc25fd5c86 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -564,9 +564,9 @@ static struct page **__iommu_dma_get_pages(void *cpu_addr)
 }
 
 /**
- * iommu_dma_free - Free a buffer allocated by __iommu_dma_alloc()
+ * __iommu_dma_free - Free a buffer allocated by iommu_dma_alloc_remap()
  * @dev: Device which owns this buffer
- * @pages: Array of buffer pages as returned by __iommu_dma_alloc()
+ * @pages: Array of buffer pages as returned by iommu_dma_alloc_remap()
  * @size: Size of buffer in bytes
  * @handle: DMA address of buffer
  *
@@ -582,33 +582,35 @@ static void __iommu_dma_free(struct device *dev, struct page **pages,
 }
 
 /**
- * __iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
+ * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
  * @dev: Device to allocate memory for. Must be a real device
  *	 attached to an iommu_dma_domain
  * @size: Size of buffer in bytes
+ * @dma_handle: Out argument for allocated DMA handle
  * @gfp: Allocation flags
  * @attrs: DMA attributes for this allocation
- * @prot: IOMMU mapping flags
- * @handle: Out argument for allocated DMA handle
  *
  * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
  * but an IOMMU which supports smaller pages might not map the whole thing.
  *
- * Return: Array of struct page pointers describing the buffer,
- *	   or NULL on failure.
+ * Return: Mapped virtual address, or NULL on failure.
  */
-static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
-		gfp_t gfp, unsigned long attrs, int prot, dma_addr_t *handle)
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
+	bool coherent = dev_is_dma_coherent(dev);
+	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
 	dma_addr_t iova;
-	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+	void *vaddr;
 
-	*handle = DMA_MAPPING_ERROR;
+	*dma_handle = DMA_MAPPING_ERROR;
 
 	min_size = alloc_sizes & -alloc_sizes;
 	if (min_size < PAGE_SIZE) {
@@ -634,7 +636,7 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
 	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
 		goto out_free_iova;
 
-	if (!(prot & IOMMU_CACHE)) {
+	if (!(ioprot & IOMMU_CACHE)) {
 		struct scatterlist *sg;
 		int i;
 
@@ -642,14 +644,21 @@ static struct page **__iommu_dma_alloc(struct device *dev, size_t size,
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
-	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
+	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
 			< size)
 		goto out_free_sg;
 
-	*handle = iova;
+	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+			__builtin_return_address(0));
+	if (!vaddr)
+		goto out_unmap;
+
+	*dma_handle = iova;
 	sg_free_table(&sgt);
-	return pages;
+	return vaddr;
+out_unmap:
+	__iommu_dma_unmap(dev, iova, size);
 
 out_free_sg:
 	sg_free_table(&sgt);
 out_free_iova:
@@ -1008,18 +1017,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 				size >> PAGE_SHIFT);
 		}
 	} else {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-		struct page **pages;
-
-		pages = __iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-					  handle);
-		if (!pages)
-			return NULL;
-
-		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-				__builtin_return_address(0));
-		if (!addr)
-			__iommu_dma_free(dev, pages, iosize, handle);
+		addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
 	}
 	return addr;
 }
@@ -1033,7 +1031,7 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	/*
 	 * @cpu_addr will be one of 4 things depending on how it was allocated:
 	 * - A remapped array of pages for contiguous allocations.
-	 * - A remapped array of pages from __iommu_dma_alloc(), for all
+	 * - A remapped array of pages from iommu_dma_alloc_remap(), for all
 	 *   non-atomic allocations.
 	 * - A non-cacheable alias from the atomic pool, for atomic
 	 *   allocations by non-coherent devices.
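
A side note on the unchanged context visible in the second hunk above:

	min_size = alloc_sizes & -alloc_sizes;

isolates the lowest set bit of the domain's pgsize_bitmap, i.e. the
smallest page size the IOMMU supports.  A standalone userspace
illustration (the bitmap value below is made up for the example, not
taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		/* pretend the IOMMU supports 4K, 2M and 1G pages */
		unsigned long alloc_sizes =
			(1UL << 12) | (1UL << 21) | (1UL << 30);
		/* two's complement: x & -x keeps only the lowest set bit */
		unsigned long min_size = alloc_sizes & -alloc_sizes;

		/* prints min_size = 0x1000, i.e. the 4K page size */
		printf("min_size = 0x%lx\n", min_size);
		return 0;
	}
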
--
2.20.1

Thread overview: 25+ messages
2019-05-23 7:00 implement generic dma_map_ops for IOMMUs v6 Christoph Hellwig
2019-05-23 7:00 ` [PATCH 01/23] iommu/dma: Cleanup dma-iommu.h Christoph Hellwig
2019-05-23 7:00 ` [PATCH 02/23] iommu/dma: Remove the flush_page callback Christoph Hellwig
2019-05-23 7:00 ` [PATCH 03/23] iommu/dma: Use for_each_sg in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 04/23] iommu/dma: move the arm64 wrappers to common code Christoph Hellwig
2019-05-23 7:00 ` [PATCH 05/23] iommu/dma: Move __iommu_dma_map Christoph Hellwig
2019-05-23 7:00 ` [PATCH 06/23] iommu/dma: Move domain lookup into __iommu_dma_{map,unmap} Christoph Hellwig
2019-05-23 7:00 ` [PATCH 07/23] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers Christoph Hellwig
2019-05-23 7:00 ` [PATCH 08/23] iommu/dma: Factor out remapped pages lookup Christoph Hellwig
2019-05-23 7:00 ` Christoph Hellwig [this message]
2019-05-23 7:00 ` [PATCH 10/23] iommu/dma: Remove __iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 11/23] iommu/dma: Refactor iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 12/23] iommu/dma: Refactor iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 13/23] iommu/dma: Don't remap CMA unnecessarily Christoph Hellwig
2019-05-23 7:00 ` [PATCH 14/23] iommu/dma: Merge the CMA and alloc_pages allocation paths Christoph Hellwig
2019-05-23 7:00 ` [PATCH 15/23] iommu/dma: Split iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 16/23] iommu/dma: Cleanup variable naming in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 17/23] iommu/dma: Refactor iommu_dma_alloc, part 2 Christoph Hellwig
2019-05-23 7:00 ` [PATCH 18/23] iommu/dma: Refactor iommu_dma_get_sgtable Christoph Hellwig
2019-05-23 7:00 ` [PATCH 19/23] iommu/dma: Refactor iommu_dma_mmap Christoph Hellwig
2019-05-23 7:00 ` [PATCH 20/23] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP Christoph Hellwig
2019-05-23 7:00 ` [PATCH 21/23] iommu/dma: Switch copyright boilerplate to SPDX Christoph Hellwig
2019-05-23 7:00 ` [PATCH 22/23] arm64: switch copyright boilerplate to SPDX in dma-mapping.c Christoph Hellwig
2019-05-23 7:00 ` [PATCH 23/23] arm64: trim includes " Christoph Hellwig
2019-05-23 12:33 ` implement generic dma_map_ops for IOMMUs v6 Robin Murphy