From: Christoph Hellwig <hch@lst.de>
To: Robin Murphy <robin.murphy@arm.com>
Cc: Tom Murphy <tmurphy@arista.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will.deacon@arm.com>,
linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH 17/23] iommu/dma: Refactor iommu_dma_alloc, part 2
Date: Thu, 23 May 2019 09:00:22 +0200
Message-ID: <20190523070028.7435-18-hch@lst.de>
In-Reply-To: <20190523070028.7435-1-hch@lst.de>
All the logic in iommu_dma_alloc that deals with page allocation from
the CMA or page allocators can be split into a self-contained helper,
and we can then map the result of that or the atomic pool allocation
with the iommu later. This also allows reusing __iommu_dma_free to
tear down the allocations and MMU mappings when the IOMMU mapping
fails.
Based on a patch from Robin Murphy.
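For illustration, here is a minimal driver-side sketch of how this path
gets exercised once the series is applied; the helper name
example_get_buffer is hypothetical and not part of this patch. A plain
dma_alloc_coherent() call on a device behind an IOMMU is routed through
the generic dma_map_ops into iommu_dma_alloc(), which now picks the
remap path, the atomic pool, or the new iommu_dma_alloc_pages() helper
and only then sets up the IOMMU mapping:

    #include <linux/dma-mapping.h>

    /*
     * Hypothetical caller: a blocking GFP_KERNEL allocation may use the
     * remap or CMA/page-allocator paths; GFP_ATOMIC on a non-coherent
     * device would take the atomic pool path instead.
     */
    static void *example_get_buffer(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle)
    {
            /* Returns NULL if the allocation or the IOMMU mapping fails. */
            return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
    }

Freeing with dma_free_coherent() correspondingly ends up in
iommu_dma_free(), which reuses the same __iommu_dma_free() teardown that
iommu_dma_alloc() uses when the IOMMU mapping fails.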
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/iommu/dma-iommu.c | 65 +++++++++++++++++++++------------------
1 file changed, 35 insertions(+), 30 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9ac76d286df1..9f0aa80f2bdd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -972,35 +972,14 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
__iommu_dma_free(dev, size, cpu_addr);
}
-static void *iommu_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
+ struct page **pagep, gfp_t gfp, unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
- int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
size_t alloc_size = PAGE_ALIGN(size);
struct page *page = NULL;
void *cpu_addr;
- gfp |= __GFP_ZERO;
-
- if (gfpflags_allow_blocking(gfp) &&
- !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
- return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
-
- if (!gfpflags_allow_blocking(gfp) && !coherent) {
- cpu_addr = dma_alloc_from_pool(alloc_size, &page, gfp);
- if (!cpu_addr)
- return NULL;
-
- *handle = __iommu_dma_map(dev, page_to_phys(page), size,
- ioprot);
- if (*handle == DMA_MAPPING_ERROR) {
- dma_free_from_pool(cpu_addr, alloc_size);
- return NULL;
- }
- return cpu_addr;
- }
-
if (gfpflags_allow_blocking(gfp))
page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
get_order(alloc_size),
@@ -1010,33 +989,59 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
if (!page)
return NULL;
- *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
- if (*handle == DMA_MAPPING_ERROR)
- goto out_free_pages;
-
if (!coherent || PageHighMem(page)) {
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
VM_USERMAP, prot, __builtin_return_address(0));
if (!cpu_addr)
- goto out_unmap;
+ goto out_free_pages;
if (!coherent)
arch_dma_prep_coherent(page, size);
} else {
cpu_addr = page_address(page);
}
+
+ *pagep = page;
memset(cpu_addr, 0, alloc_size);
return cpu_addr;
-out_unmap:
- __iommu_dma_unmap(dev, *handle, size);
out_free_pages:
if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
__free_pages(page, get_order(alloc_size));
return NULL;
}
+static void *iommu_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+ bool coherent = dev_is_dma_coherent(dev);
+ int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+ struct page *page = NULL;
+ void *cpu_addr;
+
+ gfp |= __GFP_ZERO;
+
+ if (gfpflags_allow_blocking(gfp) &&
+ !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+ return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+
+ if (!gfpflags_allow_blocking(gfp) && !coherent)
+ cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+ else
+ cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
+ if (!cpu_addr)
+ return NULL;
+
+ *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+ if (*handle == DMA_MAPPING_ERROR) {
+ __iommu_dma_free(dev, size, cpu_addr);
+ return NULL;
+ }
+
+ return cpu_addr;
+}
+
static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
unsigned long pfn, size_t size)
{
--
2.20.1
Thread overview: 25+ messages
2019-05-23 7:00 implement generic dma_map_ops for IOMMUs v6 Christoph Hellwig
2019-05-23 7:00 ` [PATCH 01/23] iommu/dma: Cleanup dma-iommu.h Christoph Hellwig
2019-05-23 7:00 ` [PATCH 02/23] iommu/dma: Remove the flush_page callback Christoph Hellwig
2019-05-23 7:00 ` [PATCH 03/23] iommu/dma: Use for_each_sg in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 04/23] iommu/dma: move the arm64 wrappers to common code Christoph Hellwig
2019-05-23 7:00 ` [PATCH 05/23] iommu/dma: Move __iommu_dma_map Christoph Hellwig
2019-05-23 7:00 ` [PATCH 06/23] iommu/dma: Move domain lookup into __iommu_dma_{map,unmap} Christoph Hellwig
2019-05-23 7:00 ` [PATCH 07/23] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers Christoph Hellwig
2019-05-23 7:00 ` [PATCH 08/23] iommu/dma: Factor out remapped pages lookup Christoph Hellwig
2019-05-23 7:00 ` [PATCH 09/23] iommu/dma: Refactor the page array remapping allocator Christoph Hellwig
2019-05-23 7:00 ` [PATCH 10/23] iommu/dma: Remove __iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 11/23] iommu/dma: Refactor iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 12/23] iommu/dma: Refactor iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 13/23] iommu/dma: Don't remap CMA unnecessarily Christoph Hellwig
2019-05-23 7:00 ` [PATCH 14/23] iommu/dma: Merge the CMA and alloc_pages allocation paths Christoph Hellwig
2019-05-23 7:00 ` [PATCH 15/23] iommu/dma: Split iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 16/23] iommu/dma: Cleanup variable naming in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 17/23] iommu/dma: Refactor iommu_dma_alloc, part 2 Christoph Hellwig [this message]
2019-05-23 7:00 ` [PATCH 18/23] iommu/dma: Refactor iommu_dma_get_sgtable Christoph Hellwig
2019-05-23 7:00 ` [PATCH 19/23] iommu/dma: Refactor iommu_dma_mmap Christoph Hellwig
2019-05-23 7:00 ` [PATCH 20/23] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP Christoph Hellwig
2019-05-23 7:00 ` [PATCH 21/23] iommu/dma: Switch copyright boilerplace to SPDX Christoph Hellwig
2019-05-23 7:00 ` [PATCH 22/23] arm64: switch copyright boilerplace to SPDX in dma-mapping.c Christoph Hellwig
2019-05-23 7:00 ` [PATCH 23/23] arm64: trim includes " Christoph Hellwig
2019-05-23 12:33 ` implement generic dma_map_ops for IOMMUs v6 Robin Murphy