LKML Archive on lore.kernel.org
From: Christoph Hellwig <hch@lst.de>
To: Robin Murphy <robin.murphy@arm.com>
Cc: Tom Murphy <tmurphy@arista.com>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will.deacon@arm.com>,
linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH 19/23] iommu/dma: Refactor iommu_dma_mmap
Date: Thu, 23 May 2019 09:00:24 +0200
Message-ID: <20190523070028.7435-20-hch@lst.de>
In-Reply-To: <20190523070028.7435-1-hch@lst.de>
Inline __iommu_dma_mmap and __iommu_dma_mmap_pfn into the main function,
and use the fact that __iommu_dma_get_pages returns NULL for remapped
contiguous allocations to simplify the code flow a bit.
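For reference, the lookup helper this relies on was factored out earlier in
the series; a rough sketch of its behaviour (assuming the find_vm_area()
based implementation introduced there, not necessarily the exact code in
this tree):

	static struct page **__iommu_dma_get_pages(void *cpu_addr)
	{
		struct vm_struct *area = find_vm_area(cpu_addr);

		/*
		 * Only the page-array remapping allocator stores a pages
		 * array in the vm_struct; remapped contiguous allocations
		 * do not, so the lookup returns NULL for them.
		 */
		if (!area || !area->pages)
			return NULL;
		return area->pages;
	}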
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
drivers/iommu/dma-iommu.c | 60 +++++++--------------------------------
1 file changed, 11 insertions(+), 49 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b56bd8e7d5f9..ea2797d10070 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -650,21 +650,6 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
return NULL;
}
-/**
- * __iommu_dma_mmap - Map a buffer into provided user VMA
- * @pages: Array representing buffer from __iommu_dma_alloc()
- * @size: Size of buffer in bytes
- * @vma: VMA describing requested userspace mapping
- *
- * Maps the pages of the buffer in @pages into @vma. The caller is responsible
- * for verifying the correct size and protection of @vma beforehand.
- */
-static int __iommu_dma_mmap(struct page **pages, size_t size,
- struct vm_area_struct *vma)
-{
- return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
-}
-
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
@@ -1042,31 +1027,13 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
return cpu_addr;
}
-static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
- unsigned long pfn, size_t size)
-{
- int ret = -ENXIO;
- unsigned long nr_vma_pages = vma_pages(vma);
- unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long off = vma->vm_pgoff;
-
- if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- }
-
- return ret;
-}
-
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
- struct page **pages;
+ unsigned long pfn;
int ret;
vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
@@ -1077,24 +1044,19 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
return -ENXIO;
- if (!is_vmalloc_addr(cpu_addr)) {
- unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
- return __iommu_dma_mmap_pfn(vma, pfn, size);
- }
+ if (is_vmalloc_addr(cpu_addr)) {
+ struct page **pages = __iommu_dma_get_pages(cpu_addr);
- if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
- /*
- * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
- * hence in the vmalloc space.
- */
- unsigned long pfn = vmalloc_to_pfn(cpu_addr);
- return __iommu_dma_mmap_pfn(vma, pfn, size);
+ if (pages)
+ return vm_map_pages(vma, pages, nr_pages);
+ pfn = vmalloc_to_pfn(cpu_addr);
+ } else {
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
}
- pages = __iommu_dma_get_pages(cpu_addr);
- if (!pages)
- return -ENXIO;
- return __iommu_dma_mmap(pages, size, vma);
+ return remap_pfn_range(vma, vma->vm_start, pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
}
static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
--
2.20.1
Thread overview: 25+ messages
2019-05-23 7:00 implement generic dma_map_ops for IOMMUs v6 Christoph Hellwig
2019-05-23 7:00 ` [PATCH 01/23] iommu/dma: Cleanup dma-iommu.h Christoph Hellwig
2019-05-23 7:00 ` [PATCH 02/23] iommu/dma: Remove the flush_page callback Christoph Hellwig
2019-05-23 7:00 ` [PATCH 03/23] iommu/dma: Use for_each_sg in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 04/23] iommu/dma: move the arm64 wrappers to common code Christoph Hellwig
2019-05-23 7:00 ` [PATCH 05/23] iommu/dma: Move __iommu_dma_map Christoph Hellwig
2019-05-23 7:00 ` [PATCH 06/23] iommu/dma: Move domain lookup into __iommu_dma_{map,unmap} Christoph Hellwig
2019-05-23 7:00 ` [PATCH 07/23] iommu/dma: Squash __iommu_dma_{map,unmap}_page helpers Christoph Hellwig
2019-05-23 7:00 ` [PATCH 08/23] iommu/dma: Factor out remapped pages lookup Christoph Hellwig
2019-05-23 7:00 ` [PATCH 09/23] iommu/dma: Refactor the page array remapping allocator Christoph Hellwig
2019-05-23 7:00 ` [PATCH 10/23] iommu/dma: Remove __iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 11/23] iommu/dma: Refactor iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 12/23] iommu/dma: Refactor iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 13/23] iommu/dma: Don't remap CMA unnecessarily Christoph Hellwig
2019-05-23 7:00 ` [PATCH 14/23] iommu/dma: Merge the CMA and alloc_pages allocation paths Christoph Hellwig
2019-05-23 7:00 ` [PATCH 15/23] iommu/dma: Split iommu_dma_free Christoph Hellwig
2019-05-23 7:00 ` [PATCH 16/23] iommu/dma: Cleanup variable naming in iommu_dma_alloc Christoph Hellwig
2019-05-23 7:00 ` [PATCH 17/23] iommu/dma: Refactor iommu_dma_alloc, part 2 Christoph Hellwig
2019-05-23 7:00 ` [PATCH 18/23] iommu/dma: Refactor iommu_dma_get_sgtable Christoph Hellwig
2019-05-23 7:00 ` [PATCH 19/23] iommu/dma: Refactor iommu_dma_mmap Christoph Hellwig [this message]
2019-05-23 7:00 ` [PATCH 20/23] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP Christoph Hellwig
2019-05-23 7:00 ` [PATCH 21/23] iommu/dma: Switch copyright boilerplace to SPDX Christoph Hellwig
2019-05-23 7:00 ` [PATCH 22/23] arm64: switch copyright boilerplace to SPDX in dma-mapping.c Christoph Hellwig
2019-05-23 7:00 ` [PATCH 23/23] arm64: trim includes in dma-mapping.c Christoph Hellwig
2019-05-23 12:33 ` implement generic dma_map_ops for IOMMUs v6 Robin Murphy