From: Christoph Hellwig <hch@lst.de>
To: Mauro Carvalho Chehab <mchehab@kernel.org>,
Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
Joonyoung Shim <jy0922.shim@samsung.com>,
Seung-Woo Kim <sw0312.kim@samsung.com>,
Kyungmin Park <kyungmin.park@samsung.com>,
Ben Skeggs <bskeggs@redhat.com>, Pawel Osciak <pawel@osciak.com>,
Marek Szyprowski <m.szyprowski@samsung.com>,
Matt Porter <mporter@kernel.crashing.org>,
iommu@lists.linux-foundation.org
Cc: Tom Lendacky <thomas.lendacky@amd.com>,
linux-doc@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-media@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, linux-ia64@vger.kernel.org,
linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org,
linux-samsung-soc@vger.kernel.org, nouveau@lists.freedesktop.org,
netdev@vger.kernel.org, linux-nvme@lists.infradead.org,
linux-scsi@vger.kernel.org, linux-mm@kvack.org,
alsa-devel@alsa-project.org
Subject: [PATCH 28/28] nvme-pci: use dma_alloc_pages backed dmapools
Date: Wed, 19 Aug 2020 08:55:55 +0200
Message-ID: <20200819065555.1802761-29-hch@lst.de>
In-Reply-To: <20200819065555.1802761-1-hch@lst.de>
Switch from coherent DMA pools to pools backed by dma_alloc_pages. This
helps devices with non-coherent DMA avoid host accesses to uncached
memory for every submission of an I/O larger than a single entry.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/nvme/host/pci.c | 80 ++++++++++++++++++++---------------------
1 file changed, 40 insertions(+), 40 deletions(-)
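
[Note below the cut line, not part of the commit message: a minimal
 sketch of the allocation pattern this patch moves to. The
 dma_pool_init()/dma_pool_exit() prototypes, and all the names below
 (my_dev, list_pool, my_dev_build_list, ...), are inferred from the
 call sites in this diff and are illustrative only, not a description
 of the final API added in patch 26/28.]

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

struct my_dev {
	struct device *dev;
	struct dma_pool list_pool;	/* embedded pool, no longer a pointer */
};

static int my_dev_setup(struct my_dev *md)
{
	/*
	 * Pool memory now comes from dma_alloc_pages(), so on a
	 * non-coherent device it can stay cacheable on the host.
	 */
	return dma_pool_init(md->dev, &md->list_pool, "list pool",
			     PAGE_SIZE, PAGE_SIZE, 0, DMA_TO_DEVICE);
}

static int my_dev_build_list(struct my_dev *md, __le64 **listp,
			     dma_addr_t *dmap, unsigned int nents)
{
	__le64 *list = dma_pool_alloc(&md->list_pool, GFP_ATOMIC, dmap);

	if (!list)
		return -ENOMEM;

	/* ... fill in nents entries on the CPU side ... */

	/*
	 * The memory is not coherent, so the CPU writes must be made
	 * visible to the device before the list is handed off.
	 */
	dma_sync_single_for_device(md->dev, *dmap, nents * sizeof(*list),
				   DMA_TO_DEVICE);
	*listp = list;
	return 0;
}

static void my_dev_teardown(struct my_dev *md)
{
	dma_pool_exit(&md->list_pool);
}

[The key difference from the coherent pools is the explicit
 dma_sync_single_for_device() before the device reads the list; that
 is what allows the pool memory to remain cacheable on non-coherent
 systems.]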
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a33adab62acbaf..fb34dbcb973673 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -114,8 +114,8 @@ struct nvme_dev {
struct blk_mq_tag_set admin_tagset;
u32 __iomem *dbs;
struct device *dev;
- struct dma_pool *prp_page_pool;
- struct dma_pool *prp_small_pool;
+ struct dma_pool prp_page_pool;
+ struct dma_pool prp_small_pool;
unsigned online_queues;
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
@@ -536,7 +536,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
if (iod->npages == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ dma_pool_free(&dev->prp_small_pool, nvme_pci_iod_list(req)[0],
dma_addr);
for (i = 0; i < iod->npages; i++) {
@@ -553,7 +553,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
next_dma_addr = le64_to_cpu(prp_list[last_prp]);
}
- dma_pool_free(dev->prp_page_pool, addr, dma_addr);
+ dma_pool_free(&dev->prp_page_pool, addr, dma_addr);
dma_addr = next_dma_addr;
}
@@ -611,10 +611,10 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
if (nprps <= (256 / 8)) {
- pool = dev->prp_small_pool;
+ pool = &dev->prp_small_pool;
iod->npages = 0;
} else {
- pool = dev->prp_page_pool;
+ pool = &dev->prp_page_pool;
iod->npages = 1;
}
@@ -630,6 +630,11 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
for (;;) {
if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
+
+ dma_sync_single_for_device(dev->dev, prp_dma,
+ i * sizeof(*prp_list),
+ DMA_TO_DEVICE);
+
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
return BLK_STS_RESOURCE;
@@ -653,6 +658,8 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_len = sg_dma_len(sg);
}
+ dma_sync_single_for_device(dev->dev, prp_dma, i * sizeof(*prp_list),
+ DMA_TO_DEVICE);
done:
cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
@@ -706,10 +713,10 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
}
if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
- pool = dev->prp_small_pool;
+ pool = &dev->prp_small_pool;
iod->npages = 0;
} else {
- pool = dev->prp_page_pool;
+ pool = &dev->prp_page_pool;
iod->npages = 1;
}
@@ -728,6 +735,10 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
if (i == SGES_PER_PAGE) {
struct nvme_sgl_desc *old_sg_desc = sg_list;
struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
+
+ dma_sync_single_for_device(dev->dev, sgl_dma,
+ i * sizeof(*sg_list),
+ DMA_TO_DEVICE);
sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
if (!sg_list)
@@ -743,6 +754,8 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
sg = sg_next(sg);
} while (--entries > 0);
+ dma_sync_single_for_device(dev->dev, sgl_dma, i * sizeof(*sg_list),
+ DMA_TO_DEVICE);
return BLK_STS_OK;
}
@@ -2457,30 +2470,6 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
return 0;
}
-static int nvme_setup_prp_pools(struct nvme_dev *dev)
-{
- dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
- NVME_CTRL_PAGE_SIZE,
- NVME_CTRL_PAGE_SIZE, 0);
- if (!dev->prp_page_pool)
- return -ENOMEM;
-
- /* Optimisation for I/Os between 4k and 128k */
- dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
- 256, 256, 0);
- if (!dev->prp_small_pool) {
- dma_pool_destroy(dev->prp_page_pool);
- return -ENOMEM;
- }
- return 0;
-}
-
-static void nvme_release_prp_pools(struct nvme_dev *dev)
-{
- dma_pool_destroy(dev->prp_page_pool);
- dma_pool_destroy(dev->prp_small_pool);
-}
-
static void nvme_free_tagset(struct nvme_dev *dev)
{
if (dev->tagset.tags)
@@ -2851,10 +2840,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
mutex_init(&dev->shutdown_lock);
- result = nvme_setup_prp_pools(dev);
- if (result)
- goto unmap;
-
quirks |= check_vendor_combination_bug(pdev);
if (!noacpi && nvme_acpi_storage_d3(pdev)) {
@@ -2867,6 +2852,18 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
}
+ result = dma_pool_init(dev->dev, &dev->prp_page_pool, "prp list page",
+ NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0,
+ DMA_TO_DEVICE);
+ if (result)
+ goto unmap;
+
+ /* Optimisation for I/Os between 4k and 128k */
+ result = dma_pool_init(dev->dev, &dev->prp_small_pool, "prp list 256",
+ 256, 256, 0, DMA_TO_DEVICE);
+ if (result)
+ goto release_prp_page_pool;
+
/*
* Double check that our mempool alloc size will cover the biggest
* command we support.
@@ -2880,7 +2877,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
GFP_KERNEL, node);
if (!dev->iod_mempool) {
result = -ENOMEM;
- goto release_pools;
+ goto release_prp_small_pool;
}
result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
@@ -2897,8 +2894,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
release_mempool:
mempool_destroy(dev->iod_mempool);
- release_pools:
- nvme_release_prp_pools(dev);
+ release_prp_small_pool:
+ dma_pool_exit(&dev->prp_small_pool);
+ release_prp_page_pool:
+ dma_pool_exit(&dev->prp_page_pool);
unmap:
nvme_dev_unmap(dev);
put_pci:
@@ -2963,7 +2962,8 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_release_prp_pools(dev);
+ dma_pool_exit(&dev->prp_small_pool);
+ dma_pool_exit(&dev->prp_page_pool);
nvme_dev_unmap(dev);
nvme_uninit_ctrl(&dev->ctrl);
}
--
2.28.0
Thread overview: 77+ messages
[not found] <CGME20200819065610eucas1p2fde88e81917071b1888e7cc01ba0f298@eucas1p2.samsung.com>
2020-08-19 6:55 ` a saner API for allocating DMA addressable pages Christoph Hellwig
2020-08-19 6:55 ` [PATCH 01/28] mm: turn alloc_pages into an inline function Christoph Hellwig
2020-08-19 6:55 ` [PATCH 02/28] drm/exynos: stop setting DMA_ATTR_NON_CONSISTENT Christoph Hellwig
2020-08-19 6:55 ` [PATCH 03/28] drm/nouveau/gk20a: " Christoph Hellwig
2020-08-19 6:55 ` [PATCH 04/28] net/au1000-eth: stop using DMA_ATTR_NON_CONSISTENT Christoph Hellwig
2020-08-19 6:55 ` [PATCH 05/28] media/v4l2: remove V4L2-FLAG-MEMORY-NON-CONSISTENT Christoph Hellwig
2020-08-19 11:16 ` Tomasz Figa
2020-08-19 11:51 ` Robin Murphy
2020-08-19 12:49 ` Tomasz Figa
2020-08-19 13:57 ` Christoph Hellwig
2020-08-19 14:11 ` Tomasz Figa
2020-08-20 4:45 ` Christoph Hellwig
2020-08-20 10:09 ` Tomasz Figa
2020-08-20 16:51 ` Christoph Hellwig
2020-08-19 14:07 ` Robin Murphy
2020-08-19 14:22 ` Tomasz Figa
2020-08-20 4:52 ` Christoph Hellwig
2020-08-20 5:02 ` Christoph Hellwig
2020-08-20 10:24 ` Tomasz Figa
2020-08-20 16:52 ` Christoph Hellwig
2020-08-20 17:41 ` Tomasz Figa
2020-08-19 13:54 ` Christoph Hellwig
2020-08-19 13:57 ` Tomasz Figa
2020-08-20 4:43 ` Christoph Hellwig
2020-08-20 5:20 ` Christoph Hellwig
2020-08-20 10:05 ` Tomasz Figa
2020-08-20 16:54 ` Christoph Hellwig
2020-08-20 17:33 ` Tomasz Figa
2020-09-01 11:06 ` Christoph Hellwig
2020-09-01 15:02 ` Tomasz Figa
2020-08-19 6:55 ` [PATCH 06/28] lib82596: move DMA allocation into the callers of i82596_probe Christoph Hellwig
2020-09-01 13:29 ` Thomas Bogendoerfer
2020-08-19 6:55 ` [PATCH 07/28] 53c700: improve non-coherent DMA handling Christoph Hellwig
2020-09-01 14:52 ` James Bottomley
2020-09-01 15:05 ` Matthew Wilcox
2020-09-01 15:22 ` James Bottomley
2020-09-01 16:21 ` Helge Deller
2020-09-01 16:41 ` Helge Deller
2020-09-01 16:53 ` Matthew Wilcox
2020-09-02 15:00 ` Helge Deller
2020-08-19 6:55 ` [PATCH 08/28] MIPS: make dma_sync_*_for_cpu a little less overzealous Christoph Hellwig
2020-09-01 13:53 ` Thomas Bogendoerfer
2020-08-19 6:55 ` [PATCH 09/28] MIPS/jazzdma: remove the unused vdma_remap function Christoph Hellwig
2020-09-01 13:49 ` Thomas Bogendoerfer
2020-08-19 6:55 ` [PATCH 10/28] MIPS/jazzdma: decouple from dma-direct Christoph Hellwig
2020-09-01 13:49 ` Thomas Bogendoerfer
2020-08-19 6:55 ` [PATCH 11/28] dma-mapping: add (back) arch_dma_mark_clean for ia64 Christoph Hellwig
2020-08-19 6:55 ` [PATCH 12/28] dma-direct: remove dma_direct_{alloc,free}_pages Christoph Hellwig
2020-08-19 6:55 ` [PATCH 13/28] dma-direct: lift gfp_t manipulation out of__dma_direct_alloc_pages Christoph Hellwig
2020-08-19 6:55 ` [PATCH 14/28] dma-direct: use phys_to_dma_direct in dma_direct_alloc Christoph Hellwig
2020-08-19 6:55 ` [PATCH 15/28] dma-direct: remove __dma_to_phys Christoph Hellwig
2020-08-19 6:55 ` [PATCH 16/28] dma-direct: rename and cleanup __phys_to_dma Christoph Hellwig
2020-08-19 6:55 ` [PATCH 17/28] dma-mapping: move dma_common_{mmap,get_sgtable} out of mapping.c Christoph Hellwig
2020-08-19 6:55 ` [PATCH 18/28] dma-mapping: move the dma_declare_coherent_memory documentation Christoph Hellwig
2020-08-19 6:55 ` [PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc,free}_pages Christoph Hellwig
2020-08-19 15:03 ` [PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc, free}_pages Tomasz Figa
2020-08-20 5:15 ` Christoph Hellwig
2020-08-19 6:55 ` [PATCH 20/28] sgiwd93: convert from dma_cache_sync to dma_sync_single_for_device Christoph Hellwig
2020-08-19 6:55 ` [PATCH 21/28] hal2: " Christoph Hellwig
2020-08-19 6:55 ` [PATCH 22/28] sgiseeq: " Christoph Hellwig
2020-09-01 15:22 ` Thomas Bogendoerfer
2020-09-01 17:12 ` Thomas Bogendoerfer
2020-09-01 17:16 ` Christoph Hellwig
2020-09-01 17:38 ` Thomas Bogendoerfer
2020-09-02 21:38 ` Thomas Bogendoerfer
2020-09-03 8:42 ` Christoph Hellwig
2020-09-03 8:43 ` Christoph Hellwig
2020-09-03 8:46 ` Christoph Hellwig
2020-08-19 6:55 ` [PATCH 23/28] lib82596: " Christoph Hellwig
2020-08-19 6:55 ` [PATCH 24/28] 53c700: " Christoph Hellwig
2020-08-19 6:55 ` [PATCH 25/28] dma-mapping: remove dma_cache_sync Christoph Hellwig
2020-08-19 6:55 ` [PATCH 26/28] dmapool: add dma_alloc_pages support Christoph Hellwig
2020-08-19 6:55 ` [PATCH 27/28] nvme-pci: fix PRP pool size Christoph Hellwig
2020-08-19 6:55 ` Christoph Hellwig [this message]
2020-08-25 11:30 ` a saner API for allocating DMA addressable pages Marek Szyprowski
2020-08-25 13:26 ` Christoph Hellwig
2020-08-29 9:46 ` Helge Deller