From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Hugh Dickins <hughd@google.com>,
Shakeel Butt <shakeelb@google.com>,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
Yang Shi <shy828301@gmail.com>, Miaohe Lin <linmiaohe@huawei.com>,
Mike Kravetz <mike.kravetz@oracle.com>,
Michal Hocko <mhocko@suse.com>, Rik van Riel <riel@surriel.com>,
Christoph Hellwig <hch@infradead.org>,
Matthew Wilcox <willy@infradead.org>,
"Eric W. Biederman" <ebiederm@xmission.com>,
Alexey Gladkov <legion@kernel.org>,
Chris Wilson <chris@chris-wilson.co.uk>,
Matthew Auld <matthew.auld@intel.com>,
linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
linux-api@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 06/16] huge tmpfs: shmem_is_huge(vma, inode, index)
Date: Fri, 30 Jul 2021 00:42:16 -0700 (PDT)
Message-ID: <dae523ab-c75b-f532-af9d-8b6a1d4e29b@google.com>
In-Reply-To: <2862852d-badd-7486-3a8e-c5ea9666d6fb@google.com>

Extend shmem_huge_enabled(vma) to shmem_is_huge(vma, inode, index), so
that a consistent set of checks can be applied, even when the inode is
accessed through read/write syscalls (with NULL vma) instead of mmaps
(the index argument is seldom of interest, but required by mount option
"huge=within_size"). Clean up and rearrange the checks a little.

This then replaces the checks which shmem_fault() and shmem_getpage_gfp()
were making, and eliminates the SGP_HUGE and SGP_NOHUGE modes: while it
is still true that khugepaged's collapse_file() wants a small page at
that point, the race that might allocate it a huge page instead is too
unlikely to be worth optimizing against (we are there *because* there
was at least one small page in the way), and is handled by a later
PageTransCompound check.
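
For reference, a sketch of that later check in collapse_file() (the
test exists in mainline khugepaged; simplified here, exact placement
approximate):

	/* A racing fault instantiated a huge page: give up on collapse */
	if (PageTransCompound(page)) {
		result = SCAN_PAGE_COMPOUND;
		goto out_unlock;
	}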

Replace a couple of 0s by explicit SHMEM_HUGE_NEVERs; and replace the
obscure !shmem_mapping() symlink check by an explicit S_ISLNK() check -
nothing else needs that symlink check, so leave it there in
shmem_getpage_gfp().
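
A minimal sketch of the new entry point from a syscall path (the caller
shown is illustrative only; shmem_is_huge() and its arguments are as in
this patch):

	/*
	 * No vma on the read/write path, so the VM_NOHUGEPAGE and
	 * MMF_DISABLE_THP tests are skipped, and VM_HUGEPAGE advice
	 * cannot apply: shmem_huge and the mount's huge= option decide.
	 */
	bool huge = shmem_is_huge(NULL, inode, index);
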
Signed-off-by: Hugh Dickins <hughd@google.com>
---
 include/linux/shmem_fs.h |  9 +++--
 mm/khugepaged.c          |  2 +-
 mm/shmem.c               | 84 ++++++++++++----------------------------
 3 files changed, 32 insertions(+), 63 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 9b7f7ac52351..3b05a28e34c4 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -86,7 +86,12 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(unsigned int type, bool frontswap,
 		       unsigned long *fs_pages_to_unuse);
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
+extern bool shmem_is_huge(struct vm_area_struct *vma,
+			  struct inode *inode, pgoff_t index);
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+{
+	return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
+}
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
 
@@ -95,8 +100,6 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 enum sgp_type {
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
-	SGP_NOHUGE,	/* like SGP_CACHE, but no huge pages */
-	SGP_HUGE,	/* like SGP_CACHE, huge pages preferred */
 	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
 	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
 };
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b0412be08fa2..cecb19c3e965 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1721,7 +1721,7 @@ static void collapse_file(struct mm_struct *mm,
 			xas_unlock_irq(&xas);
 			/* swap in or instantiate fallocated page */
 			if (shmem_getpage(mapping->host, index, &page,
-					  SGP_NOHUGE)) {
+					  SGP_CACHE)) {
 				result = SCAN_FAIL;
 				goto xa_unlocked;
 			}
diff --git a/mm/shmem.c b/mm/shmem.c
index 740d48ef1eb5..6def7391084c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -474,39 +474,35 @@ static bool shmem_confirm_swap(struct address_space *mapping,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* ifdef here to avoid bloating shmem.o when not necessary */
 
-static int shmem_huge __read_mostly;
+static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
 
-bool shmem_huge_enabled(struct vm_area_struct *vma)
+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
 {
-	struct inode *inode = file_inode(vma->vm_file);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	loff_t i_size;
-	pgoff_t off;
 
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		return false;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		return true;
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return false;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
 		return false;
+	if (shmem_huge == SHMEM_HUGE_FORCE)
+		return true;
+
+	switch (SHMEM_SB(inode->i_sb)->huge) {
 	case SHMEM_HUGE_ALWAYS:
 		return true;
 	case SHMEM_HUGE_WITHIN_SIZE:
-		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
+		index = round_up(index, HPAGE_PMD_NR);
 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-		    i_size >> PAGE_SHIFT >= off)
+		if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
 			return true;
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
-		/* TODO: implement fadvise() hints */
-		return (vma->vm_flags & VM_HUGEPAGE);
+		if (vma && (vma->vm_flags & VM_HUGEPAGE))
+			return true;
+		fallthrough;
 	default:
-		VM_BUG_ON(1);
 		return false;
 	}
 }
@@ -680,6 +676,12 @@ static long shmem_unused_huge_count(struct super_block *sb,
 
 #define shmem_huge SHMEM_HUGE_DENY
 
+bool shmem_is_huge(struct vm_area_struct *vma,
+		   struct inode *inode, pgoff_t index)
+{
+	return false;
+}
+
 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 		struct shrink_control *sc, unsigned long nr_to_split)
 {
@@ -1829,7 +1831,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	struct shmem_sb_info *sbinfo;
 	struct mm_struct *charge_mm;
 	struct page *page;
-	enum sgp_type sgp_huge = sgp;
 	pgoff_t hindex = index;
 	gfp_t huge_gfp;
 	int error;
@@ -1838,8 +1839,6 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
-	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
-		sgp = SGP_CACHE;
 repeat:
 	if (sgp <= SGP_CACHE &&
 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
@@ -1898,36 +1897,12 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		return 0;
 	}
 
-	/* shmem_symlink() */
-	if (!shmem_mapping(mapping))
-		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
+	/* Never use a huge page for shmem_symlink() */
+	if (S_ISLNK(inode->i_mode))
 		goto alloc_nohuge;
-	if (shmem_huge == SHMEM_HUGE_FORCE)
-		goto alloc_huge;
-	switch (sbinfo->huge) {
-	case SHMEM_HUGE_NEVER:
+	if (!shmem_is_huge(vma, inode, index))
 		goto alloc_nohuge;
-	case SHMEM_HUGE_WITHIN_SIZE: {
-		loff_t i_size;
-		pgoff_t off;
-
-		off = round_up(index, HPAGE_PMD_NR);
-		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE &&
-		    i_size >> PAGE_SHIFT >= off)
-			goto alloc_huge;
-		fallthrough;
-	}
-	case SHMEM_HUGE_ADVISE:
-		if (sgp_huge == SGP_HUGE)
-			goto alloc_huge;
-		/* TODO: implement fadvise() hints */
-		goto alloc_nohuge;
-	}
-
-alloc_huge:
+
 	huge_gfp = vma_thp_gfp_mask(vma);
 	huge_gfp = limit_gfp_mask(huge_gfp, gfp);
 	page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true);
@@ -2083,7 +2058,6 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct inode *inode = file_inode(vma->vm_file);
 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
-	enum sgp_type sgp;
 	int err;
 	vm_fault_t ret = VM_FAULT_LOCKED;
 
@@ -2146,15 +2120,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 		spin_unlock(&inode->i_lock);
 	}
 
-	sgp = SGP_CACHE;
-
-	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
-	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
-		sgp = SGP_NOHUGE;
-	else if (vma->vm_flags & VM_HUGEPAGE)
-		sgp = SGP_HUGE;
-
-	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
+	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
 				gfp, vma, vmf, &ret);
 	if (err)
 		return vmf_error(err);
@@ -3961,7 +3927,7 @@ int __init shmem_init(void)
 	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
 		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
 	else
-		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
+		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */
 #endif
 
 	return 0;
--
2.26.2