LKML Archive on lore.kernel.org
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: akpm@linux-foundation.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org, Yu Zhao <yuzhao@google.com>,
	Christoph Hellwig <hch@lst.de>,
	David Howells <dhowells@redhat.com>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH v13 11/32] mm/lru: Add folio LRU functions
Date: Mon, 12 Jul 2021 20:01:43 +0100
Message-ID: <20210712190204.80979-12-willy@infradead.org>
In-Reply-To: <20210712190204.80979-1-willy@infradead.org>

Handle arbitrary-order folios being added to the LRU.  By definition,
all pages being added to the LRU were already head or base pages,
so define page wrappers around folio functions where the original
page functions involved calling compound_head() to manipulate flags,
but define folio wrappers around page functions where there's no need to
call compound_head().  The one thing that does change for those functions
is calling compound_nr() instead of thp_nr_pages(), in order to handle
arbitrary-sized folios.
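
A quick illustration of the accounting difference (an editorial sketch
using the 5.14-era definitions, not part of the patch):

	/*
	 * thp_nr_pages() only distinguishes a base page from a THP:
	 *
	 *	thp_nr_pages(page) == PageHead(page) ? HPAGE_PMD_NR : 1
	 *
	 * so an order-2 folio (4 pages) would be accounted as
	 * HPAGE_PMD_NR pages (512 on x86).  compound_nr() evaluates to
	 *
	 *	compound_nr(page) == 1UL << compound_order(page)
	 *
	 * which is exact for any folio order: 1, 4, 512, ...
	 */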

Saves 783 bytes of kernel text; no functions grow.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Yu Zhao <yuzhao@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/mm_inline.h | 85 +++++++++++++++++++++++++++------------
 1 file changed, 59 insertions(+), 26 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 355ea1ee32bd..c9e05631e565 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -6,22 +6,27 @@
 #include <linux/swap.h>
 
 /**
- * page_is_file_lru - should the page be on a file LRU or anon LRU?
- * @page: the page to test
+ * folio_is_file_lru - should the folio be on a file LRU or anon LRU?
+ * @folio: the folio to test
  *
- * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
- * freed anonymous page (e.g. via MADV_FREE).  Returns 0 if @page is a normal
- * anonymous page, a tmpfs page or otherwise ram or swap backed page.  Used by
- * functions that manipulate the LRU lists, to sort a page onto the right LRU
- * list.
+ * Returns 1 if @folio is a regular filesystem backed page cache folio
+ * or a lazily freed anonymous folio (e.g. via MADV_FREE).  Returns 0 if
+ * @folio is a normal anonymous folio, a tmpfs folio or otherwise ram or
+ * swap backed folio.  Used by functions that manipulate the LRU lists,
+ * to sort a folio onto the right LRU list.
  *
  * We would like to get this info without a page flag, but the state
- * needs to survive until the page is last deleted from the LRU, which
+ * needs to survive until the folio is last deleted from the LRU, which
  * could be as far down as __page_cache_release.
  */
+static inline int folio_is_file_lru(struct folio *folio)
+{
+	return !folio_swapbacked(folio);
+}
+
 static inline int page_is_file_lru(struct page *page)
 {
-	return !PageSwapBacked(page);
+	return folio_is_file_lru(page_folio(page));
 }
 
 static __always_inline void update_lru_size(struct lruvec *lruvec,
@@ -42,66 +47,94 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
  * __clear_page_lru_flags - clear page lru flags before releasing a page
  * @page: the page that was on lru and now has a zero reference
  */
-static __always_inline void __clear_page_lru_flags(struct page *page)
+static __always_inline void __folio_clear_lru_flags(struct folio *folio)
 {
-	VM_BUG_ON_PAGE(!PageLRU(page), page);
+	VM_BUG_ON_FOLIO(!folio_lru(folio), folio);
 
-	__ClearPageLRU(page);
+	__folio_clear_lru_flag(folio);
 
 	/* this shouldn't happen, so leave the flags to bad_page() */
-	if (PageActive(page) && PageUnevictable(page))
+	if (folio_active(folio) && folio_unevictable(folio))
 		return;
 
-	__ClearPageActive(page);
-	__ClearPageUnevictable(page);
+	__folio_clear_active_flag(folio);
+	__folio_clear_unevictable_flag(folio);
+}
+
+static __always_inline void __clear_page_lru_flags(struct page *page)
+{
+	__folio_clear_lru_flags(page_folio(page));
 }
 
 /**
- * page_lru - which LRU list should a page be on?
- * @page: the page to test
+ * folio_lru_list - which LRU list should a folio be on?
+ * @folio: the folio to test
  *
- * Returns the LRU list a page should be on, as an index
+ * Returns the LRU list a folio should be on, as an index
  * into the array of LRU lists.
  */
-static __always_inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list folio_lru_list(struct folio *folio)
 {
 	enum lru_list lru;
 
-	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+	VM_BUG_ON_FOLIO(folio_active(folio) && folio_unevictable(folio), folio);
 
-	if (PageUnevictable(page))
+	if (folio_unevictable(folio))
 		return LRU_UNEVICTABLE;
 
-	lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
-	if (PageActive(page))
+	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+	if (folio_active(folio))
 		lru += LRU_ACTIVE;
 
 	return lru;
 }
 
+static __always_inline enum lru_list page_lru(struct page *page)
+{
+	return folio_lru_list(page_folio(page));
+}
+
 static __always_inline void add_page_to_lru_list(struct page *page,
 				struct lruvec *lruvec)
 {
 	enum lru_list lru = page_lru(page);
 
-	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
+	update_lru_size(lruvec, lru, page_zonenum(page), compound_nr(page));
 	list_add(&page->lru, &lruvec->lists[lru]);
 }
 
+static __always_inline void folio_add_to_lru_list(struct folio *folio,
+				struct lruvec *lruvec)
+{
+	add_page_to_lru_list(&folio->page, lruvec);
+}
+
 static __always_inline void add_page_to_lru_list_tail(struct page *page,
 				struct lruvec *lruvec)
 {
 	enum lru_list lru = page_lru(page);
 
-	update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
+	update_lru_size(lruvec, lru, page_zonenum(page), compound_nr(page));
 	list_add_tail(&page->lru, &lruvec->lists[lru]);
 }
 
+static __always_inline void folio_add_to_lru_list_tail(struct folio *folio,
+				struct lruvec *lruvec)
+{
+	add_page_to_lru_list_tail(&folio->page, lruvec);
+}
+
 static __always_inline void del_page_from_lru_list(struct page *page,
 				struct lruvec *lruvec)
 {
 	list_del(&page->lru);
 	update_lru_size(lruvec, page_lru(page), page_zonenum(page),
-			-thp_nr_pages(page));
+			-compound_nr(page));
+}
+
+static __always_inline void folio_del_from_lru_list(struct folio *folio,
+				struct lruvec *lruvec)
+{
+	del_page_from_lru_list(&folio->page, lruvec);
 }
 #endif
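
For context, a minimal caller-side sketch of the new folio entry points
(illustrative only, not part of the patch; the function name is made up,
and the lruvec lock is assumed to be held, as in the real LRU callers):

	static void example_rotate_folio(struct folio *folio,
					 struct lruvec *lruvec)
	{
		/*
		 * Re-queue the folio at the tail of whichever list
		 * folio_lru_list() selects.  Both helpers account
		 * compound_nr() pages, so an arbitrary-order folio
		 * stays correctly counted.
		 */
		folio_del_from_lru_list(folio, lruvec);
		folio_add_to_lru_list_tail(folio, lruvec);
	}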
-- 
2.30.2


Thread overview: 33+ messages
2021-07-12 19:01 [PATCH v13 00/32] Memory folios Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 01/32] mm: Convert get_page_unless_zero() to return bool Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 02/32] mm: Introduce struct folio Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 03/32] mm: Add folio_pgdat(), folio_zone() and folio_zonenum() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 04/32] mm/vmstat: Add functions to account folio statistics Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 05/32] mm/debug: Add VM_BUG_ON_FOLIO() and VM_WARN_ON_ONCE_FOLIO() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 06/32] mm: Add folio reference count functions Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 07/32] mm: Add folio_put() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 08/32] mm: Add folio_get() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 09/32] mm: Add folio_try_get_rcu() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 10/32] mm: Add folio flag manipulation functions Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 11/32] mm/lru: Add folio LRU functions Matthew Wilcox (Oracle) [this message]
2021-07-12 19:01 ` [PATCH v13 12/32] mm: Handle per-folio private data Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 13/32] mm/filemap: Add folio_index(), folio_file_page() and folio_contains() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 14/32] mm/filemap: Add folio_next_index() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 15/32] mm/filemap: Add folio_pos() and folio_file_pos() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 16/32] mm/util: Add folio_mapping() and folio_file_mapping() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 17/32] mm/filemap: Add folio_unlock() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 18/32] mm/filemap: Add folio_lock() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 19/32] mm/filemap: Add folio_lock_killable() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 20/32] mm/filemap: Add __folio_lock_async() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 21/32] mm/filemap: Add folio_wait_locked() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 22/32] mm/filemap: Add __folio_lock_or_retry() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 23/32] mm/swap: Add folio_rotate_reclaimable() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 24/32] mm/filemap: Add folio_end_writeback() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 25/32] mm/writeback: Add folio_wait_writeback() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 26/32] mm/writeback: Add folio_wait_stable() Matthew Wilcox (Oracle)
2021-07-12 19:01 ` [PATCH v13 27/32] mm/filemap: Add folio_wait_bit() Matthew Wilcox (Oracle)
2021-07-12 19:02 ` [PATCH v13 28/32] mm/filemap: Add folio_wake_bit() Matthew Wilcox (Oracle)
2021-07-12 19:02 ` [PATCH v13 29/32] mm/filemap: Convert page wait queues to be folios Matthew Wilcox (Oracle)
2021-07-12 19:02 ` [PATCH v13 30/32] mm/filemap: Add folio private_2 functions Matthew Wilcox (Oracle)
2021-07-12 19:02 ` [PATCH v13 31/32] fs/netfs: Add folio fscache functions Matthew Wilcox (Oracle)
2021-07-12 19:02 ` [PATCH v13 32/32] mm: Add folio_mapped() Matthew Wilcox (Oracle)
