From: Anthony Yznaga <anthony.yznaga@oracle.com>
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: willy@infradead.org, corbet@lwn.net, tglx@linutronix.de, mingo@redhat.com, bp@alien8.de, x86@kernel.org, hpa@zytor.com, dave.hansen@linux.intel.com, luto@kernel.org, peterz@infradead.org, rppt@linux.ibm.com, akpm@linux-foundation.org, hughd@google.com, ebiederm@xmission.com, masahiroy@kernel.org, ardb@kernel.org, ndesaulniers@google.com, dima@golovin.in, daniel.kiper@oracle.com, nivedita@alum.mit.edu, rafael.j.wysocki@intel.com, dan.j.williams@intel.com, zhenzhong.duan@oracle.com, jroedel@suse.de, bhe@redhat.com, guro@fb.com, Thomas.Lendacky@amd.com, andriy.shevchenko@linux.intel.com, keescook@chromium.org, hannes@cmpxchg.org, minchan@kernel.org, mhocko@kernel.org, ying.huang@intel.com, yang.shi@linux.alibaba.com, gustavo@embeddedor.com, ziqian.lzq@antfin.com, vdavydov.dev@gmail.com, jason.zeng@intel.com, kevin.tian@intel.com, zhiyuan.lv@intel.com, lei.l.li@intel.com, paul.c.lai@intel.com, ashok.raj@intel.com, linux-fsdevel@vger.kernel.org, linux-doc@vger.kernel.org, kexec@lists.infradead.org
Subject: [RFC 02/43] mm: PKRAM: implement node load and save functions
Date: Wed, 6 May 2020 17:41:28 -0700
Message-ID: <1588812129-8596-3-git-send-email-anthony.yznaga@oracle.com>
In-Reply-To: <1588812129-8596-1-git-send-email-anthony.yznaga@oracle.com>

Preserved memory is divided into nodes which can be saved and loaded
independently of each other. PKRAM nodes are kept on a list and
identified by unique names. Whenever a save operation is initiated by
calling pkram_prepare_save(), a new node is created and linked to the
list. When the save operation has been committed by calling
pkram_finish_save(), the node becomes loadable. A load operation can
then be initiated by calling pkram_prepare_load(), which deletes the
node from the list and prepares the corresponding stream for loading
data from it. After the load has finished, the pkram_finish_load()
function must be called to free the node. Nodes are also deleted when a
save operation is discarded, i.e. when pkram_discard_save() is called
instead of pkram_finish_save().

Originally-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Anthony Yznaga <anthony.yznaga@oracle.com>
---
 include/linux/pkram.h |   7 ++-
 mm/pkram.c            | 148 ++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 149 insertions(+), 6 deletions(-)

diff --git a/include/linux/pkram.h b/include/linux/pkram.h
index 4c4e13311ec8..83a0579e4c1c 100644
--- a/include/linux/pkram.h
+++ b/include/linux/pkram.h
@@ -6,7 +6,12 @@
 #include <linux/types.h>
 #include <linux/mm_types.h>
 
-struct pkram_stream;
+struct pkram_node;
+
+struct pkram_stream {
+	gfp_t gfp_mask;
+	struct pkram_node *node;
+};
 
 #define PKRAM_NAME_MAX	256	/* including nul */
 
diff --git a/mm/pkram.c b/mm/pkram.c
index d6f2f79d4852..5c57126353ff 100644
--- a/mm/pkram.c
+++ b/mm/pkram.c
@@ -2,16 +2,85 @@
 #include <linux/err.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/mm.h>
+#include <linux/mutex.h>
 #include <linux/pkram.h>
+#include <linux/string.h>
 #include <linux/types.h>
 
+/*
+ * Preserved memory is divided into nodes that can be saved or loaded
+ * independently of each other. The nodes are identified by unique name
+ * strings.
+ *
+ * The structure occupies a memory page.
+ */
+struct pkram_node {
+	__u32	flags;
+
+	__u8	name[PKRAM_NAME_MAX];
+};
+
+#define PKRAM_SAVE		1
+#define PKRAM_LOAD		2
+#define PKRAM_ACCMODE_MASK	3
+
+static LIST_HEAD(pkram_nodes);			/* linked through page::lru */
+static DEFINE_MUTEX(pkram_mutex);		/* serializes open/close */
+
+static inline struct page *pkram_alloc_page(gfp_t gfp_mask)
+{
+	return alloc_page(gfp_mask);
+}
+
+static inline void pkram_free_page(void *addr)
+{
+	free_page((unsigned long)addr);
+}
+
+static inline void pkram_insert_node(struct pkram_node *node)
+{
+	list_add(&virt_to_page(node)->lru, &pkram_nodes);
+}
+
+static inline void pkram_delete_node(struct pkram_node *node)
+{
+	list_del(&virt_to_page(node)->lru);
+}
+
+static struct pkram_node *pkram_find_node(const char *name)
+{
+	struct page *page;
+	struct pkram_node *node;
+
+	list_for_each_entry(page, &pkram_nodes, lru) {
+		node = page_address(page);
+		if (strcmp(node->name, name) == 0)
+			return node;
+	}
+	return NULL;
+}
+
+static void pkram_stream_init(struct pkram_stream *ps,
+			      struct pkram_node *node, gfp_t gfp_mask)
+{
+	memset(ps, 0, sizeof(*ps));
+	ps->gfp_mask = gfp_mask;
+	ps->node = node;
+}
+
 /**
  * Create a preserved memory node with name @name and initialize stream @ps
  * for saving data to it.
  *
  * @gfp_mask specifies the memory allocation mask to be used when saving data.
  *
+ * Error values:
+ *	%ENAMETOOLONG: name len >= PKRAM_NAME_MAX
+ *	%ENOMEM: insufficient memory available
+ *	%EEXIST: node with specified name already exists
+ *
  * Returns 0 on success, -errno on failure.
  *
  * After the save has finished, pkram_finish_save() (or pkram_discard_save() in
@@ -19,7 +88,34 @@
  */
 int pkram_prepare_save(struct pkram_stream *ps, const char *name, gfp_t gfp_mask)
 {
-	return -ENOSYS;
+	struct page *page;
+	struct pkram_node *node;
+	int err = 0;
+
+	if (strlen(name) >= PKRAM_NAME_MAX)
+		return -ENAMETOOLONG;
+
+	page = pkram_alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		return -ENOMEM;
+	node = page_address(page);
+
+	node->flags = PKRAM_SAVE;
+	strcpy(node->name, name);
+
+	mutex_lock(&pkram_mutex);
+	if (!pkram_find_node(name))
+		pkram_insert_node(node);
+	else
+		err = -EEXIST;
+	mutex_unlock(&pkram_mutex);
+	if (err) {
+		__free_page(page);
+		return err;
+	}
+
+	pkram_stream_init(ps, node, gfp_mask);
+	return 0;
 }
 
 /**
@@ -50,7 +146,12 @@ void pkram_finish_save_obj(struct pkram_stream *ps)
  */
 void pkram_finish_save(struct pkram_stream *ps)
 {
-	BUG();
+	struct pkram_node *node = ps->node;
+
+	BUG_ON((node->flags & PKRAM_ACCMODE_MASK) != PKRAM_SAVE);
+
+	smp_wmb();
+	node->flags &= ~PKRAM_ACCMODE_MASK;
 }
 
 /**
@@ -60,7 +161,15 @@ void pkram_finish_save(struct pkram_stream *ps)
  */
 void pkram_discard_save(struct pkram_stream *ps)
 {
-	BUG();
+	struct pkram_node *node = ps->node;
+
+	BUG_ON((node->flags & PKRAM_ACCMODE_MASK) != PKRAM_SAVE);
+
+	mutex_lock(&pkram_mutex);
+	pkram_delete_node(node);
+	mutex_unlock(&pkram_mutex);
+
+	pkram_free_page(node);
 }
 
 /**
@@ -69,11 +178,36 @@ void pkram_discard_save(struct pkram_stream *ps)
  *
  * Returns 0 on success, -errno on failure.
  *
+ * Error values:
+ *	%ENOENT: node with specified name does not exist
+ *	%EBUSY: save to required node has not finished yet
+ *
  * After the load has finished, pkram_finish_load() is to be called.
  */
 int pkram_prepare_load(struct pkram_stream *ps, const char *name)
 {
-	return -ENOSYS;
+	struct pkram_node *node;
+	int err = 0;
+
+	mutex_lock(&pkram_mutex);
+	node = pkram_find_node(name);
+	if (!node) {
+		err = -ENOENT;
+		goto out_unlock;
+	}
+	if (node->flags & PKRAM_ACCMODE_MASK) {
+		err = -EBUSY;
+		goto out_unlock;
+	}
+	pkram_delete_node(node);
+out_unlock:
+	mutex_unlock(&pkram_mutex);
+	if (err)
+		return err;
+
+	node->flags |= PKRAM_LOAD;
+	pkram_stream_init(ps, node, 0);
+	return 0;
 }
 
 /**
@@ -106,7 +240,11 @@ void pkram_finish_load_obj(struct pkram_stream *ps)
  */
 void pkram_finish_load(struct pkram_stream *ps)
 {
-	BUG();
+	struct pkram_node *node = ps->node;
+
+	BUG_ON((node->flags & PKRAM_ACCMODE_MASK) != PKRAM_LOAD);
+
+	pkram_free_page(node);
 }
 
 /**
-- 
2.13.3
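
A minimal sketch of how a caller might drive the node lifecycle introduced
above, assuming only the interfaces visible in this patch
(pkram_prepare_save(), pkram_finish_save(), pkram_discard_save(),
pkram_prepare_load(), pkram_finish_load()). The data-transfer calls
pkram_write_data() and pkram_read_data() are hypothetical placeholders for
the stream operations added by later patches in the series:

	#include <linux/gfp.h>
	#include <linux/pkram.h>
	#include <linux/types.h>

	static int example_save(const void *buf, size_t len)
	{
		struct pkram_stream ps;
		int err;

		/* Creates the node and links it to the node list. */
		err = pkram_prepare_save(&ps, "example-node", GFP_KERNEL);
		if (err)
			return err;	/* -ENAMETOOLONG, -ENOMEM or -EEXIST */

		err = pkram_write_data(&ps, buf, len);	/* hypothetical */
		if (err) {
			/* Unlinks and frees the node; nothing becomes loadable. */
			pkram_discard_save(&ps);
			return err;
		}

		/* Clears the access-mode bits; the node is now loadable. */
		pkram_finish_save(&ps);
		return 0;
	}

	static int example_load(void *buf, size_t len)
	{
		struct pkram_stream ps;
		int err;

		/* Unlinks the node so only one loader can obtain it. */
		err = pkram_prepare_load(&ps, "example-node");
		if (err)
			return err;	/* -ENOENT or -EBUSY */

		err = pkram_read_data(&ps, buf, len);	/* hypothetical */

		/* Frees the node page regardless of the read result. */
		pkram_finish_load(&ps);
		return err;
	}

Note that pkram_prepare_load() deletes the node from the list while holding
pkram_mutex, so a second concurrent loader of the same name simply sees
-ENOENT rather than racing with the first.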