* [PATCH 1/3] mm/migrate: remove useless mask of start address
2020-01-07 21:12 [PATCH 0/3] mm/migrate: add missing check for stable Ralph Campbell
@ 2020-01-07 21:12 ` Ralph Campbell
2020-01-08 7:11 ` Christoph Hellwig
2020-01-07 21:12 ` [PATCH 2/3] mm/migrate: clean up some minor coding style Ralph Campbell
2020-01-07 21:12 ` [PATCH 3/3] mm/migrate: add stable check in migrate_vma_insert_page() Ralph Campbell
2 siblings, 1 reply; 8+ messages in thread
From: Ralph Campbell @ 2020-01-07 21:12 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Jerome Glisse, John Hubbard, Christoph Hellwig, Jason Gunthorpe,
Bharata B Rao, Michal Hocko, Andrew Morton, Ralph Campbell
Addresses passed to walk_page_range() callback functions are already page
aligned and don't need to be masked with PAGE_MASK.
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
mm/migrate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
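
A minimal, self-contained userspace sketch of why the mask is redundant
(illustrative only, not kernel code; it assumes a 4 KiB page and defines
DEMO_ stand-ins for PAGE_SIZE/PAGE_MASK): once the page walker hands the
callback a page-aligned start, masking it again changes nothing.

/* Illustrative only: masking an already page-aligned address with the page
 * mask is a no-op, so the collect loops can begin at 'start' directly.
 * DEMO_PAGE_SIZE/DEMO_PAGE_MASK stand in for the kernel's PAGE_SIZE/PAGE_MASK.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

int main(void)
{
	unsigned long start = 0x7f2a10400000UL;	/* page aligned, as the walker guarantees */
	unsigned long end = start + 3 * DEMO_PAGE_SIZE;
	unsigned long addr;

	assert((start & DEMO_PAGE_MASK) == start);
	for (addr = start; addr < end; addr += DEMO_PAGE_SIZE)
		printf("would mark a migratable hole at %#lx\n", addr);
	return 0;
}
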
diff --git a/mm/migrate.c b/mm/migrate.c
index ebe2bf070653..b7f5d9ada429 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2136,7 +2136,7 @@ static int migrate_vma_collect_hole(unsigned long start,
struct migrate_vma *migrate = walk->private;
unsigned long addr;
- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
migrate->dst[migrate->npages] = 0;
migrate->npages++;
@@ -2153,7 +2153,7 @@ static int migrate_vma_collect_skip(unsigned long start,
struct migrate_vma *migrate = walk->private;
unsigned long addr;
- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
migrate->dst[migrate->npages] = 0;
migrate->src[migrate->npages++] = 0;
}
--
2.20.1
* [PATCH 2/3] mm/migrate: clean up some minor coding style
2020-01-07 21:12 [PATCH 0/3] mm/migrate: add missing check for stable Ralph Campbell
2020-01-07 21:12 ` [PATCH 1/3] mm/migrate: remove useless mask of start address Ralph Campbell
@ 2020-01-07 21:12 ` Ralph Campbell
2020-01-07 21:33 ` Chris Down
2020-01-08 7:11 ` Christoph Hellwig
2020-01-07 21:12 ` [PATCH 3/3] mm/migrate: add stable check in migrate_vma_insert_page() Ralph Campbell
2 siblings, 2 replies; 8+ messages in thread
From: Ralph Campbell @ 2020-01-07 21:12 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Jerome Glisse, John Hubbard, Christoph Hellwig, Jason Gunthorpe,
Bharata B Rao, Michal Hocko, Andrew Morton, Ralph Campbell
Fix some comment typos and clean up some minor coding style in preparation
for the next patch. No functional changes.
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
mm/migrate.c | 34 +++++++++++++---------------------
1 file changed, 13 insertions(+), 21 deletions(-)
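
For reference, a standalone sketch of the error-path consolidation applied
below (plain userspace C with stub functions, not the kernel code itself):
the duplicated unlock-and-cancel sequences all become jumps to a single
unlock_abort label that falls through to abort.

#include <stdbool.h>
#include <stdio.h>

static bool lock_pte(void)      { return true; }
static void unlock_pte(void)    { puts("pte unlocked"); }
static void cancel_charge(void) { puts("memcg charge cancelled"); }

/* Before: each early-exit branch repeated unlock + cancel + goto abort.
 * After: every such branch jumps to unlock_abort instead. */
static int insert_page(bool pte_busy, bool uffd_missing)
{
	if (!lock_pte())
		goto abort;		/* nothing to unlock yet */

	if (pte_busy)
		goto unlock_abort;
	if (uffd_missing)
		goto unlock_abort;

	unlock_pte();
	puts("page inserted");
	return 0;

unlock_abort:
	unlock_pte();
	cancel_charge();
abort:
	return -1;
}

int main(void)
{
	insert_page(false, true);	/* exercises the consolidated error path */
	return 0;
}
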
diff --git a/mm/migrate.c b/mm/migrate.c
index b7f5d9ada429..4b1a6d69afb5 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -986,7 +986,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
}
/*
- * Anonymous and movable page->mapping will be cleard by
+ * Anonymous and movable page->mapping will be cleared by
* free_pages_prepare so don't reset it here for keeping
* the type to work PageAnon, for example.
*/
@@ -1199,8 +1199,7 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
/*
* A page that has been migrated has all references
* removed and will be freed. A page that has not been
- * migrated will have kepts its references and be
- * restored.
+ * migrated will have kept its references and be restored.
*/
list_del(&page->lru);
@@ -2759,27 +2758,18 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
if (pte_present(*ptep)) {
unsigned long pfn = pte_pfn(*ptep);
- if (!is_zero_pfn(pfn)) {
- pte_unmap_unlock(ptep, ptl);
- mem_cgroup_cancel_charge(page, memcg, false);
- goto abort;
- }
+ if (!is_zero_pfn(pfn))
+ goto unlock_abort;
flush = true;
- } else if (!pte_none(*ptep)) {
- pte_unmap_unlock(ptep, ptl);
- mem_cgroup_cancel_charge(page, memcg, false);
- goto abort;
- }
+ } else if (!pte_none(*ptep))
+ goto unlock_abort;
/*
- * Check for usefaultfd but do not deliver the fault. Instead,
+ * Check for userfaultfd but do not deliver the fault. Instead,
* just back off.
*/
- if (userfaultfd_missing(vma)) {
- pte_unmap_unlock(ptep, ptl);
- mem_cgroup_cancel_charge(page, memcg, false);
- goto abort;
- }
+ if (userfaultfd_missing(vma))
+ goto unlock_abort;
inc_mm_counter(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, addr, false);
@@ -2803,6 +2793,9 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
*src = MIGRATE_PFN_MIGRATE;
return;
+unlock_abort:
+ pte_unmap_unlock(ptep, ptl);
+ mem_cgroup_cancel_charge(page, memcg, false);
abort:
*src &= ~MIGRATE_PFN_MIGRATE;
}
@@ -2835,9 +2828,8 @@ void migrate_vma_pages(struct migrate_vma *migrate)
}
if (!page) {
- if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
+ if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
continue;
- }
if (!notified) {
notified = true;
--
2.20.1
* [PATCH 3/3] mm/migrate: add stable check in migrate_vma_insert_page()
2020-01-07 21:12 [PATCH 0/3] mm/migrate: add missing check for stable Ralph Campbell
2020-01-07 21:12 ` [PATCH 1/3] mm/migrate: remove useless mask of start address Ralph Campbell
2020-01-07 21:12 ` [PATCH 2/3] mm/migrate: clean up some minor coding style Ralph Campbell
@ 2020-01-07 21:12 ` Ralph Campbell
2020-01-08 7:12 ` Christoph Hellwig
2 siblings, 1 reply; 8+ messages in thread
From: Ralph Campbell @ 2020-01-07 21:12 UTC (permalink / raw)
To: linux-mm, linux-kernel
Cc: Jerome Glisse, John Hubbard, Christoph Hellwig, Jason Gunthorpe,
Bharata B Rao, Michal Hocko, Andrew Morton, Ralph Campbell
migrate_vma_insert_page() closely follows the code in:
__handle_mm_fault()
handle_pte_fault()
do_anonymous_page()
Add a call to check_stable_address_space() after locking the page table
entry and before inserting a ZONE_DEVICE private zero page mapping, similar
to page faulting in a new anonymous page.
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
mm/migrate.c | 12 ++++++++++++
1 file changed, 12 insertions(+)
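
A rough sketch of the ordering this patch borrows from the anonymous fault
path (stubbed userspace C, illustrative only; address_space_unstable()
stands in for check_stable_address_space()): take the page table lock
first, refuse to insert anything if the mm is already being torn down (for
example by the OOM reaper), and only then examine the PTE.

#include <stdbool.h>
#include <stdio.h>

static void lock_ptl(void)   { puts("ptl locked"); }
static void unlock_ptl(void) { puts("ptl unlocked"); }

/* Stand-in for check_stable_address_space(): nonzero means the mm may
 * already be getting torn down, so no new pages may be inserted. */
static int address_space_unstable(bool oom_in_progress)
{
	return oom_in_progress ? -1 : 0;
}

static int insert_zero_page(bool oom_in_progress, bool pte_already_mapped)
{
	lock_ptl();

	if (address_space_unstable(oom_in_progress))
		goto unlock_abort;	/* same bail-out as the other races */
	if (pte_already_mapped)
		goto unlock_abort;

	puts("device private page inserted");
	unlock_ptl();
	return 0;

unlock_abort:
	unlock_ptl();
	return -1;
}

int main(void)
{
	insert_zero_page(true, false);	/* unstable mm: insertion refused */
	return 0;
}
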
diff --git a/mm/migrate.c b/mm/migrate.c
index 4b1a6d69afb5..403b82472d24 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -48,6 +48,7 @@
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
+#include <linux/oom.h>
#include <asm/tlbflush.h>
@@ -2675,6 +2676,14 @@ int migrate_vma_setup(struct migrate_vma *args)
}
EXPORT_SYMBOL(migrate_vma_setup);
+/*
+ * This code closely matches the code in:
+ * __handle_mm_fault()
+ * handle_pte_fault()
+ * do_anonymous_page()
+ * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
+ * private page.
+ */
static void migrate_vma_insert_page(struct migrate_vma *migrate,
unsigned long addr,
struct page *page,
@@ -2755,6 +2764,9 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+ if (check_stable_address_space(mm))
+ goto unlock_abort;
+
if (pte_present(*ptep)) {
unsigned long pfn = pte_pfn(*ptep);
--
2.20.1