LKML Archive on lore.kernel.org help / color / mirror / Atom feed
From: Andrea Arcangeli <andrea@qumranet.com> To: Nick Piggin <npiggin@suse.de> Cc: akpm@linux-foundation.org, Robin Holt <holt@sgi.com>, Avi Kivity <avi@qumranet.com>, Izik Eidus <izike@qumranet.com>, kvm-devel@lists.sourceforge.net, Peter Zijlstra <a.p.zijlstra@chello.nl>, general@lists.openfabrics.org, Steve Wise <swise@opengridcomputing.com>, Roland Dreier <rdreier@cisco.com>, Kanoj Sarcar <kanojsarcar@yahoo.com>, steiner@sgi.com, linux-kernel@vger.kernel.org, linux-mm@kvack.org, daniel.blueman@quadrics.com, Christoph Lameter <clameter@sgi.com> Subject: [PATCH] KVM swapping with mmu notifiers #v7 Date: Wed, 27 Feb 2008 23:06:56 +0100 [thread overview] Message-ID: <20080227220656.GJ28483@v2.random> (raw) In-Reply-To: <20080220104517.GV7128@v2.random> Same as before but on one hand ported to #v7 API and on the other hand ported to latest kvm.git. Signed-off-by: Andrea Arcangeli <andrea@qumranet.com> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 41962e7..e1287ab 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -21,6 +21,7 @@ config KVM tristate "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM && EXPERIMENTAL select PREEMPT_NOTIFIERS + select MMU_NOTIFIER select ANON_INODES ---help--- Support hosting fully virtualized guest machines using hardware diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 4583329..4067b0f 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -642,6 +642,110 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn) account_shadowed(kvm, gfn); } +static void kvm_unmap_spte(struct kvm *kvm, u64 *spte) +{ + struct page *page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); + get_page(page); + rmap_remove(kvm, spte); + set_shadow_pte(spte, shadow_trap_nonpresent_pte); + kvm_flush_remote_tlbs(kvm); + __free_page(page); +} + +static void kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) +{ + u64 *spte, *curr_spte; + + spte = rmap_next(kvm, rmapp, NULL); + while (spte) 
{ + BUG_ON(!(*spte & PT_PRESENT_MASK)); + rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); + curr_spte = spte; + spte = rmap_next(kvm, rmapp, spte); + kvm_unmap_spte(kvm, curr_spte); + } +} + +void kvm_unmap_hva(struct kvm *kvm, unsigned long hva) +{ + int i; + + /* + * If mmap_sem isn't taken, we can look the memslots with only + * the mmu_lock by skipping over the slots with userspace_addr == 0. + */ + spin_lock(&kvm->mmu_lock); + for (i = 0; i < kvm->nmemslots; i++) { + struct kvm_memory_slot *memslot = &kvm->memslots[i]; + unsigned long start = memslot->userspace_addr; + unsigned long end; + + /* mmu_lock protects userspace_addr */ + if (!start) + continue; + + end = start + (memslot->npages << PAGE_SHIFT); + if (hva >= start && hva < end) { + gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; + kvm_unmap_rmapp(kvm, &memslot->rmap[gfn_offset]); + } + } + spin_unlock(&kvm->mmu_lock); +} + +static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) +{ + u64 *spte; + int young = 0; + + spte = rmap_next(kvm, rmapp, NULL); + while (spte) { + int _young; + u64 _spte = *spte; + BUG_ON(!(_spte & PT_PRESENT_MASK)); + _young = _spte & PT_ACCESSED_MASK; + if (_young) { + young = !!_young; + set_shadow_pte(spte, _spte & ~PT_ACCESSED_MASK); + } + spte = rmap_next(kvm, rmapp, spte); + } + return young; +} + +int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ + int i; + int young = 0; + + /* + * If mmap_sem isn't taken, we can look the memslots with only + * the mmu_lock by skipping over the slots with userspace_addr == 0. 
+ */ + spin_lock(&kvm->mmu_lock); + for (i = 0; i < kvm->nmemslots; i++) { + struct kvm_memory_slot *memslot = &kvm->memslots[i]; + unsigned long start = memslot->userspace_addr; + unsigned long end; + + /* mmu_lock protects userspace_addr */ + if (!start) + continue; + + end = start + (memslot->npages << PAGE_SHIFT); + if (hva >= start && hva < end) { + gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; + young |= kvm_age_rmapp(kvm, &memslot->rmap[gfn_offset]); + } + } + spin_unlock(&kvm->mmu_lock); + + if (young) + kvm_flush_remote_tlbs(kvm); + + return young; +} + #ifdef MMU_DEBUG static int is_empty_shadow_page(u64 *spt) { diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 17f9d16..b014b19 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -380,6 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, int r; struct page *page; int largepage = 0; + unsigned mmu_seq; pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code); kvm_mmu_audit(vcpu, "pre page fault"); @@ -415,6 +416,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, largepage = 1; } } + mmu_seq = read_seqbegin(&vcpu->kvm->arch.mmu_notifier_invalidate_lock); page = gfn_to_page(vcpu->kvm, walker.gfn); up_read(¤t->mm->mmap_sem); @@ -440,6 +442,15 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, ++vcpu->stat.pf_fixed; kvm_mmu_audit(vcpu, "post page fault (fixed)"); spin_unlock(&vcpu->kvm->mmu_lock); + + if (read_seqretry(&vcpu->kvm->arch.mmu_notifier_invalidate_lock, mmu_seq)) { + down_read(¤t->mm->mmap_sem); + if (page != gfn_to_page(vcpu->kvm, walker.gfn)) + BUG(); + up_read(¤t->mm->mmap_sem); + kvm_release_page_clean(page); + } + up_read(&vcpu->kvm->slots_lock); return write_pt; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6f09840..6eafb74 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3319,6 +3319,47 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 
free_page((unsigned long)vcpu->arch.pio_data); } +static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) +{ + struct kvm_arch *kvm_arch; + kvm_arch = container_of(mn, struct kvm_arch, mmu_notifier); + return container_of(kvm_arch, struct kvm, arch); +} + +void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + BUG_ON(mm != kvm->mm); + write_seqlock(&kvm->arch.mmu_notifier_invalidate_lock); + kvm_unmap_hva(kvm, address); + write_sequnlock(&kvm->arch.mmu_notifier_invalidate_lock); +} + +void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + for (; start < end; start += PAGE_SIZE) + kvm_mmu_notifier_invalidate_page(mn, mm, start); +} + +int kvm_mmu_notifier_age_page(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + BUG_ON(mm != kvm->mm); + return kvm_age_hva(kvm, address); +} + +static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { + .invalidate_page = kvm_mmu_notifier_invalidate_page, + .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, + .age_page = kvm_mmu_notifier_age_page, +}; + struct kvm *kvm_arch_create_vm(void) { struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); @@ -3328,6 +3369,10 @@ struct kvm *kvm_arch_create_vm(void) INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); + kvm->arch.mmu_notifier.ops = &kvm_mmu_notifier_ops; + mmu_notifier_register(&kvm->arch.mmu_notifier, current->mm); + seqlock_init(&kvm->arch.mmu_notifier_invalidate_lock); + return kvm; } diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h index 024b57c..305b7c3 100644 --- a/include/asm-x86/kvm_host.h +++ b/include/asm-x86/kvm_host.h @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/mm.h> +#include <linux/mmu_notifier.h> #include <linux/kvm.h> #include 
<linux/kvm_para.h> @@ -303,6 +304,9 @@ struct kvm_arch{ struct page *apic_access_page; gpa_t wall_clock; + + struct mmu_notifier mmu_notifier; + seqlock_t mmu_notifier_invalidate_lock; }; struct kvm_vm_stat { @@ -422,6 +426,8 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu); int kvm_mmu_setup(struct kvm_vcpu *vcpu); void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); +void kvm_unmap_hva(struct kvm *kvm, unsigned long hva); +int kvm_age_hva(struct kvm *kvm, unsigned long hva); int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); void kvm_mmu_zap_all(struct kvm *kvm); As usual (for completeness) I append the change to the memslot readonly locking through kvm->mmu_lock: Signed-off-by: Andrea Arcangeli <andrea@qumranet.com> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6f09840..a519fd8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3379,16 +3379,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm, */ if (!user_alloc) { if (npages && !old.rmap) { + unsigned long userspace_addr; + down_write(¤t->mm->mmap_sem); - memslot->userspace_addr = do_mmap(NULL, 0, - npages * PAGE_SIZE, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - 0); + userspace_addr = do_mmap(NULL, 0, + npages * PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + 0); up_write(¤t->mm->mmap_sem); - if (IS_ERR((void *)memslot->userspace_addr)) - return PTR_ERR((void *)memslot->userspace_addr); + if (IS_ERR((void *)userspace_addr)) + return PTR_ERR((void *)userspace_addr); + + /* set userspace_addr atomically for kvm_hva_to_rmapp */ + spin_lock(&kvm->mmu_lock); + memslot->userspace_addr = userspace_addr; + spin_unlock(&kvm->mmu_lock); } else { if (!old.user_alloc && old.rmap) { int ret; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 30bf832..8f3b6d6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -326,7 +326,15 @@ int __kvm_set_memory_region(struct kvm *kvm, 
memset(new.rmap, 0, npages * sizeof(*new.rmap)); new.user_alloc = user_alloc; - new.userspace_addr = mem->userspace_addr; + /* + * hva_to_rmmap() serializes with the mmu_lock and to be + * safe it has to ignore memslots with !user_alloc && + * !userspace_addr. + */ + if (user_alloc) + new.userspace_addr = mem->userspace_addr; + else + new.userspace_addr = 0; } if (npages && !new.lpage_info) { int largepages = npages / KVM_PAGES_PER_HPAGE; @@ -355,14 +363,18 @@ int __kvm_set_memory_region(struct kvm *kvm, memset(new.dirty_bitmap, 0, dirty_bytes); } + spin_lock(&kvm->mmu_lock); if (mem->slot >= kvm->nmemslots) kvm->nmemslots = mem->slot + 1; *memslot = new; + spin_unlock(&kvm->mmu_lock); r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc); if (r) { + spin_lock(&kvm->mmu_lock); *memslot = old; + spin_unlock(&kvm->mmu_lock); goto out_free; }
next prev parent reply other threads:[~2008-02-27 22:07 UTC|newest] Thread overview: 120+ messages / expand[flat|nested] mbox.gz Atom feed top 2008-02-19 8:43 [patch] my mmu notifiers Nick Piggin 2008-02-19 8:44 ` [patch] my mmu notifier sample driver Nick Piggin 2008-02-19 11:59 ` [patch] my mmu notifiers Robin Holt 2008-02-19 13:58 ` Andrea Arcangeli 2008-02-19 14:27 ` Jack Steiner 2008-02-19 23:04 ` Nick Piggin 2008-02-20 0:52 ` Andrea Arcangeli 2008-02-20 2:46 ` Robin Holt 2008-02-27 22:50 ` Christoph Lameter 2008-02-19 22:59 ` Nick Piggin 2008-02-20 0:46 ` Andrea Arcangeli 2008-02-27 22:55 ` Christoph Lameter 2008-02-19 23:11 ` Nick Piggin 2008-02-19 23:40 ` Jack Steiner 2008-02-21 4:42 ` Nick Piggin 2008-02-22 16:31 ` Jack Steiner 2008-02-20 1:09 ` Andrea Arcangeli 2008-02-20 10:39 ` [PATCH] mmu notifiers #v6 Andrea Arcangeli 2008-02-20 10:45 ` [PATCH] KVM swapping (+ seqlock fix) with " Andrea Arcangeli 2008-02-27 22:06 ` Andrea Arcangeli [this message] 2008-02-28 8:42 ` [PATCH] KVM swapping with mmu notifiers #v7 izik eidus 2008-02-20 11:33 ` [PATCH] mmu notifiers #v6 Robin Holt 2008-02-20 12:03 ` Andrea Arcangeli 2008-02-20 12:24 ` Robin Holt 2008-02-20 12:32 ` Andrea Arcangeli 2008-02-20 13:15 ` Robin Holt 2008-02-21 5:02 ` Nick Piggin 2008-02-20 14:41 ` Robin Holt 2008-02-20 15:34 ` Andrea Arcangeli 2008-02-20 21:03 ` Jack Steiner 2008-02-21 4:54 ` Nick Piggin 2008-02-21 14:40 ` Andrea Arcangeli 2008-02-21 16:10 ` Jack Steiner 2008-02-27 19:26 ` [PATCH] mmu notifiers #v7 Andrea Arcangeli 2008-02-27 20:04 ` Peter Zijlstra 2008-02-27 23:06 ` Christoph Lameter 2008-02-27 23:43 ` [kvm-devel] " Andrea Arcangeli 2008-02-28 0:08 ` Christoph Lameter 2008-02-28 0:21 ` Andrea Arcangeli 2008-02-28 0:24 ` Christoph Lameter 2008-02-28 19:48 ` Christoph Lameter 2008-02-28 21:52 ` Andrea Arcangeli 2008-02-28 22:00 ` Christoph Lameter 2008-02-28 23:17 ` Jack Steiner 2008-02-29 0:24 ` Andrea Arcangeli 2008-02-29 1:13 ` Christoph Lameter 2008-02-28 23:05 ` Christoph 
Lameter 2008-02-29 0:40 ` Andrea Arcangeli 2008-02-29 0:56 ` Andrew Morton 2008-02-29 1:03 ` Christoph Lameter 2008-02-29 13:09 ` Andrea Arcangeli 2008-02-29 19:46 ` Christoph Lameter 2008-03-02 15:54 ` [PATCH] mmu notifiers #v8 Andrea Arcangeli 2008-03-02 16:03 ` [PATCH] mmu notifiers #v8 + xpmem Andrea Arcangeli 2008-03-02 16:23 ` Peter Zijlstra 2008-03-03 3:29 ` [PATCH] mmu notifiers #v8 Nick Piggin 2008-03-03 12:51 ` Andrea Arcangeli 2008-03-03 13:10 ` Nick Piggin 2008-03-03 13:24 ` Andrea Arcangeli 2008-03-03 15:18 ` Jack Steiner 2008-03-03 16:59 ` Nick Piggin 2008-03-03 18:06 ` Jack Steiner 2008-03-03 18:09 ` Avi Kivity 2008-03-03 18:23 ` Jack Steiner 2008-03-03 18:45 ` Nick Piggin 2008-03-03 19:15 ` Jack Steiner 2008-03-04 10:35 ` Peter Zijlstra 2008-03-04 14:44 ` Jack Steiner 2008-03-03 19:02 ` Christoph Lameter 2008-03-03 19:01 ` Christoph Lameter 2008-03-03 21:15 ` Andrea Arcangeli 2008-03-05 0:37 ` Nick Piggin 2008-03-05 18:48 ` Christoph Lameter 2008-03-06 2:59 ` Nick Piggin 2008-03-03 3:33 ` Nick Piggin 2008-03-03 19:03 ` Christoph Lameter 2008-03-03 3:34 ` Nick Piggin 2008-03-03 19:04 ` Christoph Lameter 2008-03-03 3:39 ` Nick Piggin 2008-03-03 21:37 ` [PATCH] mmu notifiers #v9 Andrea Arcangeli 2008-03-03 22:05 ` [PATCH] KVM swapping with " Andrea Arcangeli 2008-03-04 0:44 ` izik eidus 2008-03-04 7:31 ` [RFC] Notifier for Externally Mapped Memory (EMM) Christoph Lameter 2008-03-04 7:34 ` [Early draft] Conversion of i_mmap_lock to semaphore Christoph Lameter 2008-03-04 13:30 ` [RFC] Notifier for Externally Mapped Memory (EMM) Andrea Arcangeli 2008-03-04 19:00 ` Christoph Lameter 2008-03-04 22:20 ` Andrea Arcangeli 2008-03-04 22:35 ` Christoph Lameter 2008-03-04 22:42 ` Peter Zijlstra 2008-03-04 23:14 ` Christoph Lameter 2008-03-04 23:25 ` Peter Zijlstra 2008-03-04 23:30 ` Peter Zijlstra 2008-03-05 5:09 ` Avi Kivity 2008-03-05 9:47 ` Robin Holt 2008-03-05 9:53 ` Avi Kivity 2008-03-05 10:02 ` [kvm-devel] " Dor Laor 2008-03-07 15:17 ` [PATCH] 2/4 move all 
invalidate_page outside of PT lock (#v9 was 1/4) Andrea Arcangeli 2008-03-07 15:23 ` [PATCH] 3/4 combine RCU with seqlock to allow mmu notifier methods to sleep " Andrea Arcangeli 2008-03-07 15:52 ` [PATCH] 4/4 i_mmap_lock spinlock2rwsem " Andrea Arcangeli 2008-03-07 20:03 ` Christoph Lameter 2008-03-19 21:27 ` Christoph Lameter 2008-03-07 16:52 ` [PATCH] 3/4 combine RCU with seqlock to allow mmu notifier methods to sleep " Peter Zijlstra 2008-03-07 17:50 ` Andrea Arcangeli 2008-03-07 18:01 ` Peter Zijlstra 2008-03-07 18:45 ` Andrea Arcangeli 2008-03-07 19:47 ` Andrea Arcangeli 2008-03-07 20:15 ` Christoph Lameter 2008-03-07 20:12 ` Christoph Lameter 2008-03-07 20:10 ` Christoph Lameter 2008-03-07 20:00 ` Christoph Lameter 2008-03-07 19:54 ` [PATCH] 2/4 move all invalidate_page outside of PT lock " Christoph Lameter 2008-03-04 13:21 ` [PATCH] KVM swapping with mmu notifiers #v9 Andrea Arcangeli 2008-02-21 4:47 ` [patch] my mmu notifiers Nick Piggin 2008-02-20 2:49 ` Robin Holt 2008-02-27 22:56 ` Christoph Lameter -- strict thread matches above, loose matches on Subject: below -- 2008-02-15 6:48 [patch 0/6] MMU Notifiers V7 Christoph Lameter 2008-02-16 10:48 ` [PATCH] KVM swapping with " Andrea Arcangeli 2008-02-16 11:08 ` Andrew Morton 2008-02-18 12:17 ` Andrea Arcangeli 2008-02-16 11:51 ` Robin Holt 2008-02-18 12:35 ` Andrea Arcangeli
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20080227220656.GJ28483@v2.random \ --to=andrea@qumranet.com \ --cc=a.p.zijlstra@chello.nl \ --cc=akpm@linux-foundation.org \ --cc=avi@qumranet.com \ --cc=clameter@sgi.com \ --cc=daniel.blueman@quadrics.com \ --cc=general@lists.openfabrics.org \ --cc=holt@sgi.com \ --cc=izike@qumranet.com \ --cc=kanojsarcar@yahoo.com \ --cc=kvm-devel@lists.sourceforge.net \ --cc=linux-kernel@vger.kernel.org \ --cc=linux-mm@kvack.org \ --cc=npiggin@suse.de \ --cc=rdreier@cisco.com \ --cc=steiner@sgi.com \ --cc=swise@opengridcomputing.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).