From: Maxim Levitsky <mlevitsk@redhat.com>
To: kvm@vger.kernel.org
Cc: Wanpeng Li <wanpengli@tencent.com>,
Thomas Gleixner <tglx@linutronix.de>,
Joerg Roedel <joro@8bytes.org>, Borislav Petkov <bp@alien8.de>,
Sean Christopherson <seanjc@google.com>,
Jim Mattson <jmattson@google.com>,
x86@kernel.org (maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)),
linux-kernel@vger.kernel.org (open list:X86 ARCHITECTURE (32-BIT AND 64-BIT)),
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Ingo Molnar <mingo@redhat.com>, "H. Peter Anvin" <hpa@zytor.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Maxim Levitsky <mlevitsk@redhat.com>
Subject: [PATCH v3 01/12] Revert "KVM: x86/mmu: Allow zap gfn range to operate under the mmu read lock"
Date: Mon, 2 Aug 2021 21:33:18 +0300
Message-ID: <20210802183329.2309921-2-mlevitsk@redhat.com>
In-Reply-To: <20210802183329.2309921-1-mlevitsk@redhat.com>
From: Sean Christopherson <seanjc@google.com>
This patch, together with the next one, fixes a future race between
kvm_zap_gfn_range and the page fault handler, which will arise once
the AVIC memslot is only partially disabled.
This is based on a patch suggested by Sean Christopherson:
https://lkml.org/lkml/2021/7/22/1025
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
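Note on the locking change (an illustration added for context; not part of
the patch): with the TDP MMU zap running under mmu_lock held for read, a
concurrent page fault handler, which also runs under the read lock, can
install an entry in the range being zapped, and the zap can finish without
seeing it. Taking the write lock, as kvm_zap_gfn_range does after this
revert, serializes the zap against all fault handlers (the remaining
notifier-count piece is handled by the next patch in the series). Below is
a minimal userspace analogue of the pattern built on a POSIX rwlock; every
name in it is an illustrative stand-in, not KVM code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES 16

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool present[NR_ENTRIES];        /* stand-in for SPTEs */

/* "Fault handler": runs under the read lock and may install entries. */
static void *fault_in(void *arg)
{
        long gfn = (long)arg;

        pthread_rwlock_rdlock(&mmu_lock);
        present[gfn] = true;
        pthread_rwlock_unlock(&mmu_lock);
        return NULL;
}

/*
 * "Zap": under the write lock no fault handler can install an entry in
 * [start, end) while the zap runs; under a read lock (the reverted
 * behavior) another read-side thread could repopulate concurrently.
 */
static void zap_range(long start, long end)
{
        pthread_rwlock_wrlock(&mmu_lock);
        for (long gfn = start; gfn < end; gfn++)
                present[gfn] = false;
        pthread_rwlock_unlock(&mmu_lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fault_in, (void *)5L);
        pthread_join(t, NULL);
        zap_range(0, NR_ENTRIES);
        printf("gfn 5 present after zap: %d\n", present[5]);
        return 0;
}

Build with cc -pthread; pthread_rwlock_wrlock() waits for all readers to
drain, which is what makes the zap atomic with respect to the fault-in
path.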
 arch/x86/kvm/mmu/mmu.c     | 19 ++++++++-----------
 arch/x86/kvm/mmu/tdp_mmu.c | 15 ++++-----------
 arch/x86/kvm/mmu/tdp_mmu.h | 11 ++++-------
 3 files changed, 16 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a8cdfd8d45c4..9d78cb1c0f35 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5638,8 +5638,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
         int i;
         bool flush = false;
 
+        write_lock(&kvm->mmu_lock);
+
         if (kvm_memslots_have_rmaps(kvm)) {
-                write_lock(&kvm->mmu_lock);
                 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                         slots = __kvm_memslots(kvm, i);
                         kvm_for_each_memslot(memslot, slots) {
@@ -5659,22 +5660,18 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
                 }
                 if (flush)
                         kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-                write_unlock(&kvm->mmu_lock);
         }
 
         if (is_tdp_mmu_enabled(kvm)) {
-                flush = false;
-
-                read_lock(&kvm->mmu_lock);
                 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
                         flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
-                                                          gfn_end, flush, true);
-                if (flush)
-                        kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
-                                                           gfn_end);
-
-                read_unlock(&kvm->mmu_lock);
+                                                          gfn_end, flush);
         }
+
+        if (flush)
+                kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+
+        write_unlock(&kvm->mmu_lock);
 }
 
 static bool slot_rmap_write_protect(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 43f12f5d12c0..3e0222ce3f4e 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -777,21 +777,15 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * non-root pages mapping GFNs strictly within that range. Returns true if
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
- *
- * If shared is true, this thread holds the MMU lock in read mode and must
- * account for the possibility that other threads are modifying the paging
- * structures concurrently. If shared is false, this thread should hold the
- * MMU in write mode.
  */
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
-                                 gfn_t end, bool can_yield, bool flush,
-                                 bool shared)
+                                 gfn_t end, bool can_yield, bool flush)
 {
         struct kvm_mmu_page *root;
 
-        for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
+        for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
                 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
-                                      shared);
+                                      false);
 
         return flush;
 }
@@ -803,8 +797,7 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
         int i;
 
         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-                flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
-                                                  flush, false);
+                flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, flush);
 
         if (flush)
                 kvm_flush_remote_tlbs(kvm);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index b224d126adf9..358f447d4012 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -20,14 +20,11 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                           bool shared);
 
 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
-                                 gfn_t end, bool can_yield, bool flush,
-                                 bool shared);
+                                 gfn_t end, bool can_yield, bool flush);
 static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
-                                             gfn_t start, gfn_t end, bool flush,
-                                             bool shared)
+                                             gfn_t start, gfn_t end, bool flush)
 {
-        return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
-                                           shared);
+        return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
 }
 static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
@@ -44,7 +41,7 @@ static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
  */
         lockdep_assert_held_write(&kvm->mmu_lock);
         return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
-                                           sp->gfn, end, false, false, false);
+                                           sp->gfn, end, false, false);
 }
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
--
2.26.3
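One design point worth noting about the hunks above (my reading, not text
from the patch): because both zap passes now run inside a single write-side
critical section, the TDP MMU pass no longer resets flush to false, so a
flush requested by either pass is honored by the final
kvm_flush_remote_tlbs_with_address() before the lock is dropped. A minimal
standalone sketch of that accumulate-then-flush shape, with all functions
as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two zap passes; each returns whether
 * it zapped anything, ORed with the flush state it was handed. */
static bool zap_rmap_pass(bool flush) { return flush || true; }
static bool zap_tdp_pass(bool flush)  { return flush || false; }

int main(void)
{
        bool flush = false;

        /* Both passes accumulate into one flag instead of each pass
         * resetting it and flushing separately. */
        flush = zap_rmap_pass(flush);
        flush = zap_tdp_pass(flush);

        if (flush)
                puts("flush remote TLBs once, before dropping the lock");
        return 0;
}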