From: Maxim Levitsky <mlevitsk@redhat.com>
To: kvm@vger.kernel.org
Cc: Jim Mattson <jmattson@google.com>,
linux-kernel@vger.kernel.org, Wanpeng Li <wanpengli@tencent.com>,
Borislav Petkov <bp@alien8.de>, Joerg Roedel <joro@8bytes.org>,
Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
"H. Peter Anvin" <hpa@zytor.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>,
Vitaly Kuznetsov <vkuznets@redhat.com>,
Sean Christopherson <seanjc@google.com>,
x86@kernel.org (maintainer:X86 ARCHITECTURE (32-BIT AND 64-BIT)),
Maxim Levitsky <mlevitsk@redhat.com>
Subject: [PATCH v4 01/16] Revert "KVM: x86/mmu: Allow zap gfn range to operate under the mmu read lock"
Date: Tue, 10 Aug 2021 23:52:36 +0300
Message-ID: <20210810205251.424103-2-mlevitsk@redhat.com>
In-Reply-To: <20210810205251.424103-1-mlevitsk@redhat.com>

From: Sean Christopherson <seanjc@google.com>

This, together with the next patch, will fix a future race between
kvm_zap_gfn_range and the page fault handler, which will arise once
the AVIC memslot can be only partially disabled.

The performance impact is minimal, since kvm_zap_gfn_range has only
two callers, update_mtrr() and kvm_post_set_cr0(). Both use it only
when the guest has non-coherent DMA, in order to honor the guest's
UC memtype.

MTRR and CD setup happens only at boot, and generally at a point
where the page tables should be small (for CD) or should not
include the affected GFNs at all (for MTRRs).

This is based on a patch suggested by Sean Christopherson:
https://lkml.org/lkml/2021/7/22/1025

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
arch/x86/kvm/mmu/mmu.c | 19 ++++++++-----------
arch/x86/kvm/mmu/tdp_mmu.c | 15 ++++-----------
arch/x86/kvm/mmu/tdp_mmu.h | 11 ++++-------
3 files changed, 16 insertions(+), 29 deletions(-)
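
Editor's note (not part of the commit message, ignored by git-am): the
following is a minimal userspace sketch of the locking rationale. It is
NOT kernel code; the lock, the thread bodies, and all names are
illustrative stand-ins for kvm->mmu_lock, kvm_zap_gfn_range(), and a
TDP MMU page fault, which takes mmu_lock for read. It models the point
of the revert: the zap now holds the lock in write mode across both
the rmap phase and the TDP MMU phase, so a concurrent fault can observe
the range either fully mapped or fully zapped, never the half-zapped
intermediate state that the reverted code exposed by dropping to a
read lock between the two phases.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;
static int range_mapped = 1;	/* stand-in for SPTEs covering the range */

static void *zap_thread(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&mmu_lock);	/* write_lock(&kvm->mmu_lock) */
	range_mapped = 0;			/* phase 1: zap the rmaps */
	/* the reverted code downgraded to a read lock here, opening a window */
	range_mapped = 0;			/* phase 2: zap the TDP MMU */
	pthread_rwlock_unlock(&mmu_lock);
	return NULL;
}

static void *fault_thread(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&mmu_lock);	/* a TDP MMU fault takes the lock for read */
	/* prints 1 (before the zap) or 0 (after both phases), never in between */
	printf("fault sees range_mapped=%d\n", range_mapped);
	pthread_rwlock_unlock(&mmu_lock);
	return NULL;
}

int main(void)
{
	pthread_t zap, fault;
	pthread_create(&zap, NULL, zap_thread, NULL);
	pthread_create(&fault, NULL, fault_thread, NULL);
	pthread_join(zap, NULL);
	pthread_join(fault, NULL);
	return 0;
}

Build with 'cc -pthread race_model.c' to poke at it; the only property
being modeled is that the write lock excludes readers for the whole
two-phase zap.
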
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d574c68cbc5c..9e3839971844 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5654,8 +5654,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
int i;
bool flush = false;
+ write_lock(&kvm->mmu_lock);
+
if (kvm_memslots_have_rmaps(kvm)) {
- write_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
kvm_for_each_memslot(memslot, slots) {
@@ -5675,22 +5676,18 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
}
if (flush)
kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
- write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
- flush = false;
-
- read_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
- gfn_end, flush, true);
- if (flush)
- kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
- gfn_end);
-
- read_unlock(&kvm->mmu_lock);
+ gfn_end, flush);
}
+
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+
+ write_unlock(&kvm->mmu_lock);
}
static bool slot_rmap_write_protect(struct kvm *kvm,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index dab6cb46cdb2..133d94e30c2b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -802,21 +802,15 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
* non-root pages mapping GFNs strictly within that range. Returns true if
* SPTEs have been cleared and a TLB flush is needed before releasing the
* MMU lock.
- *
- * If shared is true, this thread holds the MMU lock in read mode and must
- * account for the possibility that other threads are modifying the paging
- * structures concurrently. If shared is false, this thread should hold the
- * MMU in write mode.
*/
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
- gfn_t end, bool can_yield, bool flush,
- bool shared)
+ gfn_t end, bool can_yield, bool flush)
{
struct kvm_mmu_page *root;
- for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
+ for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
- shared);
+ false);
return flush;
}
@@ -828,8 +822,7 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
int i;
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
- flush, false);
+ flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn, flush);
if (flush)
kvm_flush_remote_tlbs(kvm);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index b224d126adf9..358f447d4012 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -20,14 +20,11 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared);
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
- gfn_t end, bool can_yield, bool flush,
- bool shared);
+ gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
- gfn_t start, gfn_t end, bool flush,
- bool shared)
+ gfn_t start, gfn_t end, bool flush)
{
- return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
- shared);
+ return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
@@ -44,7 +41,7 @@ static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
*/
lockdep_assert_held_write(&kvm->mmu_lock);
return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
- sp->gfn, end, false, false, false);
+ sp->gfn, end, false, false);
}
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
--
2.26.3