LKML Archive on lore.kernel.org
help / color / mirror / Atom feed
From: Gavin Shan <gshan@redhat.com>
To: Eric Auger <eauger@redhat.com>, kvmarm@lists.cs.columbia.edu
Cc: kvm@vger.kernel.org, maz@kernel.org,
linux-kernel@vger.kernel.org, shan.gavin@gmail.com,
Jonathan.Cameron@huawei.com, pbonzini@redhat.com,
vkuznets@redhat.com, will@kernel.org
Subject: Re: [PATCH v4 04/15] KVM: x86: Use generic async PF slot management
Date: Thu, 13 Jan 2022 15:44:29 +0800 [thread overview]
Message-ID: <e1dd3940-95fb-e10b-93d6-bcbc11d6bc4e@redhat.com> (raw)
In-Reply-To: <a3b0e70a-eddd-9a85-2c9c-ba5446ac542b@redhat.com>
Hi Eric,
On 11/11/21 1:03 AM, Eric Auger wrote:
> On 8/15/21 2:59 AM, Gavin Shan wrote:
>> This uses the generic slot management mechanism for asynchronous
> Now we have moved the hash table management in the generic code, Use
> this latter ...
Ok.
>> page fault by enabling CONFIG_KVM_ASYNC_PF_SLOT because the private
>> implementation is a complete duplicate of the generic one.
>>
>> The changes introduced by this are pretty mechanical and shouldn't
>> cause any logical changes.
> suggest: No functional change intended.
Ok. The commit log will be improved accordingly in the next respin.
>>
>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>> ---
>> arch/x86/include/asm/kvm_host.h | 2 -
>> arch/x86/kvm/Kconfig | 1 +
>> arch/x86/kvm/mmu/mmu.c | 2 +-
>> arch/x86/kvm/x86.c | 86 +++------------------------------
>> 4 files changed, 8 insertions(+), 83 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 974cbfb1eefe..409c1e7137cd 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -810,7 +810,6 @@ struct kvm_vcpu_arch {
>>
>> struct {
>> bool halted;
>> - gfn_t gfns[ASYNC_PF_PER_VCPU];
>> struct gfn_to_hva_cache data;
>> u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
>> u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
>> @@ -1878,7 +1877,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
>> struct kvm_async_pf *work);
>> void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
>> bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
>> -extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
>>
>> int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
>> int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
>> diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
>> index ac69894eab88..53a6ef30b6ee 100644
>> --- a/arch/x86/kvm/Kconfig
>> +++ b/arch/x86/kvm/Kconfig
>> @@ -32,6 +32,7 @@ config KVM
>> select HAVE_KVM_IRQ_ROUTING
>> select HAVE_KVM_EVENTFD
>> select KVM_ASYNC_PF
>> + select KVM_ASYNC_PF_SLOT
>> select USER_RETURN_NOTIFIER
>> select KVM_MMIO
>> select SCHED_INFO
>> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
>> index c4f4fa23320e..cd8aaa662ac2 100644
>> --- a/arch/x86/kvm/mmu/mmu.c
>> +++ b/arch/x86/kvm/mmu/mmu.c
>> @@ -3799,7 +3799,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
>>
>> if (!prefault && kvm_can_do_async_pf(vcpu)) {
>> trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
>> - if (kvm_find_async_pf_gfn(vcpu, gfn)) {
>> + if (kvm_async_pf_find_slot(vcpu, gfn)) {
>> trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
>> kvm_make_request(KVM_REQ_APF_HALT, vcpu);
>> return true;
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 7f35d9324b99..a5f7d6122178 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -332,13 +332,6 @@ static struct kmem_cache *kvm_alloc_emulator_cache(void)
>>
>> static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
>>
>> -static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
>> -{
>> - int i;
>> - for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
>> - vcpu->arch.apf.gfns[i] = ~0;
>> -}
>> -
>> static void kvm_on_user_return(struct user_return_notifier *urn)
>> {
>> unsigned slot;
>> @@ -854,7 +847,7 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon
>> {
>> if ((cr0 ^ old_cr0) & X86_CR0_PG) {
>> kvm_clear_async_pf_completion_queue(vcpu);
>> - kvm_async_pf_hash_reset(vcpu);
>> + kvm_async_pf_reset_slot(vcpu);
>> }
>>
>> if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
>> @@ -3118,7 +3111,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
>>
>> if (!kvm_pv_async_pf_enabled(vcpu)) {
>> kvm_clear_async_pf_completion_queue(vcpu);
>> - kvm_async_pf_hash_reset(vcpu);
>> + kvm_async_pf_reset_slot(vcpu);
>> return 0;
>> }
>>
>> @@ -10704,7 +10697,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
>>
>> vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
>>
>> - kvm_async_pf_hash_reset(vcpu);
>> + kvm_async_pf_reset_slot(vcpu);
>> kvm_pmu_init(vcpu);
>>
>> vcpu->arch.pending_external_vector = -1;
>> @@ -10828,7 +10821,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
>> kvmclock_reset(vcpu);
>>
>> kvm_clear_async_pf_completion_queue(vcpu);
>> - kvm_async_pf_hash_reset(vcpu);
>> + kvm_async_pf_reset_slot(vcpu);
>> vcpu->arch.apf.halted = false;
>>
>> if (vcpu->arch.guest_fpu && kvm_mpx_supported()) {
>> @@ -11737,73 +11730,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
>> kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
>> }
>>
>> -static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
>> -{
>> - BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
>> -
>> - return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
>> -}
>> -
>> -static inline u32 kvm_async_pf_next_probe(u32 key)
>> -{
>> - return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
>> -}
>> -
>> -static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
>> -{
>> - u32 key = kvm_async_pf_hash_fn(gfn);
>> -
>> - while (vcpu->arch.apf.gfns[key] != ~0)
>> - key = kvm_async_pf_next_probe(key);
>> -
>> - vcpu->arch.apf.gfns[key] = gfn;
>> -}
>> -
>> -static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
>> -{
>> - int i;
>> - u32 key = kvm_async_pf_hash_fn(gfn);
>> -
>> - for (i = 0; i < ASYNC_PF_PER_VCPU &&
>> - (vcpu->arch.apf.gfns[key] != gfn &&
>> - vcpu->arch.apf.gfns[key] != ~0); i++)
>> - key = kvm_async_pf_next_probe(key);
>> -
>> - return key;
>> -}
>> -
>> -bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
>> -{
>> - return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
>> -}
>> -
>> -static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
>> -{
>> - u32 i, j, k;
>> -
>> - i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
>> -
>> - if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
>> - return;
>> -
>> - while (true) {
>> - vcpu->arch.apf.gfns[i] = ~0;
>> - do {
>> - j = kvm_async_pf_next_probe(j);
>> - if (vcpu->arch.apf.gfns[j] == ~0)
>> - return;
>> - k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
>> - /*
>> - * k lies cyclically in ]i,j]
>> - * | i.k.j |
>> - * |....j i.k.| or |.k..j i...|
>> - */
>> - } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
>> - vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
>> - i = j;
>> - }
>> -}
>> -
>> static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
>> {
>> u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
>> @@ -11867,7 +11793,7 @@ bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
>> struct x86_exception fault;
>>
>> trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
>> - kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
>> + kvm_async_pf_add_slot(vcpu, work->arch.gfn);
>>
>> if (kvm_can_deliver_async_pf(vcpu) &&
>> !apf_put_user_notpresent(vcpu)) {
>> @@ -11904,7 +11830,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
>> if (work->wakeup_all)
>> work->arch.token = ~0; /* broadcast wakeup */
>> else
>> - kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
>> + kvm_async_pf_remove_slot(vcpu, work->arch.gfn);
>> trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
>>
>> if ((work->wakeup_all || work->notpresent_injected) &&
>>
> Looks good to me
>
Ok.
Thanks,
Gavin
next prev parent reply other threads:[~2022-01-13 7:45 UTC|newest]
Thread overview: 36+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-08-15 0:59 [PATCH v4 00/15] Support Asynchronous Page Fault Gavin Shan
2021-08-15 0:59 ` [PATCH v4 01/15] KVM: async_pf: Move struct kvm_async_pf around Gavin Shan
2021-11-10 15:37 ` Eric Auger
2022-01-13 7:21 ` Gavin Shan
2021-08-15 0:59 ` [PATCH v4 02/15] KVM: async_pf: Add helper function to check completion queue Gavin Shan
2021-08-16 16:53 ` Vitaly Kuznetsov
2021-08-17 10:44 ` Gavin Shan
2021-11-10 15:37 ` Eric Auger
2022-01-13 7:38 ` Gavin Shan
2021-08-15 0:59 ` [PATCH v4 03/15] KVM: async_pf: Make GFN slot management generic Gavin Shan
2021-11-10 17:00 ` Eric Auger
2022-01-13 7:42 ` Gavin Shan
2021-11-10 17:00 ` Eric Auger
2021-08-15 0:59 ` [PATCH v4 04/15] KVM: x86: Use generic async PF slot management Gavin Shan
2021-11-10 17:03 ` Eric Auger
2022-01-13 7:44 ` Gavin Shan [this message]
2021-08-15 0:59 ` [PATCH v4 05/15] KVM: arm64: Export kvm_handle_user_mem_abort() Gavin Shan
2021-11-10 18:02 ` Eric Auger
2022-01-13 7:55 ` Gavin Shan
2021-08-15 0:59 ` [PATCH v4 06/15] KVM: arm64: Add paravirtualization header files Gavin Shan
2021-11-10 18:06 ` Eric Auger
2022-01-13 8:00 ` Gavin Shan
2021-08-15 0:59 ` [PATCH v4 07/15] KVM: arm64: Support page-not-present notification Gavin Shan
2021-11-12 15:01 ` Eric Auger
2022-01-13 8:43 ` Gavin Shan
2021-08-15 0:59 ` [PATCH v4 08/15] KVM: arm64: Support page-ready notification Gavin Shan
2021-08-15 0:59 ` [PATCH v4 09/15] KVM: arm64: Support async PF hypercalls Gavin Shan
2021-08-15 0:59 ` [PATCH v4 10/15] KVM: arm64: Support async PF ioctl commands Gavin Shan
2021-08-15 0:59 ` [PATCH v4 11/15] KVM: arm64: Export async PF capability Gavin Shan
2021-08-15 0:59 ` [PATCH v4 12/15] arm64: Detect async PF para-virtualization feature Gavin Shan
2021-08-15 0:59 ` [PATCH v4 13/15] arm64: Reschedule process on async PF Gavin Shan
2021-08-15 0:59 ` [PATCH v4 14/15] arm64: Enable async PF Gavin Shan
2021-08-16 17:05 ` Vitaly Kuznetsov
2021-08-17 10:49 ` Gavin Shan
2021-08-15 0:59 ` [PATCH v4 15/15] KVM: arm64: Add async PF document Gavin Shan
2021-11-11 10:39 ` Eric Auger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=e1dd3940-95fb-e10b-93d6-bcbc11d6bc4e@redhat.com \
--to=gshan@redhat.com \
--cc=Jonathan.Cameron@huawei.com \
--cc=eauger@redhat.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.cs.columbia.edu \
--cc=linux-kernel@vger.kernel.org \
--cc=maz@kernel.org \
--cc=pbonzini@redhat.com \
--cc=shan.gavin@gmail.com \
--cc=vkuznets@redhat.com \
--cc=will@kernel.org \
--subject='Re: [PATCH v4 04/15] KVM: x86: Use generic async PF slot management' \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).