LKML Archive on lore.kernel.org
* [PATCH v3 0/2] kvm: x86: kvm_emulate_*
@ 2015-03-02 19:43 Joel Schopp
  2015-03-02 19:43 ` [PATCH v3 1/2] kvm: x86: make kvm_emulate_* consistent Joel Schopp
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Joel Schopp @ 2015-03-02 19:43 UTC (permalink / raw)
  To: Gleb Natapov, Paolo Bonzini, kvm
  Cc: Joerg Roedel, Borislav Petkov, linux-kernel, David Kaplan, rkrcmar

Review comments from v1 that used kvm_emulate_wbinvd() pointed out that
kvm_emulate_* was inconsistent about skipping, while kvm_emulate() always
skips.  The first patch cleans up the existing uses, while the second patch
uses the updated kvm_emulate_wbinvd() in svm.
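
As an illustrative sketch (not part of the series; adapted from the SVM halt
handler touched by patch 1), the pattern being cleaned up looks like this.
Before, each exit handler had to pair the skip with the helper; after, the
helper skips on its own, and callers that must not skip use kvm_vcpu_halt():

/* Before: the exit handler skips the instruction itself. */
static int halt_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_halt(&svm->vcpu);
}

/* After: kvm_emulate_halt() calls kvm_x86_ops->skip_emulated_instruction()
 * internally, so the handler just forwards to it. */
static int halt_interception(struct vcpu_svm *svm)
{
        svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
        return kvm_emulate_halt(&svm->vcpu);
}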

Changes since v2:
	* fixed email subject line on series short description
	* renamed kvm_emulate_halt_noskip() to kvm_vcpu_halt()
	* added header declaration for kvm_vcpu_halt()
	* squashed blank line 
---

David Kaplan (1):
      x86: svm: make wbinvd faster

Joel Schopp (1):
      kvm: x86: make kvm_emulate_* consistent


 arch/x86/include/asm/kvm_host.h |    1 +
 arch/x86/kvm/svm.c              |   10 +++++++---
 arch/x86/kvm/vmx.c              |    9 +++------
 arch/x86/kvm/x86.c              |   23 ++++++++++++++++++++---
 4 files changed, 31 insertions(+), 12 deletions(-)

--



* [PATCH v3 1/2] kvm: x86: make kvm_emulate_* consistent
  2015-03-02 19:43 [PATCH v3 0/2] kvm: x86: kvm_emulate_* Joel Schopp
@ 2015-03-02 19:43 ` Joel Schopp
  2015-03-03 16:51   ` Radim Krčmář
  2015-03-02 19:43 ` [PATCH v3 2/2] x86: svm: make wbinvd faster Joel Schopp
  2015-03-10 23:58 ` [PATCH v3 0/2] kvm: x86: kvm_emulate_* Marcelo Tosatti
  2 siblings, 1 reply; 6+ messages in thread
From: Joel Schopp @ 2015-03-02 19:43 UTC (permalink / raw)
  To: Gleb Natapov, Paolo Bonzini, kvm
  Cc: Joerg Roedel, Borislav Petkov, linux-kernel, David Kaplan, rkrcmar

Currently kvm_emulate() skips the instruction but the kvm_emulate_* helpers
sometimes don't.  The end result is that callers end up doing the skip
themselves.  Let's make them consistent.
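
The shape of the change, as a condensed sketch (the full diff follows; see the
x86.c hunks for the real code): each skipping kvm_emulate_*() becomes a thin
wrapper around a non-skipping variant (kvm_emulate_wbinvd_noskip(), or
kvm_vcpu_halt() for halt), which the emulator and nested-HLT paths call instead:

int kvm_vcpu_halt(struct kvm_vcpu *vcpu);       /* emulate HLT, no RIP skip */

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->skip_emulated_instruction(vcpu);
        return kvm_vcpu_halt(vcpu);
}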

Signed-off-by: Joel Schopp <joel.schopp@amd.com>
---
 arch/x86/include/asm/kvm_host.h |    1 +
 arch/x86/kvm/svm.c              |    2 --
 arch/x86/kvm/vmx.c              |    9 +++------
 arch/x86/kvm/x86.c              |   23 ++++++++++++++++++++---
 4 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a236e39..bf5a160 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -933,6 +933,7 @@ struct x86_emulate_ctxt;
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d319e0c..0c9e377 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1929,14 +1929,12 @@ static int nop_on_interception(struct vcpu_svm *svm)
 static int halt_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
-	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_halt(&svm->vcpu);
 }
 
 static int vmmcall_interception(struct vcpu_svm *svm)
 {
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-	skip_emulated_instruction(&svm->vcpu);
 	kvm_emulate_hypercall(&svm->vcpu);
 	return 1;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 14c1a18..eef7f53 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4995,7 +4995,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 		if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
 			if (vcpu->arch.halt_request) {
 				vcpu->arch.halt_request = 0;
-				return kvm_emulate_halt(vcpu);
+				return kvm_vcpu_halt(vcpu);
 			}
 			return 1;
 		}
@@ -5522,13 +5522,11 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 
 static int handle_halt(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	return kvm_emulate_halt(vcpu);
 }
 
 static int handle_vmcall(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	kvm_emulate_hypercall(vcpu);
 	return 1;
 }
@@ -5559,7 +5557,6 @@ static int handle_rdpmc(struct kvm_vcpu *vcpu)
 
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
-	skip_emulated_instruction(vcpu);
 	kvm_emulate_wbinvd(vcpu);
 	return 1;
 }
@@ -5898,7 +5895,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 
 		if (vcpu->arch.halt_request) {
 			vcpu->arch.halt_request = 0;
-			ret = kvm_emulate_halt(vcpu);
+			ret = kvm_vcpu_halt(vcpu);
 			goto out;
 		}
 
@@ -9513,7 +9510,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	vmcs12->launch_state = 1;
 
 	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
-		return kvm_emulate_halt(vcpu);
+		return kvm_vcpu_halt(vcpu);
 
 	vmx->nested.nested_run_pending = 1;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bd7a70b..6ff90f7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4706,7 +4706,7 @@ static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
 }
 
-int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
 {
 	if (!need_emulate_wbinvd(vcpu))
 		return X86EMUL_CONTINUE;
@@ -4723,11 +4723,19 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 		wbinvd();
 	return X86EMUL_CONTINUE;
 }
+
+int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_emulate_wbinvd_noskip(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
 
+
+
 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
 {
-	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
+	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
 }
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
@@ -5817,7 +5825,7 @@ void kvm_arch_exit(void)
 	free_percpu(shared_msrs);
 }
 
-int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
 	if (irqchip_in_kernel(vcpu->kvm)) {
@@ -5828,6 +5836,13 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+	return kvm_vcpu_halt(vcpu);
+}
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
@@ -5912,6 +5927,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	unsigned long nr, a0, a1, a2, a3, ret;
 	int op_64_bit, r = 1;
 
+	kvm_x86_ops->skip_emulated_instruction(vcpu);
+
 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
 		return kvm_hv_hypercall(vcpu);
 



* [PATCH v3 2/2] x86: svm: make wbinvd faster
  2015-03-02 19:43 [PATCH v3 0/2] kvm: x86: kvm_emulate_* Joel Schopp
  2015-03-02 19:43 ` [PATCH v3 1/2] kvm: x86: make kvm_emulate_* consistent Joel Schopp
@ 2015-03-02 19:43 ` Joel Schopp
  2015-03-03 16:52   ` Radim Krčmář
  2015-03-10 23:58 ` [PATCH v3 0/2] kvm: x86: kvm_emulate_* Marcelo Tosatti
  2 siblings, 1 reply; 6+ messages in thread
From: Joel Schopp @ 2015-03-02 19:43 UTC (permalink / raw)
  To: Gleb Natapov, Paolo Bonzini, kvm
  Cc: David Kaplan, David Kaplan, rkrcmar, Joerg Roedel, linux-kernel,
	Borislav Petkov

From: David Kaplan <David.Kaplan@amd.com>

No need to re-decode WBINVD since we know what it is from the intercept.
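
As a rough sketch of the difference (illustrative only; the old handler's body
is paraphrased from the svm.c of that era, and the real change is in the diff
below): previously SVM_EXIT_WBINVD was routed through emulate_on_interception(),
which fetches and re-decodes the guest instruction, whereas the new handler
acts on the intercept directly:

/* Old path (sketch): bounce through the generic instruction emulator. */
static int emulate_on_interception(struct vcpu_svm *svm)
{
        return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}

/* New path: the intercept already identifies WBINVD, so emulate it directly
 * (kvm_emulate_wbinvd() also skips the instruction after patch 1). */
static int wbinvd_interception(struct vcpu_svm *svm)
{
        kvm_emulate_wbinvd(&svm->vcpu);
        return 1;
}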

Signed-off-by: David Kaplan <David.Kaplan@amd.com>
[extracted from larger unrelated patch, forward ported, tested, style cleanup]
Signed-off-by: Joel Schopp <joel.schopp@amd.com>
---
 arch/x86/kvm/svm.c |    8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0c9e377..6fa4222 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2774,6 +2774,12 @@ static int skinit_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
+static int wbinvd_interception(struct vcpu_svm *svm)
+{
+	kvm_emulate_wbinvd(&svm->vcpu);
+	return 1;
+}
+
 static int xsetbv_interception(struct vcpu_svm *svm)
 {
 	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
@@ -3374,7 +3380,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_STGI]				= stgi_interception,
 	[SVM_EXIT_CLGI]				= clgi_interception,
 	[SVM_EXIT_SKINIT]			= skinit_interception,
-	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
+	[SVM_EXIT_WBINVD]                       = wbinvd_interception,
 	[SVM_EXIT_MONITOR]			= monitor_interception,
 	[SVM_EXIT_MWAIT]			= mwait_interception,
 	[SVM_EXIT_XSETBV]			= xsetbv_interception,



* Re: [PATCH v3 1/2] kvm: x86: make kvm_emulate_* consistent
  2015-03-02 19:43 ` [PATCH v3 1/2] kvm: x86: make kvm_emulate_* consistent Joel Schopp
@ 2015-03-03 16:51   ` Radim Krčmář
  0 siblings, 0 replies; 6+ messages in thread
From: Radim Krčmář @ 2015-03-03 16:51 UTC (permalink / raw)
  To: Joel Schopp
  Cc: Gleb Natapov, Paolo Bonzini, kvm, Joerg Roedel, Borislav Petkov,
	linux-kernel, David Kaplan

2015-03-02 13:43-0600, Joel Schopp:
> Currently kvm_emulate() skips the instruction but the kvm_emulate_* helpers
> sometimes don't.  The end result is that callers end up doing the skip
> themselves.  Let's make them consistent.
> 
> Signed-off-by: Joel Schopp <joel.schopp@amd.com>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>

> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> @@ -4723,11 +4723,19 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
>  		wbinvd();
>  	return X86EMUL_CONTINUE;
>  }
> +
> +int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->skip_emulated_instruction(vcpu);
> +	return kvm_emulate_wbinvd_noskip(vcpu);
> +}
>  EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
>  
> +
> +

(sneaky newlines)

>  static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
>  {
> -	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
> +	kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
>  }
>  
>  int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)


* Re: [PATCH v3 2/2] x86: svm: make wbinvd faster
  2015-03-02 19:43 ` [PATCH v3 2/2] x86: svm: make wbinvd faster Joel Schopp
@ 2015-03-03 16:52   ` Radim Krčmář
  0 siblings, 0 replies; 6+ messages in thread
From: Radim Krčmář @ 2015-03-03 16:52 UTC (permalink / raw)
  To: Joel Schopp
  Cc: Gleb Natapov, Paolo Bonzini, kvm, David Kaplan, Joerg Roedel,
	linux-kernel, Borislav Petkov

2015-03-02 13:43-0600, Joel Schopp:
> From: David Kaplan <David.Kaplan@amd.com>
> 
> No need to re-decode WBINVD since we know what it is from the intercept.
> 
> Signed-off-by: David Kaplan <David.Kaplan@amd.com>
> [extracted from larger unrelated patch, forward ported, tested, style cleanup]
> Signed-off-by: Joel Schopp <joel.schopp@amd.com>
> ---

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>

Thanks.


* Re: [PATCH v3 0/2] kvm: x86: kvm_emulate_*
  2015-03-02 19:43 [PATCH v3 0/2] kvm: x86: kvm_emulate_* Joel Schopp
  2015-03-02 19:43 ` [PATCH v3 1/2] kvm: x86: make kvm_emulate_* consistent Joel Schopp
  2015-03-02 19:43 ` [PATCH v3 2/2] x86: svm: make wbinvd faster Joel Schopp
@ 2015-03-10 23:58 ` Marcelo Tosatti
  2 siblings, 0 replies; 6+ messages in thread
From: Marcelo Tosatti @ 2015-03-10 23:58 UTC (permalink / raw)
  To: Joel Schopp
  Cc: Gleb Natapov, Paolo Bonzini, kvm, Joerg Roedel, Borislav Petkov,
	linux-kernel, David Kaplan, rkrcmar

On Mon, Mar 02, 2015 at 01:43:24PM -0600, Joel Schopp wrote:
> Review comments from v1 that used kvm_emulate_wbinvd() pointed out that
> kvm_emulate_* was inconsistent about skipping, while kvm_emulate() always
> skips.  The first patch cleans up the existing uses, while the second patch
> uses the updated kvm_emulate_wbinvd() in svm.
> 
> Changes since v2:
> 	* fixed email subject line on series short description
> 	* renamed kvm_emulate_halt_noskip() to kvm_vcpu_halt()
> 	* added header declaration for kvm_vcpu_halt()
> 	* squashed blank line 
> ---
> 
> David Kaplan (1):
>       x86: svm: make wbinvd faster
> 
> Joel Schopp (1):
>       kvm: x86: make kvm_emulate_* consistent
> 
> 
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/svm.c              |   10 +++++++---
>  arch/x86/kvm/vmx.c              |    9 +++------
>  arch/x86/kvm/x86.c              |   23 ++++++++++++++++++++---
>  4 files changed, 31 insertions(+), 12 deletions(-)

Applied, thanks.

