From: Xiong Zhang <xiong.y.zhang@linux.intel.com>
To: seanjc@google.com, pbonzini@redhat.com, peterz@infradead.org,
mizhang@google.com, kan.liang@intel.com, zhenyuw@linux.intel.com,
dapeng1.mi@linux.intel.com, jmattson@google.com
Cc: kvm@vger.kernel.org, linux-perf-users@vger.kernel.org,
linux-kernel@vger.kernel.org, zhiyuan.lv@intel.com,
eranian@google.com, irogers@google.com, samantha.alt@intel.com,
like.xu.linux@gmail.com, chao.gao@intel.com,
xiong.y.zhang@linux.intel.com,
Xiong Zhang <xiong.y.zhang@intel.com>
Subject: [RFC PATCH 15/41] KVM: x86/pmu: Manage MSR interception for IA32_PERF_GLOBAL_CTRL
Date: Fri, 26 Jan 2024 16:54:18 +0800 [thread overview]
Message-ID: <20240126085444.324918-16-xiong.y.zhang@linux.intel.com> (raw)
In-Reply-To: <20240126085444.324918-1-xiong.y.zhang@linux.intel.com>
From: Xiong Zhang <xiong.y.zhang@intel.com>
In PMU passthrough mode, there are three requirements to manage
IA32_PERF_GLOBAL_CTRL:
- guest IA32_PERF_GLOBAL_CTRL MSR must be saved at vm exit.
- IA32_PERF_GLOBAL_CTRL MSR must be cleared at vm exit to avoid any
counter running within the KVM runloop.
- guest IA32_PERF_GLOBAL_CTRL MSR must be restored at vm entry.
Introduce a vmx_set_perf_global_ctrl() function to automatically switch
IA32_PERF_GLOBAL_CTRL and invoke it after the VMM finishes setting up the
CPUID bits.
Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
---
arch/x86/include/asm/vmx.h | 1 +
arch/x86/kvm/vmx/vmx.c | 89 ++++++++++++++++++++++++++++++++------
arch/x86/kvm/vmx/vmx.h | 3 +-
3 files changed, 78 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 0e73616b82f3..f574e7b429a3 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -104,6 +104,7 @@
#define VM_EXIT_CLEAR_BNDCFGS 0x00800000
#define VM_EXIT_PT_CONCEAL_PIP 0x01000000
#define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
+#define VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL 0x40000000
#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 33cb69ff0804..8ab266e1e2a7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -4387,6 +4387,74 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
return pin_based_exec_ctrl;
}
+static void vmx_set_perf_global_ctrl(struct vcpu_vmx *vmx)
+{
+ u32 vmentry_ctrl = vm_entry_controls_get(vmx);
+ u32 vmexit_ctrl = vm_exit_controls_get(vmx);
+ int i;
+
+ /*
+ * PERF_GLOBAL_CTRL is toggled dynamically in emulated vPMU.
+ */
+ if (cpu_has_perf_global_ctrl_bug() ||
+ !is_passthrough_pmu_enabled(&vmx->vcpu)) {
+ vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ vmexit_ctrl &= ~VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL;
+ }
+
+ if (is_passthrough_pmu_enabled(&vmx->vcpu)) {
+ /*
+ * Setup auto restore guest PERF_GLOBAL_CTRL MSR at vm entry.
+ */
+ if (vmentry_ctrl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+ vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, 0);
+ else {
+ i = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest,
+ MSR_CORE_PERF_GLOBAL_CTRL);
+ if (i < 0) {
+ i = vmx->msr_autoload.guest.nr++;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT,
+ vmx->msr_autoload.guest.nr);
+ }
+ vmx->msr_autoload.guest.val[i].index = MSR_CORE_PERF_GLOBAL_CTRL;
+ vmx->msr_autoload.guest.val[i].value = 0;
+ }
+ /*
+ * Setup auto clear host PERF_GLOBAL_CTRL msr at vm exit.
+ */
+ if (vmexit_ctrl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+ vmcs_write64(HOST_IA32_PERF_GLOBAL_CTRL, 0);
+ else {
+ i = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.host,
+ MSR_CORE_PERF_GLOBAL_CTRL);
+ if (i < 0) {
+ i = vmx->msr_autoload.host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT,
+ vmx->msr_autoload.host.nr);
+ }
+ vmx->msr_autoload.host.val[i].index = MSR_CORE_PERF_GLOBAL_CTRL;
+ vmx->msr_autoload.host.val[i].value = 0;
+ }
+ /*
+ * Setup auto save guest PERF_GLOBAL_CTRL msr at vm exit
+ */
+ if (!(vmexit_ctrl & VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL)) {
+ i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
+ MSR_CORE_PERF_GLOBAL_CTRL);
+ if (i < 0) {
+ i = vmx->msr_autostore.guest.nr++;
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT,
+ vmx->msr_autostore.guest.nr);
+ }
+ vmx->msr_autostore.guest.val[i].index = MSR_CORE_PERF_GLOBAL_CTRL;
+ }
+ }
+
+ vm_entry_controls_set(vmx, vmentry_ctrl);
+ vm_exit_controls_set(vmx, vmexit_ctrl);
+}
+
static u32 vmx_vmentry_ctrl(void)
{
u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
@@ -4394,15 +4462,9 @@ static u32 vmx_vmentry_ctrl(void)
if (vmx_pt_mode_is_system())
vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
VM_ENTRY_LOAD_IA32_RTIT_CTL);
- /*
- * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
- */
- vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
- VM_ENTRY_LOAD_IA32_EFER |
- VM_ENTRY_IA32E_MODE);
- if (cpu_has_perf_global_ctrl_bug())
- vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ /* IA32e mode, and loading of EFER is toggled dynamically. */
+ vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_EFER | VM_ENTRY_IA32E_MODE);
return vmentry_ctrl;
}
@@ -4422,12 +4484,8 @@ static u32 vmx_vmexit_ctrl(void)
vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
VM_EXIT_CLEAR_IA32_RTIT_CTL);
- if (cpu_has_perf_global_ctrl_bug())
- vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
-
- /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
- return vmexit_ctrl &
- ~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
+ /* Loading of EFER is toggled dynamically */
+ return vmexit_ctrl & ~VM_EXIT_LOAD_IA32_EFER;
}
static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
@@ -4765,6 +4823,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write64(VM_FUNCTION_CONTROL, 0);
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
@@ -7822,6 +7881,8 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (is_passthrough_pmu_enabled(&vmx->vcpu))
exec_controls_clearbit(vmx, CPU_BASED_RDPMC_EXITING);
+ vmx_set_perf_global_ctrl(vmx);
+
/* Refresh #PF interception to account for MAXPHYADDR changes. */
vmx_update_exception_bitmap(vcpu);
}
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index c2130d2c8e24..c89db35e1de8 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -502,7 +502,8 @@ static inline u8 vmx_get_rvi(void)
VM_EXIT_LOAD_IA32_EFER | \
VM_EXIT_CLEAR_BNDCFGS | \
VM_EXIT_PT_CONCEAL_PIP | \
- VM_EXIT_CLEAR_IA32_RTIT_CTL)
+ VM_EXIT_CLEAR_IA32_RTIT_CTL | \
+ VM_EXIT_SAVE_IA32_PERF_GLOBAL_CTRL)
#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL \
(PIN_BASED_EXT_INTR_MASK | \
--
2.34.1
next prev parent reply other threads:[~2024-01-26 8:56 UTC|newest]
Thread overview: 181+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-26 8:54 [RFC PATCH 00/41] KVM: x86/pmu: Introduce passthrough vPM Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 01/41] perf: x86/intel: Support PERF_PMU_CAP_VPMU_PASSTHROUGH Xiong Zhang
2024-04-11 17:04 ` Sean Christopherson
2024-04-11 17:21 ` Liang, Kan
2024-04-11 17:24 ` Jim Mattson
2024-04-11 17:46 ` Sean Christopherson
2024-04-11 19:13 ` Liang, Kan
2024-04-11 20:43 ` Sean Christopherson
2024-04-11 21:04 ` Liang, Kan
2024-04-11 19:32 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 02/41] perf: Support guest enter/exit interfaces Xiong Zhang
2024-03-20 16:40 ` Raghavendra Rao Ananta
2024-03-20 17:12 ` Liang, Kan
2024-04-11 18:06 ` Sean Christopherson
2024-04-11 19:53 ` Liang, Kan
2024-04-12 19:17 ` Sean Christopherson
2024-04-12 20:56 ` Liang, Kan
2024-04-15 16:03 ` Liang, Kan
2024-04-16 5:34 ` Zhang, Xiong Y
2024-04-16 12:48 ` Liang, Kan
2024-04-17 9:42 ` Zhang, Xiong Y
2024-04-18 16:11 ` Sean Christopherson
2024-04-19 1:37 ` Zhang, Xiong Y
2024-04-26 4:09 ` Zhang, Xiong Y
2024-01-26 8:54 ` [RFC PATCH 03/41] perf: Set exclude_guest onto nmi_watchdog Xiong Zhang
2024-04-11 18:56 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 04/41] perf: core/x86: Add support to register a new vector for PMI handling Xiong Zhang
2024-04-11 17:10 ` Sean Christopherson
2024-04-11 19:05 ` Sean Christopherson
2024-04-12 3:56 ` Zhang, Xiong Y
2024-04-13 1:17 ` Mi, Dapeng
2024-01-26 8:54 ` [RFC PATCH 05/41] KVM: x86/pmu: Register PMI handler for passthrough PMU Xiong Zhang
2024-04-11 19:07 ` Sean Christopherson
2024-04-12 5:44 ` Zhang, Xiong Y
2024-01-26 8:54 ` [RFC PATCH 06/41] perf: x86: Add function to switch PMI handler Xiong Zhang
2024-04-11 19:17 ` Sean Christopherson
2024-04-11 19:34 ` Sean Christopherson
2024-04-12 6:03 ` Zhang, Xiong Y
2024-04-12 5:57 ` Zhang, Xiong Y
2024-01-26 8:54 ` [RFC PATCH 07/41] perf/x86: Add interface to reflect virtual LVTPC_MASK bit onto HW Xiong Zhang
2024-04-11 19:21 ` Sean Christopherson
2024-04-12 6:17 ` Zhang, Xiong Y
2024-01-26 8:54 ` [RFC PATCH 08/41] KVM: x86/pmu: Add get virtual LVTPC_MASK bit function Xiong Zhang
2024-04-11 19:22 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 09/41] perf: core/x86: Forbid PMI handler when guest own PMU Xiong Zhang
2024-04-11 19:26 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 10/41] perf: core/x86: Plumb passthrough PMU capability from x86_pmu to x86_pmu_cap Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 11/41] KVM: x86/pmu: Introduce enable_passthrough_pmu module parameter and propage to KVM instance Xiong Zhang
2024-04-11 20:54 ` Sean Christopherson
2024-04-11 21:03 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 12/41] KVM: x86/pmu: Plumb through passthrough PMU to vcpu for Intel CPUs Xiong Zhang
2024-04-11 20:57 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 13/41] KVM: x86/pmu: Add a helper to check if passthrough PMU is enabled Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 14/41] KVM: x86/pmu: Allow RDPMC pass through Xiong Zhang
2024-01-26 8:54 ` Xiong Zhang [this message]
2024-04-11 21:21 ` [RFC PATCH 15/41] KVM: x86/pmu: Manage MSR interception for IA32_PERF_GLOBAL_CTRL Sean Christopherson
2024-04-11 22:30 ` Jim Mattson
2024-04-11 23:27 ` Sean Christopherson
2024-04-13 2:10 ` Mi, Dapeng
2024-01-26 8:54 ` [RFC PATCH 16/41] KVM: x86/pmu: Create a function prototype to disable MSR interception Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 17/41] KVM: x86/pmu: Implement pmu function for Intel CPU " Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 18/41] KVM: x86/pmu: Intercept full-width GP counter MSRs by checking with perf capabilities Xiong Zhang
2024-04-11 21:23 ` Sean Christopherson
2024-04-11 21:50 ` Jim Mattson
2024-04-12 16:01 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 19/41] KVM: x86/pmu: Whitelist PMU MSRs for passthrough PMU Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 20/41] KVM: x86/pmu: Introduce PMU operation prototypes for save/restore PMU context Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 21/41] KVM: x86/pmu: Introduce function prototype for Intel CPU to " Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 22/41] x86: Introduce MSR_CORE_PERF_GLOBAL_STATUS_SET for passthrough PMU Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 23/41] KVM: x86/pmu: Implement the save/restore of PMU state for Intel CPU Xiong Zhang
2024-04-11 21:26 ` Sean Christopherson
2024-04-13 2:29 ` Mi, Dapeng
2024-04-11 21:44 ` Sean Christopherson
2024-04-11 22:19 ` Jim Mattson
2024-04-11 23:31 ` Sean Christopherson
2024-04-13 3:19 ` Mi, Dapeng
2024-04-13 3:03 ` Mi, Dapeng
2024-04-13 3:34 ` Mingwei Zhang
2024-04-13 4:25 ` Mi, Dapeng
2024-04-15 6:06 ` Mingwei Zhang
2024-04-15 10:04 ` Mi, Dapeng
2024-04-15 16:44 ` Mingwei Zhang
2024-04-15 17:38 ` Sean Christopherson
2024-04-15 17:54 ` Mingwei Zhang
2024-04-15 22:45 ` Sean Christopherson
2024-04-22 2:14 ` maobibo
2024-04-22 17:01 ` Sean Christopherson
2024-04-23 1:01 ` maobibo
2024-04-23 2:44 ` Mi, Dapeng
2024-04-23 2:53 ` maobibo
2024-04-23 3:13 ` Mi, Dapeng
2024-04-23 3:26 ` maobibo
2024-04-23 3:59 ` Mi, Dapeng
2024-04-23 3:55 ` maobibo
2024-04-23 4:23 ` Mingwei Zhang
2024-04-23 6:08 ` maobibo
2024-04-23 6:45 ` Mi, Dapeng
2024-04-23 7:10 ` Mingwei Zhang
2024-04-23 8:24 ` Mi, Dapeng
2024-04-23 8:51 ` maobibo
2024-04-23 16:50 ` Mingwei Zhang
2024-04-23 12:12 ` maobibo
2024-04-23 17:02 ` Mingwei Zhang
2024-04-24 1:07 ` maobibo
2024-04-24 8:18 ` Mi, Dapeng
2024-04-24 15:00 ` Sean Christopherson
2024-04-25 3:55 ` Mi, Dapeng
2024-04-25 4:24 ` Mingwei Zhang
2024-04-25 16:13 ` Liang, Kan
2024-04-25 20:16 ` Mingwei Zhang
2024-04-25 20:43 ` Liang, Kan
2024-04-25 21:46 ` Sean Christopherson
2024-04-26 1:46 ` Mi, Dapeng
2024-04-26 3:12 ` Mingwei Zhang
2024-04-26 4:02 ` Mi, Dapeng
2024-04-26 4:46 ` Mingwei Zhang
2024-04-26 14:09 ` Liang, Kan
2024-04-26 18:41 ` Mingwei Zhang
2024-04-26 19:06 ` Liang, Kan
2024-04-26 19:46 ` Sean Christopherson
2024-04-27 3:04 ` Mingwei Zhang
2024-04-28 0:58 ` Mi, Dapeng
2024-04-28 6:01 ` Mingwei Zhang
2024-04-29 17:44 ` Sean Christopherson
2024-05-01 17:43 ` Mingwei Zhang
2024-05-01 18:00 ` Liang, Kan
2024-05-01 20:36 ` Sean Christopherson
2024-04-29 13:08 ` Liang, Kan
2024-04-26 13:53 ` Liang, Kan
2024-04-26 1:50 ` Mi, Dapeng
2024-04-18 21:21 ` Mingwei Zhang
2024-04-18 21:41 ` Mingwei Zhang
2024-04-19 1:02 ` Mi, Dapeng
2024-01-26 8:54 ` [RFC PATCH 24/41] KVM: x86/pmu: Zero out unexposed Counters/Selectors to avoid information leakage Xiong Zhang
2024-04-11 21:36 ` Sean Christopherson
2024-04-11 21:56 ` Jim Mattson
2024-01-26 8:54 ` [RFC PATCH 25/41] KVM: x86/pmu: Introduce macro PMU_CAP_PERF_METRICS Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 26/41] KVM: x86/pmu: Add host_perf_cap field in kvm_caps to record host PMU capability Xiong Zhang
2024-04-11 21:49 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 27/41] KVM: x86/pmu: Clear PERF_METRICS MSR for guest Xiong Zhang
2024-04-11 21:50 ` Sean Christopherson
2024-04-13 3:29 ` Mi, Dapeng
2024-01-26 8:54 ` [RFC PATCH 28/41] KVM: x86/pmu: Switch IA32_PERF_GLOBAL_CTRL at VM boundary Xiong Zhang
2024-04-11 21:54 ` Sean Christopherson
2024-04-11 22:10 ` Jim Mattson
2024-04-11 22:54 ` Sean Christopherson
2024-04-11 23:08 ` Jim Mattson
2024-01-26 8:54 ` [RFC PATCH 29/41] KVM: x86/pmu: Exclude existing vLBR logic from the passthrough PMU Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 30/41] KVM: x86/pmu: Switch PMI handler at KVM context switch boundary Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 31/41] KVM: x86/pmu: Call perf_guest_enter() at PMU context switch Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 32/41] KVM: x86/pmu: Add support for PMU context switch at VM-exit/enter Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 33/41] KVM: x86/pmu: Make check_pmu_event_filter() an exported function Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 34/41] KVM: x86/pmu: Intercept EVENT_SELECT MSR Xiong Zhang
2024-04-11 21:55 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 35/41] KVM: x86/pmu: Allow writing to event selector for GP counters if event is allowed Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 36/41] KVM: x86/pmu: Intercept FIXED_CTR_CTRL MSR Xiong Zhang
2024-04-11 21:56 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 37/41] KVM: x86/pmu: Allow writing to fixed counter selector if counter is exposed Xiong Zhang
2024-04-11 22:03 ` Sean Christopherson
2024-04-13 4:12 ` Mi, Dapeng
2024-01-26 8:54 ` [RFC PATCH 38/41] KVM: x86/pmu: Introduce PMU helper to increment counter Xiong Zhang
2024-01-26 8:54 ` [RFC PATCH 39/41] KVM: x86/pmu: Implement emulated counter increment for passthrough PMU Xiong Zhang
2024-04-11 23:12 ` Sean Christopherson
2024-04-11 23:17 ` Sean Christopherson
2024-01-26 8:54 ` [RFC PATCH 40/41] KVM: x86/pmu: Separate passthrough PMU logic in set/get_msr() from non-passthrough vPMU Xiong Zhang
2024-04-11 23:18 ` Sean Christopherson
2024-04-18 21:54 ` Mingwei Zhang
2024-01-26 8:54 ` [RFC PATCH 41/41] KVM: nVMX: Add nested virtualization support for passthrough PMU Xiong Zhang
2024-04-11 23:21 ` Sean Christopherson
2024-04-11 17:03 ` [RFC PATCH 00/41] KVM: x86/pmu: Introduce passthrough vPM Sean Christopherson
2024-04-12 2:19 ` Zhang, Xiong Y
2024-04-12 18:32 ` Sean Christopherson
2024-04-15 1:06 ` Zhang, Xiong Y
2024-04-15 15:05 ` Sean Christopherson
2024-04-16 5:11 ` Zhang, Xiong Y
2024-04-18 20:46 ` Mingwei Zhang
2024-04-18 21:52 ` Mingwei Zhang
2024-04-19 19:14 ` Sean Christopherson
2024-04-19 22:02 ` Mingwei Zhang
2024-04-11 23:25 ` Sean Christopherson
2024-04-11 23:56 ` Mingwei Zhang
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240126085444.324918-16-xiong.y.zhang@linux.intel.com \
--to=xiong.y.zhang@linux.intel.com \
--cc=chao.gao@intel.com \
--cc=dapeng1.mi@linux.intel.com \
--cc=eranian@google.com \
--cc=irogers@google.com \
--cc=jmattson@google.com \
--cc=kan.liang@intel.com \
--cc=kvm@vger.kernel.org \
--cc=like.xu.linux@gmail.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-perf-users@vger.kernel.org \
--cc=mizhang@google.com \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=samantha.alt@intel.com \
--cc=seanjc@google.com \
--cc=xiong.y.zhang@intel.com \
--cc=zhenyuw@linux.intel.com \
--cc=zhiyuan.lv@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).