KVM Archive mirror
* [PATCH 0/2] vPMU code refines
@ 2024-04-30  0:52 Dapeng Mi
  2024-04-30  0:52 ` [PATCH 1/2] KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu Dapeng Mi
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Dapeng Mi @ 2024-04-30  0:52 UTC
  To: Sean Christopherson, Paolo Bonzini
  Cc: kvm, linux-kernel, Jim Mattson, Mingwei Zhang, Xiong Zhang,
	Zhenyu Wang, Like Xu, Jinrong Liang, Dapeng Mi, Dapeng Mi

This small patchset refines the ambiguous naming in the kvm_pmu structure
and uses macros instead of magic numbers to manipulate the FIXED_CTR_CTRL
MSR, to improve readability.

No logic change is introduced in this patchset.
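
For a quick before/after flavor, here is one line from each patch
(excerpted from the diffs below; illustrative only):

-		if (data & pmu->global_status_mask)
+		if (data & pmu->global_status_rsvd)

-		if (fixed_ctr_ctrl & 0x1)
+		if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL)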

Dapeng Mi (2):
  KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
  KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros

 arch/x86/include/asm/kvm_host.h | 10 ++++-----
 arch/x86/kvm/pmu.c              | 26 ++++++++++++------------
 arch/x86/kvm/pmu.h              |  8 +++++---
 arch/x86/kvm/svm/pmu.c          |  4 ++--
 arch/x86/kvm/vmx/pmu_intel.c    | 36 +++++++++++++++++++--------------
 5 files changed, 46 insertions(+), 38 deletions(-)


base-commit: 7b076c6a308ec5bce9fc96e2935443ed228b9148
-- 
2.40.1



* [PATCH 1/2] KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
  2024-04-30  0:52 [PATCH 0/2] vPMU code refines Dapeng Mi
@ 2024-04-30  0:52 ` Dapeng Mi
  2024-04-30  0:52 ` [PATCH 2/2] KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros Dapeng Mi
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 8+ messages in thread
From: Dapeng Mi @ 2024-04-30  0:52 UTC
  To: Sean Christopherson, Paolo Bonzini
  Cc: kvm, linux-kernel, Jim Mattson, Mingwei Zhang, Xiong Zhang,
	Zhenyu Wang, Like Xu, Jinrong Liang, Dapeng Mi, Dapeng Mi

Several '_mask' suffixed fields, such as global_ctrl_mask, are defined
in the kvm_pmu structure. However, the '_mask' suffix is ambiguous and
misleading, since these fields are not real masks with positive logic.
On the contrary, they hold the reserved bits of the corresponding MSRs,
i.e. the bits that must not be set. Rename the suffix to '_rsvd' to
make that explicit.
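
For example (adapted from the hunks below; illustrative only), every one
of these fields is consumed with negative logic, i.e. an MSR write is
rejected when it sets any reserved bit:

	/* '_rsvd' holds the bits that must not be set. */
	if (data & pmu->fixed_ctr_ctrl_rsvd)
		return 1;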

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h | 10 +++++-----
 arch/x86/kvm/pmu.c              | 16 ++++++++--------
 arch/x86/kvm/pmu.h              |  2 +-
 arch/x86/kvm/svm/pmu.c          |  4 ++--
 arch/x86/kvm/vmx/pmu_intel.c    | 26 +++++++++++++-------------
 5 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1d13e3cd1dc5..90edb7d30fce 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -543,12 +543,12 @@ struct kvm_pmu {
 	unsigned nr_arch_fixed_counters;
 	unsigned available_event_types;
 	u64 fixed_ctr_ctrl;
-	u64 fixed_ctr_ctrl_mask;
+	u64 fixed_ctr_ctrl_rsvd;
 	u64 global_ctrl;
 	u64 global_status;
 	u64 counter_bitmask[2];
-	u64 global_ctrl_mask;
-	u64 global_status_mask;
+	u64 global_ctrl_rsvd;
+	u64 global_status_rsvd;
 	u64 reserved_bits;
 	u64 raw_event_mask;
 	struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
@@ -568,9 +568,9 @@ struct kvm_pmu {
 
 	u64 ds_area;
 	u64 pebs_enable;
-	u64 pebs_enable_mask;
+	u64 pebs_enable_rsvd;
 	u64 pebs_data_cfg;
-	u64 pebs_data_cfg_mask;
+	u64 pebs_data_cfg_rsvd;
 
 	/*
 	 * If a guest counter is cross-mapped to host counter with different
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a593b03c9aed..afbd67ca782c 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -681,13 +681,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated)
 			break;
 
-		if (data & pmu->global_status_mask)
+		if (data & pmu->global_status_rsvd)
 			return 1;
 
 		pmu->global_status = data;
 		break;
 	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
-		data &= ~pmu->global_ctrl_mask;
+		data &= ~pmu->global_ctrl_rsvd;
 		fallthrough;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (!kvm_valid_perf_global_ctrl(pmu, data))
@@ -704,7 +704,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
 		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
 		 */
-		if (data & pmu->global_status_mask)
+		if (data & pmu->global_status_rsvd)
 			return 1;
 		fallthrough;
 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
@@ -768,11 +768,11 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 	pmu->reserved_bits = 0xffffffff00200000ull;
 	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
-	pmu->global_ctrl_mask = ~0ull;
-	pmu->global_status_mask = ~0ull;
-	pmu->fixed_ctr_ctrl_mask = ~0ull;
-	pmu->pebs_enable_mask = ~0ull;
-	pmu->pebs_data_cfg_mask = ~0ull;
+	pmu->global_ctrl_rsvd = ~0ull;
+	pmu->global_status_rsvd = ~0ull;
+	pmu->fixed_ctr_ctrl_rsvd = ~0ull;
+	pmu->pebs_enable_rsvd = ~0ull;
+	pmu->pebs_data_cfg_rsvd = ~0ull;
 	bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
 
 	if (!vcpu->kvm->arch.enable_pmu)
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 4d52b0b539ba..2eab8ea610db 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -129,7 +129,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
 static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
 						 u64 data)
 {
-	return !(pmu->global_ctrl_mask & data);
+	return !(pmu->global_ctrl_rsvd & data);
 }
 
 /* returns general purpose PMC with the specified MSR. Note that it can be
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index dfcc38bd97d3..6e908bdc3310 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -199,8 +199,8 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 					 kvm_pmu_cap.num_counters_gp);
 
 	if (pmu->version > 1) {
-		pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
-		pmu->global_status_mask = pmu->global_ctrl_mask;
+		pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+		pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
 	}
 
 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index be40474de6e4..eaee9a08952e 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -348,14 +348,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		if (data & pmu->fixed_ctr_ctrl_mask)
+		if (data & pmu->fixed_ctr_ctrl_rsvd)
 			return 1;
 
 		if (pmu->fixed_ctr_ctrl != data)
 			reprogram_fixed_counters(pmu, data);
 		break;
 	case MSR_IA32_PEBS_ENABLE:
-		if (data & pmu->pebs_enable_mask)
+		if (data & pmu->pebs_enable_rsvd)
 			return 1;
 
 		if (pmu->pebs_enable != data) {
@@ -371,7 +371,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		pmu->ds_area = data;
 		break;
 	case MSR_PEBS_DATA_CFG:
-		if (data & pmu->pebs_data_cfg_mask)
+		if (data & pmu->pebs_data_cfg_rsvd)
 			return 1;
 
 		pmu->pebs_data_cfg = data;
@@ -456,7 +456,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	union cpuid10_eax eax;
 	union cpuid10_edx edx;
 	u64 perf_capabilities;
-	u64 counter_mask;
+	u64 counter_rsvd;
 	int i;
 
 	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
@@ -502,21 +502,21 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	}
 
 	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
-	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
+		pmu->fixed_ctr_ctrl_rsvd &= ~(0xbull << (i * 4));
+	counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
-	pmu->global_ctrl_mask = counter_mask;
+	pmu->global_ctrl_rsvd = counter_rsvd;
 
 	/*
 	 * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
 	 * share reserved bit definitions.  The kernel just happens to use
 	 * OVF_CTRL for the names.
 	 */
-	pmu->global_status_mask = pmu->global_ctrl_mask
+	pmu->global_status_rsvd = pmu->global_ctrl_rsvd
 			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
 			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
 	if (vmx_pt_mode_is_host_guest())
-		pmu->global_status_mask &=
+		pmu->global_status_rsvd &=
 				~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
 	entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
@@ -544,15 +544,15 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
 	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
 		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
-			pmu->pebs_enable_mask = counter_mask;
+			pmu->pebs_enable_rsvd = counter_rsvd;
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
 			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-				pmu->fixed_ctr_ctrl_mask &=
+				pmu->fixed_ctr_ctrl_rsvd &=
 					~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
 			}
-			pmu->pebs_data_cfg_mask = ~0xff00000full;
+			pmu->pebs_data_cfg_rsvd = ~0xff00000full;
 		} else {
-			pmu->pebs_enable_mask =
+			pmu->pebs_enable_rsvd =
 				~((1ull << pmu->nr_arch_gp_counters) - 1);
 		}
 	}
-- 
2.40.1



* [PATCH 2/2] KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
  2024-04-30  0:52 [PATCH 0/2] vPMU code refines Dapeng Mi
  2024-04-30  0:52 ` [PATCH 1/2] KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu Dapeng Mi
@ 2024-04-30  0:52 ` Dapeng Mi
  2024-04-30 18:15 ` [PATCH 0/2] vPMU code refines Mingwei Zhang
  2024-06-04 23:29 ` Sean Christopherson
  3 siblings, 0 replies; 8+ messages in thread
From: Dapeng Mi @ 2024-04-30  0:52 UTC
  To: Sean Christopherson, Paolo Bonzini
  Cc: kvm, linux-kernel, Jim Mattson, Mingwei Zhang, Xiong Zhang,
	Zhenyu Wang, Like Xu, Jinrong Liang, Dapeng Mi, Dapeng Mi

Magic numbers are used to manipulate the bit fields of the
FIXED_CTR_CTRL MSR, which makes the code difficult to read. Replace
these magic numbers with the pre-defined macros.
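
For reference, here is a sketch of the macros used below. The values are
inferred from the magic numbers they replace; the authoritative
definitions live in the x86 perf headers, not in this patch:

	#define INTEL_FIXED_BITS_MASK		0xFULL
	#define INTEL_FIXED_BITS_STRIDE		4
	#define INTEL_FIXED_0_KERNEL		(1ULL << 0)	/* was 0x1: count ring 0 */
	#define INTEL_FIXED_0_USER		(1ULL << 1)	/* was 0x2: count ring 3 */
	#define INTEL_FIXED_0_ENABLE_PMI	(1ULL << 3)	/* was 0x8: PMI on overflow */
	#define ICL_FIXED_0_ADAPTIVE		(1ULL << 32)	/* adaptive PEBS record enable */

	/* Position 'bits' within the 4-bit control field of fixed counter 'idx'. */
	static inline u64 intel_fixed_bits_by_idx(int idx, u64 bits)
	{
		return bits << (idx * INTEL_FIXED_BITS_STRIDE);
	}

E.g. the old '0xbull << (i * 4)' becomes
intel_fixed_bits_by_idx(i, INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER |
			    INTEL_FIXED_0_ENABLE_PMI).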

Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
---
 arch/x86/kvm/pmu.c           | 10 +++++-----
 arch/x86/kvm/pmu.h           |  6 ++++--
 arch/x86/kvm/vmx/pmu_intel.c | 12 +++++++++---
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index afbd67ca782c..0314a4fe8b2d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -469,11 +469,11 @@ static int reprogram_counter(struct kvm_pmc *pmc)
 	if (pmc_is_fixed(pmc)) {
 		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
 						  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
-		if (fixed_ctr_ctrl & 0x1)
+		if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL)
 			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
-		if (fixed_ctr_ctrl & 0x2)
+		if (fixed_ctr_ctrl & INTEL_FIXED_0_USER)
 			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
-		if (fixed_ctr_ctrl & 0x8)
+		if (fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI)
 			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
 		new_config = (u64)fixed_ctr_ctrl;
 	}
@@ -846,8 +846,8 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
 	} else {
 		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
 					  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
-		select_os = config & 0x1;
-		select_user = config & 0x2;
+		select_os = config & INTEL_FIXED_0_KERNEL;
+		select_user = config & INTEL_FIXED_0_USER;
 	}
 
 	/*
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 2eab8ea610db..d54741fe4bdd 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -14,7 +14,8 @@
 					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
 
 /* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
-#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
+#define fixed_ctrl_field(ctrl_reg, idx) \
+	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
 
 #define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
 #define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
@@ -170,7 +171,8 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 
 	if (pmc_is_fixed(pmc))
 		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
-					pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;
+					pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
+					(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);
 
 	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
 }
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index eaee9a08952e..846a4e7fd34a 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -501,8 +501,14 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
 
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-		pmu->fixed_ctr_ctrl_rsvd &= ~(0xbull << (i * 4));
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+		pmu->fixed_ctr_ctrl_rsvd &=
+			 ~intel_fixed_bits_by_idx(i,
+						  INTEL_FIXED_0_KERNEL |
+						  INTEL_FIXED_0_USER |
+						  INTEL_FIXED_0_ENABLE_PMI);
+	}
+
 	counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
 	pmu->global_ctrl_rsvd = counter_rsvd;
@@ -548,7 +554,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
 			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
 				pmu->fixed_ctr_ctrl_rsvd &=
-					~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
+					~intel_fixed_bits_by_idx(i, ICL_FIXED_0_ADAPTIVE);
 			}
 			pmu->pebs_data_cfg_rsvd = ~0xff00000full;
 		} else {
-- 
2.40.1



* Re: [PATCH 0/2] vPMU code refines
  2024-04-30  0:52 [PATCH 0/2] vPMU code refines Dapeng Mi
  2024-04-30  0:52 ` [PATCH 1/2] KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu Dapeng Mi
  2024-04-30  0:52 ` [PATCH 2/2] KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros Dapeng Mi
@ 2024-04-30 18:15 ` Mingwei Zhang
  2024-05-06  1:37   ` Mi, Dapeng
  2024-06-04 23:29 ` Sean Christopherson
  3 siblings, 1 reply; 8+ messages in thread
From: Mingwei Zhang @ 2024-04-30 18:15 UTC
  To: Dapeng Mi
  Cc: Sean Christopherson, Paolo Bonzini, kvm, linux-kernel,
	Jim Mattson, Xiong Zhang, Zhenyu Wang, Like Xu, Jinrong Liang,
	Dapeng Mi

On Mon, Apr 29, 2024 at 5:45 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>
> This small patchset refines the ambiguous naming in the kvm_pmu structure
> and uses macros instead of magic numbers to manipulate the FIXED_CTR_CTRL
> MSR, to improve readability.
>
> No logic change is introduced in this patchset.
>
> Dapeng Mi (2):
>   KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu

So, it looks like the 1st patch is also in the upcoming RFCv2 for
mediated passthrough vPMU. I will remove that from my list then.

Thanks. Regards
-Mingwei

>   KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
>
>  arch/x86/include/asm/kvm_host.h | 10 ++++-----
>  arch/x86/kvm/pmu.c              | 26 ++++++++++++------------
>  arch/x86/kvm/pmu.h              |  8 +++++---
>  arch/x86/kvm/svm/pmu.c          |  4 ++--
>  arch/x86/kvm/vmx/pmu_intel.c    | 36 +++++++++++++++++++--------------
>  5 files changed, 46 insertions(+), 38 deletions(-)
>
>
> base-commit: 7b076c6a308ec5bce9fc96e2935443ed228b9148
> --
> 2.40.1
>


* Re: [PATCH 0/2] vPMU code refines
  2024-04-30 18:15 ` [PATCH 0/2] vPMU code refines Mingwei Zhang
@ 2024-05-06  1:37   ` Mi, Dapeng
  2024-05-06  5:35     ` Mingwei Zhang
  0 siblings, 1 reply; 8+ messages in thread
From: Mi, Dapeng @ 2024-05-06  1:37 UTC
  To: Mingwei Zhang
  Cc: Sean Christopherson, Paolo Bonzini, kvm, linux-kernel,
	Jim Mattson, Xiong Zhang, Zhenyu Wang, Like Xu, Jinrong Liang,
	Dapeng Mi


On 5/1/2024 2:15 AM, Mingwei Zhang wrote:
> On Mon, Apr 29, 2024 at 5:45 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>> This small patchset refines the ambiguous naming in the kvm_pmu structure
>> and uses macros instead of magic numbers to manipulate the FIXED_CTR_CTRL
>> MSR, to improve readability.
>>
>> No logic change is introduced in this patchset.
>>
>> Dapeng Mi (2):
>>   KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
> So, it looks like the 1st patch is also in the upcoming RFCv2 for
> mediated passthrough vPMU. I will remove that from my list then.

Mingwei, we'd better keep this patch in RFCv2 until this patchset is
merged; that way we won't need to rebase it again once this patch lands.
Thanks.


> Thanks. Regards
> -Mingwei
>
>>   KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
>>
>>  arch/x86/include/asm/kvm_host.h | 10 ++++-----
>>  arch/x86/kvm/pmu.c              | 26 ++++++++++++------------
>>  arch/x86/kvm/pmu.h              |  8 +++++---
>>  arch/x86/kvm/svm/pmu.c          |  4 ++--
>>  arch/x86/kvm/vmx/pmu_intel.c    | 36 +++++++++++++++++++--------------
>>  5 files changed, 46 insertions(+), 38 deletions(-)
>>
>>
>> base-commit: 7b076c6a308ec5bce9fc96e2935443ed228b9148
>> --
>> 2.40.1
>>


* Re: [PATCH 0/2] vPMU code refines
  2024-05-06  1:37   ` Mi, Dapeng
@ 2024-05-06  5:35     ` Mingwei Zhang
  2024-05-06  8:01       ` Mi, Dapeng
  0 siblings, 1 reply; 8+ messages in thread
From: Mingwei Zhang @ 2024-05-06  5:35 UTC
  To: Mi, Dapeng
  Cc: Sean Christopherson, Paolo Bonzini, kvm, linux-kernel,
	Jim Mattson, Xiong Zhang, Zhenyu Wang, Like Xu, Jinrong Liang,
	Dapeng Mi

On Sun, May 5, 2024 at 6:37 PM Mi, Dapeng <dapeng1.mi@linux.intel.com> wrote:
>
>
> On 5/1/2024 2:15 AM, Mingwei Zhang wrote:
> > On Mon, Apr 29, 2024 at 5:45 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
> >> This small patchset refines the ambiguous naming in the kvm_pmu structure
> >> and uses macros instead of magic numbers to manipulate the FIXED_CTR_CTRL
> >> MSR, to improve readability.
> >>
> >> No logic change is introduced in this patchset.
> >>
> >> Dapeng Mi (2):
> >>   KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
> > So, it looks like the 1st patch is also in the upcoming RFCv2 for
> > mediated passthrough vPMU. I will remove that from my list then.
>
> Mingwei, we'd better keep this patch in RFCv2 until this patchset is
> merged; that way we won't need to rebase it again once this patch lands.
> Thanks.
>

yeah. too late. I don't want to have a duplicate patch in LKML. On the
other hand, you could have waited a little bit before sending this
one. Next time, coordinate with us before sending.

Thanks.
-Mingwei
>
> > Thanks. Regards
> > -Mingwei
> >
> >>   KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
> >>
> >>  arch/x86/include/asm/kvm_host.h | 10 ++++-----
> >>  arch/x86/kvm/pmu.c              | 26 ++++++++++++------------
> >>  arch/x86/kvm/pmu.h              |  8 +++++---
> >>  arch/x86/kvm/svm/pmu.c          |  4 ++--
> >>  arch/x86/kvm/vmx/pmu_intel.c    | 36 +++++++++++++++++++--------------
> >>  5 files changed, 46 insertions(+), 38 deletions(-)
> >>
> >>
> >> base-commit: 7b076c6a308ec5bce9fc96e2935443ed228b9148
> >> --
> >> 2.40.1
> >>


* Re: [PATCH 0/2] vPMU code refines
  2024-05-06  5:35     ` Mingwei Zhang
@ 2024-05-06  8:01       ` Mi, Dapeng
  0 siblings, 0 replies; 8+ messages in thread
From: Mi, Dapeng @ 2024-05-06  8:01 UTC
  To: Mingwei Zhang
  Cc: Sean Christopherson, Paolo Bonzini, kvm, linux-kernel,
	Jim Mattson, Xiong Zhang, Zhenyu Wang, Like Xu, Jinrong Liang,
	Dapeng Mi


On 5/6/2024 1:35 PM, Mingwei Zhang wrote:
> On Sun, May 5, 2024 at 6:37 PM Mi, Dapeng <dapeng1.mi@linux.intel.com> wrote:
>>
>> On 5/1/2024 2:15 AM, Mingwei Zhang wrote:
>>> On Mon, Apr 29, 2024 at 5:45 PM Dapeng Mi <dapeng1.mi@linux.intel.com> wrote:
>>>> This small patchset refines the ambiguous naming in the kvm_pmu structure
>>>> and uses macros instead of magic numbers to manipulate the FIXED_CTR_CTRL
>>>> MSR, to improve readability.
>>>>
>>>> No logic change is introduced in this patchset.
>>>>
>>>> Dapeng Mi (2):
>>>>   KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
>>> So, it looks like the 1st patch is also in the upcoming RFCv2 for
>>> mediated passthrough vPMU. I will remove that from my list then.
>> Mingwei, we'd better keep this patch in RFCv2 until this patchset is
>> merged; that way we won't need to rebase it again once this patch lands.
>> Thanks.
>>
> yeah. too late. I don't want to have a duplicate patch in LKML. On the
> other hand, you could have waited a little bit before sending this
> one. Next time, coordinate with us before sending.

In theory this patch has nothing to do with the mediated vPMU patchset,
and it can be merged earlier than the mediated vPMU patchset, which may
need a long time to review and discuss. I hope this patch can be merged
ASAP so that readers won't be misled by the ambiguous suffix.


>
> Thanks.
> -Mingwei
>>> Thanks. Regards
>>> -Mingwei
>>>
>>>>   KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
>>>>
>>>>  arch/x86/include/asm/kvm_host.h | 10 ++++-----
>>>>  arch/x86/kvm/pmu.c              | 26 ++++++++++++------------
>>>>  arch/x86/kvm/pmu.h              |  8 +++++---
>>>>  arch/x86/kvm/svm/pmu.c          |  4 ++--
>>>>  arch/x86/kvm/vmx/pmu_intel.c    | 36 +++++++++++++++++++--------------
>>>>  5 files changed, 46 insertions(+), 38 deletions(-)
>>>>
>>>>
>>>> base-commit: 7b076c6a308ec5bce9fc96e2935443ed228b9148
>>>> --
>>>> 2.40.1
>>>>


* Re: [PATCH 0/2] vPMU code refines
  2024-04-30  0:52 [PATCH 0/2] vPMU code refines Dapeng Mi
                   ` (2 preceding siblings ...)
  2024-04-30 18:15 ` [PATCH 0/2] vPMU code refines Mingwei Zhang
@ 2024-06-04 23:29 ` Sean Christopherson
  3 siblings, 0 replies; 8+ messages in thread
From: Sean Christopherson @ 2024-06-04 23:29 UTC
  To: Sean Christopherson, Paolo Bonzini, Dapeng Mi
  Cc: kvm, linux-kernel, Jim Mattson, Mingwei Zhang, Xiong Zhang,
	Zhenyu Wang, Like Xu, Jinrong Liang, Dapeng Mi

On Tue, 30 Apr 2024 08:52:37 +0800, Dapeng Mi wrote:
> This small patchset refines the ambiguous naming in the kvm_pmu structure
> and uses macros instead of magic numbers to manipulate the FIXED_CTR_CTRL
> MSR, to improve readability.
> 
> No logic change is introduced in this patchset.
> 
> Dapeng Mi (2):
>   KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
>   KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
> 
> [...]

Applied to kvm-x86 pmu, thanks!

[1/2] KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu
      https://github.com/kvm-x86/linux/commit/0e102ce3d413
[2/2] KVM: x86/pmu: Manipulate FIXED_CTR_CTRL MSR with macros
      https://github.com/kvm-x86/linux/commit/75430c412a31

--
https://github.com/kvm-x86/linux/tree/next

