From: Konrad Dybcio <konrad.dybcio@linaro.org>
To: Andy Gross <agross@kernel.org>,
	Bjorn Andersson <andersson@kernel.org>,
	Michael Turquette <mturquette@baylibre.com>,
	Stephen Boyd <sboyd@kernel.org>,
	Georgi Djakov <djakov@kernel.org>, Leo Yan <leo.yan@linaro.org>,
	Evan Green <evgreen@chromium.org>
Cc: Marijn Suijten <marijn.suijten@somainline.org>,
	linux-arm-msm@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-clk@vger.kernel.org, linux-pm@vger.kernel.org,
	Konrad Dybcio <konrad.dybcio@linaro.org>,
	Stephan Gerhold <stephan@gerhold.net>
Subject: [PATCH v2 22/22] interconnect: qcom: icc-rpm: Fix bandwidth calculations
Date: Fri, 09 Jun 2023 22:19:27 +0200
Message-ID: <20230526-topic-smd_icc-v2-22-e5934b07d813@linaro.org>
In-Reply-To: <20230526-topic-smd_icc-v2-0-e5934b07d813@linaro.org>

Up until now, we've been aggregating the bandwidth values and only
dividing them by the bus width of the source node. This was completely
wrong, as different nodes on a given path may (and usually do) have
varying bus widths. That, in turn, resulted in the calculated clock
rates being bogus - usually much higher than necessary, as
NoC_A<->NoC_B links are very wide.
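
To illustrate with made-up numbers (hypothetical, not taken from any
real SoC, and ignoring peak votes and channel counts for simplicity):
consider a provider with a source node X (buswidth 8) requesting
400000 kB/s and a wide NoC_A<->NoC_B link Y (buswidth 32) carrying
3200000 kB/s. The old logic computed:

	(400000 + 3200000) / 8 = 450000 kHz

whereas the rate actually required to satisfy both nodes is only:

	max(400000 / 8, 3200000 / 32) = 100000 kHz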

Since we're not using the aggregate bandwidth value for anything other
than clock rate calculations, remodel qcom_icc_bus_aggregate() to
calculate the per-context clock rate for a given provider, taking into
account the bus width of every individual node.
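
In other words, the per-bucket clock rate (kept in kHz, as that's what
RPM accepts) now becomes:

	agg_clk_rate[bucket] = max over all of the provider's nodes of:
		max(sum_avg[bucket] / channels, max_peak[bucket]) / buswidth

with the division by channels skipped for nodes that don't set one.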

Fixes: 30c8fa3ec61a ("interconnect: qcom: Add MSM8916 interconnect provider driver")
Reported-by: Stephan Gerhold <stephan@gerhold.net>
Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
---
 drivers/interconnect/qcom/icc-rpm.c | 59 ++++++++++++-------------------------
 1 file changed, 19 insertions(+), 40 deletions(-)

diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 1508233632f6..d177a76abe2a 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -293,58 +293,44 @@ static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
 }
 
 /**
- * qcom_icc_bus_aggregate - aggregate bandwidth by traversing all nodes
+ * qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
  * @provider: generic interconnect provider
- * @agg_avg: an array for aggregated average bandwidth of buckets
- * @agg_peak: an array for aggregated peak bandwidth of buckets
- * @max_agg_avg: pointer to max value of aggregated average bandwidth
+ * @agg_clk_rate: array containing the aggregated clock rates in kHz
  */
-static void qcom_icc_bus_aggregate(struct icc_provider *provider,
-				   u64 *agg_avg, u64 *agg_peak,
-				   u64 *max_agg_avg)
+static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
 {
-	struct icc_node *node;
+	u64 agg_avg_rate, agg_rate;
 	struct qcom_icc_node *qn;
-	u64 sum_avg[QCOM_SMD_RPM_STATE_NUM];
+	struct icc_node *node;
 	int i;
 
-	/* Initialise aggregate values */
-	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
-		agg_avg[i] = 0;
-		agg_peak[i] = 0;
-	}
-
-	*max_agg_avg = 0;
-
 	/*
-	 * Iterate nodes on the interconnect and aggregate bandwidth
-	 * requests for every bucket.
+	 * Iterate nodes on the provider, aggregate bandwidth requests for
+	 * every bucket and convert them into bus clock rates.
 	 */
 	list_for_each_entry(node, &provider->nodes, node_list) {
 		qn = node->data;
 		for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
 			if (qn->channels)
-				sum_avg[i] = div_u64(qn->sum_avg[i], qn->channels);
+				agg_avg_rate = div_u64(qn->sum_avg[i], qn->channels);
 			else
-				sum_avg[i] = qn->sum_avg[i];
-			agg_avg[i] += sum_avg[i];
-			agg_peak[i] = max_t(u64, agg_peak[i], qn->max_peak[i]);
+				agg_avg_rate = qn->sum_avg[i];
+
+			agg_rate = max_t(u64, agg_avg_rate, qn->max_peak[i]);
+			do_div(agg_rate, qn->buswidth);
+
+			agg_clk_rate[i] = max_t(u64, agg_clk_rate[i], agg_rate);
 		}
 	}
-
-	/* Find maximum values across all buckets */
-	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++)
-		*max_agg_avg = max_t(u64, *max_agg_avg, agg_avg[i]);
 }
 
 static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 {
-	struct qcom_icc_provider *qp;
 	struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
+	u64 agg_clk_rate[QCOM_SMD_RPM_STATE_NUM] = { 0 };
 	struct icc_provider *provider;
+	struct qcom_icc_provider *qp;
 	u64 active_rate, sleep_rate;
-	u64 agg_avg[QCOM_SMD_RPM_STATE_NUM], agg_peak[QCOM_SMD_RPM_STATE_NUM];
-	u64 max_agg_avg;
 	int ret;
 
 	src_qn = src->data;
@@ -353,7 +339,9 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 	provider = src->provider;
 	qp = to_qcom_provider(provider);
 
-	qcom_icc_bus_aggregate(provider, agg_avg, agg_peak, &max_agg_avg);
+	qcom_icc_bus_aggregate(provider, agg_clk_rate);
+	active_rate = agg_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE];
+	sleep_rate = agg_clk_rate[QCOM_SMD_RPM_SLEEP_STATE];
 
 	ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg);
 	if (ret)
@@ -369,15 +357,6 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
 	if (!qp->bus_clk_desc && !qp->bus_clk)
 		return 0;
 
-	/* Intentionally keep the rates in kHz as that's what RPM accepts */
-	active_rate = max(agg_avg[QCOM_SMD_RPM_ACTIVE_STATE],
-			  agg_peak[QCOM_SMD_RPM_ACTIVE_STATE]);
-	do_div(active_rate, src_qn->buswidth);
-
-	sleep_rate = max(agg_avg[QCOM_SMD_RPM_SLEEP_STATE],
-			 agg_peak[QCOM_SMD_RPM_SLEEP_STATE]);
-	do_div(sleep_rate, src_qn->buswidth);
-
 	/*
 	 * Downstream checks whether the requested rate is zero, but it makes little sense
 	 * to vote for a value that's below the lower threshold, so let's not do so.

-- 
2.41.0

