From: Sibi Sankar <quic_sibis@quicinc.com>
To: <sudeep.holla@arm.com>, <cristian.marussi@arm.com>,
	<rafael@kernel.org>, <viresh.kumar@linaro.org>,
	<morten.rasmussen@arm.com>, <dietmar.eggemann@arm.com>,
	<lukasz.luba@arm.com>, <pierre.gondois@arm.com>
Cc: <linux-arm-kernel@lists.infradead.org>,
	<linux-pm@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<quic_mdtipton@quicinc.com>, <linux-arm-msm@vger.kernel.org>,
	Sibi Sankar <quic_sibis@quicinc.com>
Subject: [PATCH V4 2/2] cpufreq: scmi: Register for limit change notifications
Date: Thu, 28 Mar 2024 13:11:31 +0530
Message-ID: <20240328074131.2839871-3-quic_sibis@quicinc.com>
In-Reply-To: <20240328074131.2839871-1-quic_sibis@quicinc.com>

Register for limit change notifications, if supported, and use the
throttled frequency reported in the notification to apply HW pressure.

Signed-off-by: Sibi Sankar <quic_sibis@quicinc.com>
---

v4:
* Use an interim variable to show the kHz calculation. [Lukasz]
* Use driver_data to pass on the handle and scmi_dev instead of using
  global variables. Dropped Lukasz's Rb due to adding these minor
  changes.
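
For reference, a minimal standalone sketch (not part of the patch) of the
interim-variable kHz calculation and clamping performed in
scmi_limit_notify_cb(). The frequency values and the clamp_ul() helper
below are hypothetical; HZ_PER_KHZ mirrors the <linux/units.h> definition
of 1000, and the bounds stand in for policy->cpuinfo.min_freq/max_freq:

	#include <stdio.h>

	#define HZ_PER_KHZ	1000UL

	/* Stand-in for the kernel's clamp() on unsigned long values */
	static unsigned long clamp_ul(unsigned long val, unsigned long lo,
				      unsigned long hi)
	{
		return val < lo ? lo : (val > hi ? hi : val);
	}

	int main(void)
	{
		/* Hypothetical notification payload: throttled max frequency in Hz */
		unsigned long range_max_freq = 1804800000UL;
		/* Hypothetical policy->cpuinfo bounds in kHz */
		unsigned long cpuinfo_min_freq = 300000UL;
		unsigned long cpuinfo_max_freq = 2803200UL;
		/* Interim variable, as in scmi_limit_notify_cb() */
		unsigned long limit_freq_khz = range_max_freq / HZ_PER_KHZ;

		/* 1804800000 Hz / 1000 = 1804800 kHz, already within bounds */
		printf("new policy->max = %lu kHz\n",
		       clamp_ul(limit_freq_khz, cpuinfo_min_freq, cpuinfo_max_freq));

		return 0;
	}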

 drivers/cpufreq/scmi-cpufreq.c | 44 ++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 3b4f6bfb2f4c..d946b7a08258 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -21,11 +21,18 @@
 #include <linux/types.h>
 #include <linux/units.h>
 
+struct scmi_cpufreq_driver_data {
+	struct scmi_device *sdev;
+	const struct scmi_handle *handle;
+};
+
 struct scmi_data {
 	int domain_id;
 	int nr_opp;
 	struct device *cpu_dev;
+	struct cpufreq_policy *policy;
 	cpumask_var_t opp_shared_cpus;
+	struct notifier_block limit_notify_nb;
 };
 
 static struct scmi_protocol_handle *ph;
@@ -174,6 +181,22 @@ static struct freq_attr *scmi_cpufreq_hw_attr[] = {
 	NULL,
 };
 
+static int scmi_limit_notify_cb(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct scmi_data *priv = container_of(nb, struct scmi_data, limit_notify_nb);
+	struct scmi_perf_limits_report *limit_notify = data;
+	struct cpufreq_policy *policy = priv->policy;
+	unsigned int limit_freq_khz;
+
+	limit_freq_khz = limit_notify->range_max_freq / HZ_PER_KHZ;
+
+	policy->max = clamp(limit_freq_khz, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+
+	cpufreq_update_pressure(policy);
+
+	return NOTIFY_OK;
+}
+
 static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret, nr_opp, domain;
@@ -181,6 +204,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct scmi_data *priv;
 	struct cpufreq_frequency_table *freq_table;
+	struct scmi_cpufreq_driver_data *data = cpufreq_get_driver_data();
 
 	cpu_dev = get_cpu_device(policy->cpu);
 	if (!cpu_dev) {
@@ -294,6 +318,17 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 		}
 	}
 
+	priv->limit_notify_nb.notifier_call = scmi_limit_notify_cb;
+	ret = data->handle->notify_ops->devm_event_notifier_register(data->sdev, SCMI_PROTOCOL_PERF,
+							SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
+							&domain,
+							&priv->limit_notify_nb);
+	if (ret)
+		dev_warn(cpu_dev,
+			 "failed to register for limits change notifier for domain %d\n", domain);
+
+	priv->policy = policy;
+
 	return 0;
 
 out_free_opp:
@@ -366,12 +401,21 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)
 	int ret;
 	struct device *dev = &sdev->dev;
 	const struct scmi_handle *handle;
+	struct scmi_cpufreq_driver_data *data;
 
 	handle = sdev->handle;
 
 	if (!handle)
 		return -ENODEV;
 
+	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->sdev = sdev;
+	data->handle = handle;
+	scmi_cpufreq_driver.driver_data = data;
+
 	perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
 	if (IS_ERR(perf_ops))
 		return PTR_ERR(perf_ops);
-- 
2.34.1



Thread overview: 9+ messages
2024-03-28  7:41 [PATCH V4 0/2] firmware: arm_scmi: Register and handle limits change notification Sibi Sankar
2024-03-28  7:41 ` [PATCH V4 1/2] cpufreq: Export cpufreq_update_pressure Sibi Sankar
2024-03-28  7:41 ` Sibi Sankar [this message]
2024-03-28  8:06   ` [PATCH V4 2/2] cpufreq: scmi: Register for limit change notifications Lukasz Luba
2024-05-01  8:21   ` Cristian Marussi
2024-05-01  8:26     ` Cristian Marussi
2024-05-14  9:40     ` Sibi Sankar
2024-05-28  9:08   ` Vincent Guittot
2024-06-03 18:48     ` Sibi Sankar
