From: Nicolin Chen <nicolinc@nvidia.com>
To: <will@kernel.org>, <robin.murphy@arm.com>
Cc: <joro@8bytes.org>, <jgg@nvidia.com>, <thierry.reding@gmail.com>,
	<vdumpa@nvidia.com>, <jonathanh@nvidia.com>,
	<linux-kernel@vger.kernel.org>, <iommu@lists.linux.dev>,
	<linux-arm-kernel@lists.infradead.org>,
	<linux-tegra@vger.kernel.org>
Subject: [PATCH v6 1/6] iommu/arm-smmu-v3: Pass in cmdq pointer to arm_smmu_cmdq_issue_cmdlist()
Date: Mon, 29 Apr 2024 21:43:44 -0700
Message-ID: <0acb55059f7212abdf4277a81e2f033127072bc9.1714451595.git.nicolinc@nvidia.com>
In-Reply-To: <cover.1714451595.git.nicolinc@nvidia.com>

The driver currently calls the arm_smmu_get_cmdq() helper in several places,
although all of them are reached from arm_smmu_cmdq_issue_cmdlist().

Allow the cmdq pointer to be passed in, instead of calling arm_smmu_get_cmdq()
in every helper.
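
In practice each poll helper simply gains a cmdq parameter, and
arm_smmu_cmdq_issue_cmdlist() resolves the queue once and threads it through,
roughly as follows (simplified excerpt for illustration; the diff below has
the actual change):

static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
					     struct arm_smmu_cmdq *cmdq,
					     struct arm_smmu_ll_queue *llq);

/* In arm_smmu_cmdq_issue_cmdlist(), simplified: */
struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
...
if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
	dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");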

This will also help the CMDQV extension in the NVIDIA Tegra241 SoC, whose
driver will maintain its own cmdq pointers and will need to redirect
arm_smmu->cmdq to one of its vcmdqs upon seeing a supported command.
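
As a rough illustration of that direction (a hypothetical sketch only: the
function, struct, and field names below are made up for this example, and the
real implementation arrives in a later patch of this series), such a driver
could plug in a queue-selection helper along these lines:

/*
 * Hypothetical CMDQV-style queue selection: fall back to the main SMMU CMDQ
 * for commands the extension cannot handle, otherwise return one of the
 * extension's own virtual command queues.
 */
static struct arm_smmu_cmdq *
example_cmdqv_get_cmdq(struct arm_smmu_device *smmu, u64 *cmds, int n)
{
	struct example_cmdqv *cmdqv = example_cmdqv_from_smmu(smmu);

	if (!cmdqv || !example_cmdqv_supports_cmds(cmdqv, cmds, n))
		return &smmu->cmdq;

	return &cmdqv->vcmdqs[raw_smp_processor_id() % cmdqv->num_vcmdqs].cmdq;
}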

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 41f93c3ab160..6a7e6b1ba5f7 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -596,11 +596,11 @@ static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
 
 /* Wait for the command queue to become non-full */
 static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
+					     struct arm_smmu_cmdq *cmdq,
 					     struct arm_smmu_ll_queue *llq)
 {
 	unsigned long flags;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	int ret = 0;
 
 	/*
@@ -631,11 +631,11 @@ static int arm_smmu_cmdq_poll_until_not_full(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
+					  struct arm_smmu_cmdq *cmdq,
 					  struct arm_smmu_ll_queue *llq)
 {
 	int ret = 0;
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
 
 	queue_poll_init(smmu, &qp);
@@ -655,10 +655,10 @@ static int __arm_smmu_cmdq_poll_until_msi(struct arm_smmu_device *smmu,
  * Must be called with the cmdq lock held in some capacity.
  */
 static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
+					       struct arm_smmu_cmdq *cmdq,
 					       struct arm_smmu_ll_queue *llq)
 {
 	struct arm_smmu_queue_poll qp;
-	struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
 	u32 prod = llq->prod;
 	int ret = 0;
 
@@ -705,12 +705,13 @@ static int __arm_smmu_cmdq_poll_until_consumed(struct arm_smmu_device *smmu,
 }
 
 static int arm_smmu_cmdq_poll_until_sync(struct arm_smmu_device *smmu,
+					 struct arm_smmu_cmdq *cmdq,
 					 struct arm_smmu_ll_queue *llq)
 {
 	if (smmu->options & ARM_SMMU_OPT_MSIPOLL)
-		return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
+		return __arm_smmu_cmdq_poll_until_msi(smmu, cmdq, llq);
 
-	return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
+	return __arm_smmu_cmdq_poll_until_consumed(smmu, cmdq, llq);
 }
 
 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
@@ -767,7 +768,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 
 		while (!queue_has_space(&llq, n + sync)) {
 			local_irq_restore(flags);
-			if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
+			if (arm_smmu_cmdq_poll_until_not_full(smmu, cmdq, &llq))
 				dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
 			local_irq_save(flags);
 		}
@@ -843,7 +844,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu,
 	/* 5. If we are inserting a CMD_SYNC, we must wait for it to complete */
 	if (sync) {
 		llq.prod = queue_inc_prod_n(&llq, n);
-		ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
+		ret = arm_smmu_cmdq_poll_until_sync(smmu, cmdq, &llq);
 		if (ret) {
 			dev_err_ratelimited(smmu->dev,
 					    "CMD_SYNC timeout at 0x%08x [hwprod 0x%08x, hwcons 0x%08x]\n",
-- 
2.43.0


Thread overview: 28+ messages
2024-04-30  4:43 [PATCH v6 0/6] Add Tegra241 (Grace) CMDQV Support (part 1/2) Nicolin Chen
2024-04-30  4:43 ` Nicolin Chen [this message]
2024-04-30 13:54   ` [PATCH v6 1/6] iommu/arm-smmu-v3: Pass in cmdq pointer to arm_smmu_cmdq_issue_cmdlist() Jason Gunthorpe
2024-04-30  4:43 ` [PATCH v6 2/6] iommu/arm-smmu-v3: Add CS_NONE quirk Nicolin Chen
2024-04-30 14:22   ` Jason Gunthorpe
2024-04-30 16:30     ` Nicolin Chen
2024-04-30 16:37       ` Jason Gunthorpe
2024-04-30 16:43         ` Nicolin Chen
2024-04-30  4:43 ` [PATCH v6 3/6] iommu/arm-smmu-v3: Make arm_smmu_cmdq_init reusable Nicolin Chen
2024-04-30 14:24   ` Jason Gunthorpe
2024-04-30 15:50     ` Nicolin Chen
2024-04-30  4:43 ` [PATCH v6 4/6] iommu/arm-smmu-v3: Make __arm_smmu_cmdq_skip_err reusable Nicolin Chen
2024-04-30 14:06   ` Jason Gunthorpe
2024-04-30 15:48     ` Nicolin Chen
2024-04-30  4:43 ` [PATCH v6 5/6] iommu/arm-smmu-v3: Add in-kernel support for NVIDIA Tegra241 (Grace) CMDQV Nicolin Chen
2024-04-30 16:35   ` Jason Gunthorpe
2024-04-30 18:08     ` Nicolin Chen
2024-05-01 13:00       ` Jason Gunthorpe
2024-05-01 17:43         ` Nicolin Chen
2024-05-02 12:41           ` Jason Gunthorpe
2024-05-02 19:26             ` Nicolin Chen
2024-04-30  4:43 ` [PATCH v6 6/6] iommu/tegra241-cmdqv: Limit CMDs for guest owned VINTF Nicolin Chen
2024-04-30 17:06   ` Jason Gunthorpe
2024-04-30 18:58     ` Nicolin Chen
2024-05-01  0:17       ` Jason Gunthorpe
2024-05-01 16:32         ` Nicolin Chen
2024-05-06  3:52         ` Nicolin Chen
2024-05-06 13:00           ` Jason Gunthorpe
