From: Jeremy Cline <jcline@redhat.com>
To: "Felix Kuehling" <Felix.Kuehling@amd.com>,
	"Alex Deucher" <alexander.deucher@amd.com>,
	"Christian König" <christian.koenig@amd.com>
Cc: David Airlie <airlied@linux.ie>,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
	Jeremy Cline <jcline@redhat.com>,
	amd-gfx@lists.freedesktop.org, Daniel Vetter <daniel@ffwll.ch>,
	Kent Russell <kent.russell@amd.com>
Subject: [PATCH] drm/amdkfd: Fix out-of-bounds read in kfd_create_vcrat_image_cpu()
Date: Fri,  8 Jan 2021 11:31:04 -0500
Message-ID: <20210108163104.411442-1-jcline@redhat.com>

KASAN reported a slab-out-of-bounds read of size 1 in
kfd_create_vcrat_image_cpu().

This occurs, for example, on an x86_64 system with a single NUMA node:
kfd_fill_iolink_info_for_cpu() is a no-op, but afterwards
sub_type_hdr->length, which is out-of-bounds, is read and multiplied by
entries. Fortunately, entries is 0 in this case, so the overall
crat_table->length is still correct.
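
For illustration, the pre-patch flow on such a system is roughly (a
condensed sketch of the relevant lines, not the exact source):

	ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
			&entries,
			(struct crat_subtype_iolink *)sub_type_hdr);
	/* The helper's loop never runs here, so *sub_type_hdr is never
	 * written and entries stays 0, yet the length field is still read:
	 */
	crat_table->length += (sub_type_hdr->length * entries);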

This refactors the helper functions to accept the crat_table directly
and calculate the table entry pointer based on the current table length.
This allows us to avoid an out-of-bounds read and hopefully makes the
pointer arithmetic clearer. It should have no functional change beyond
removing the out-of-bounds read.

Fixes: b7b6c38529c9 ("drm/amdkfd: Calculate CPU VCRAT size dynamically (v2)")
Signed-off-by: Jeremy Cline <jcline@redhat.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 86 +++++++++++++--------------
 1 file changed, 40 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 8cac497c2c45..e50db2c0f4ee 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -829,21 +829,24 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
  *
  *	@numa_node_id: CPU NUMA node id
- *	@avail_size: Available size in the memory
- *	@sub_type_hdr: Memory into which compute info will be filled in
+ *	@avail_size: Available space in bytes at the end of the @crat_table.
+ *	@crat_table: The CRAT table to append the Compute info to;
+ *		on success the table length and total_entries count are updated.
  *
  *	Return 0 if successful else return -ve value
  */
 static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
-				int proximity_domain,
-				struct crat_subtype_computeunit *sub_type_hdr)
+				struct crat_header *crat_table)
 {
 	const struct cpumask *cpumask;
+	struct crat_subtype_computeunit *sub_type_hdr;
 
 	*avail_size -= sizeof(struct crat_subtype_computeunit);
 	if (*avail_size < 0)
 		return -ENOMEM;
 
+	sub_type_hdr = (typeof(sub_type_hdr))((char *)crat_table +
+		crat_table->length);
 	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
 
 	/* Fill in subtype header data */
@@ -855,36 +858,42 @@ static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
 
 	/* Fill in CU data */
 	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
-	sub_type_hdr->proximity_domain = proximity_domain;
+	sub_type_hdr->proximity_domain = crat_table->num_domains;
 	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
 	if (sub_type_hdr->processor_id_low == -1)
 		return -EINVAL;
 
 	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
 
+	crat_table->length += sub_type_hdr->length;
+	crat_table->total_entries++;
+
 	return 0;
 }
 
 /* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
  *
  *	@numa_node_id: CPU NUMA node id
- *	@avail_size: Available size in the memory
- *	@sub_type_hdr: Memory into which compute info will be filled in
+ *	@avail_size: Available space in bytes at the end of the @crat_table.
+ *	@crat_table: The CRAT table to append the Memory info to;
+ *		on success the table length and total_entries count are updated.
  *
  *	Return 0 if successful else return -ve value
  */
 static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
-			int proximity_domain,
-			struct crat_subtype_memory *sub_type_hdr)
+			struct crat_header *crat_table)
 {
 	uint64_t mem_in_bytes = 0;
 	pg_data_t *pgdat;
 	int zone_type;
+	struct crat_subtype_memory *sub_type_hdr;
 
 	*avail_size -= sizeof(struct crat_subtype_memory);
 	if (*avail_size < 0)
 		return -ENOMEM;
 
+	sub_type_hdr = (typeof(sub_type_hdr))((char *)crat_table +
+		crat_table->length);
 	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
 
 	/* Fill in subtype header data */
@@ -905,27 +914,37 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
 
 	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
 	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
-	sub_type_hdr->proximity_domain = proximity_domain;
+	sub_type_hdr->proximity_domain = crat_table->num_domains;
+
+	crat_table->length += sub_type_hdr->length;
+	crat_table->total_entries++;
 
 	return 0;
 }
 
 #ifdef CONFIG_X86_64
+/* kfd_fill_iolink_info_for_cpu() - Add IO link info to a Virtual CRAT
+ *
+ * @numa_node_id: The NUMA node ID for the CPU; as from for_each_online_node()
+ * @avail_size: Available space in bytes at the end of the @crat_table.
+ * @crat_table: The CRAT table to append the IO link info to; on success the
+ *              table length and total_entries count are updated.
+ *
+ * Return: 0 if successful else return -ve value
+ */
 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
-				uint32_t *num_entries,
-				struct crat_subtype_iolink *sub_type_hdr)
+				struct crat_header *crat_table)
 {
 	int nid;
 	struct cpuinfo_x86 *c = &cpu_data(0);
 	uint8_t link_type;
+	struct crat_subtype_iolink *sub_type_hdr;
 
 	if (c->x86_vendor == X86_VENDOR_AMD)
 		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
 	else
 		link_type = CRAT_IOLINK_TYPE_QPI_1_1;
 
-	*num_entries = 0;
-
 	/* Create IO links from this node to other CPU nodes */
 	for_each_online_node(nid) {
 		if (nid == numa_node_id) /* node itself */
@@ -935,6 +954,8 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 		if (*avail_size < 0)
 			return -ENOMEM;
 
+		sub_type_hdr = (typeof(sub_type_hdr))((char *)crat_table +
+			crat_table->length);
 		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
 
 		/* Fill in subtype header data */
@@ -947,8 +968,8 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 		sub_type_hdr->proximity_domain_to = nid;
 		sub_type_hdr->io_interface_type = link_type;
 
-		(*num_entries)++;
-		sub_type_hdr++;
+		crat_table->length += sub_type_hdr->length;
+		crat_table->total_entries++;
 	}
 
 	return 0;
@@ -966,12 +987,8 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
 	struct acpi_table_header *acpi_table;
 	acpi_status status;
-	struct crat_subtype_generic *sub_type_hdr;
 	int avail_size = *size;
 	int numa_node_id;
-#ifdef CONFIG_X86_64
-	uint32_t entries = 0;
-#endif
 	int ret = 0;
 
 	if (!pcrat_image)
@@ -1003,48 +1020,25 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 	crat_table->total_entries = 0;
 	crat_table->num_domains = 0;
 
-	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
-
 	for_each_online_node(numa_node_id) {
 		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
 			continue;
 
 		/* Fill in Subtype: Compute Unit */
-		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
-			crat_table->num_domains,
-			(struct crat_subtype_computeunit *)sub_type_hdr);
+		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size, crat_table);
 		if (ret < 0)
 			return ret;
-		crat_table->length += sub_type_hdr->length;
-		crat_table->total_entries++;
-
-		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
-			sub_type_hdr->length);
 
 		/* Fill in Subtype: Memory */
-		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
-			crat_table->num_domains,
-			(struct crat_subtype_memory *)sub_type_hdr);
+		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size, crat_table);
 		if (ret < 0)
 			return ret;
-		crat_table->length += sub_type_hdr->length;
-		crat_table->total_entries++;
-
-		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
-			sub_type_hdr->length);
 
 		/* Fill in Subtype: IO Link */
 #ifdef CONFIG_X86_64
-		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
-				&entries,
-				(struct crat_subtype_iolink *)sub_type_hdr);
+		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, crat_table);
 		if (ret < 0)
 			return ret;
-		crat_table->length += (sub_type_hdr->length * entries);
-		crat_table->total_entries += entries;
-
-		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
-				sub_type_hdr->length * entries);
 #else
 		pr_info("IO link not available for non x86 platforms\n");
 #endif
-- 
2.28.0
