From: Keqian Zhu <zhukeqian1@huawei.com>
To: Alex Williamson, Kirti Wankhede, Cornelia Huck, Will Deacon,
	Marc Zyngier, Catalin Marinas
Cc: Mark Rutland, James Morse, Robin Murphy, Joerg Roedel, Daniel Lezcano,
	Thomas Gleixner, Suzuki K Poulose, Julien Thierry, Andrew Morton,
	Alexios Zavras
Subject: [PATCH 1/5] vfio/iommu_type1: Fix vfio_dma_populate_bitmap to avoid losing dirty log
Date: Thu, 7 Jan 2021 17:28:57 +0800
Message-ID: <20210107092901.19712-2-zhukeqian1@huawei.com>
In-Reply-To: <20210107092901.19712-1-zhukeqian1@huawei.com>
References: <20210107092901.19712-1-zhukeqian1@huawei.com>

Deferring the fully-dirty check on a vfio_dma until update_user_bitmap()
makes it easy to lose dirty log. For example, after pinned_page_dirty_scope
of the vfio_iommu is promoted, the vfio_dma is no longer considered fully
dirty, so dirty log generated before the promotion may be lost.

The key point is that pinned-dirty is not a real dirty-tracking mechanism:
it cannot continuously track dirty pages, it only restricts the dirty
scope. In that respect it is essentially the same as fully-dirty;
fully-dirty covers the full scope, while pinned-dirty covers the pinned
scope. So we must mark pages pinned-dirty or fully-dirty as soon as dirty
tracking starts or the dirty bitmap is cleared, to ensure the dirty log
is recorded right away.
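To make the lost-log window concrete, below is a minimal host-side model
of the pre-patch behaviour. It is illustrative only: the globals stand in
for iommu->pinned_page_dirty_scope, dma->iommu_mapped and dma->bitmap, and
read_bitmap_deferred()/populate_bitmap() are hypothetical names, not
kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NPAGES 8

static bool pinned_scope;        /* models iommu->pinned_page_dirty_scope */
static bool iommu_mapped = true; /* models dma->iommu_mapped */
static bool bitmap[NPAGES];      /* models dma->bitmap, one bool per page */

/* Pre-patch: the fully-dirty fallback fires only when the bitmap is read. */
static void read_bitmap_deferred(bool *out)
{
	memcpy(out, bitmap, sizeof(bitmap));
	if (!pinned_scope && iommu_mapped)
		memset(out, 1, sizeof(bitmap));
}

/* Post-patch: the bitmap is repopulated as soon as tracking starts. */
static void populate_bitmap(void)
{
	if (!pinned_scope && iommu_mapped)
		memset(bitmap, 1, sizeof(bitmap));
	/* with pinned scope, only the pinned ranges would be set here */
}

int main(void)
{
	bool out[NPAGES];

	memset(bitmap, 0, sizeof(bitmap)); /* tracking starts; pre-patch: no populate */
	/* ... device DMA happens here; nothing records it ... */
	pinned_scope = true;               /* scope promoted before userspace reads */
	read_bitmap_deferred(out);         /* fallback no longer fires */
	printf("page 0 dirty: %d (expected 1)\n", out[0]);
	/* Calling populate_bitmap() at start of tracking, as this patch
	 * does, would have reported the page as dirty. */
	return 0;
}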
Fixes: d6a4c185660c ("vfio iommu: Implementation of ioctl for dirty pages tracking")
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
---
 drivers/vfio/vfio_iommu_type1.c | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index bceda5e8baaa..b0a26e8e0adf 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -224,7 +224,7 @@ static void vfio_dma_bitmap_free(struct vfio_dma *dma)
 	dma->bitmap = NULL;
 }
 
-static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
+static void vfio_dma_populate_bitmap_pinned(struct vfio_dma *dma, size_t pgsize)
 {
 	struct rb_node *p;
 	unsigned long pgshift = __ffs(pgsize);
@@ -236,6 +236,25 @@ static void vfio_dma_populate_bitmap(struct vfio_dma *dma, size_t pgsize)
 	}
 }
 
+static void vfio_dma_populate_bitmap_full(struct vfio_dma *dma, size_t pgsize)
+{
+	unsigned long pgshift = __ffs(pgsize);
+	unsigned long nbits = dma->size >> pgshift;
+
+	bitmap_set(dma->bitmap, 0, nbits);
+}
+
+static void vfio_dma_populate_bitmap(struct vfio_iommu *iommu,
+				     struct vfio_dma *dma)
+{
+	size_t pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
+
+	if (iommu->pinned_page_dirty_scope)
+		vfio_dma_populate_bitmap_pinned(dma, pgsize);
+	else if (dma->iommu_mapped)
+		vfio_dma_populate_bitmap_full(dma, pgsize);
+}
+
 static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu)
 {
 	struct rb_node *n;
@@ -257,7 +276,7 @@ static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu)
 			}
 			return ret;
 		}
-		vfio_dma_populate_bitmap(dma, pgsize);
+		vfio_dma_populate_bitmap(iommu, dma);
 	}
 	return 0;
 }
@@ -987,13 +1006,6 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	unsigned long shift = bit_offset % BITS_PER_LONG;
 	unsigned long leftover;
 
-	/*
-	 * mark all pages dirty if any IOMMU capable device is not able
-	 * to report dirty pages and all pages are pinned and mapped.
-	 */
-	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
-		bitmap_set(dma->bitmap, 0, nbits);
-
 	if (shift) {
 		bitmap_shift_left(dma->bitmap, dma->bitmap, shift,
 				  nbits + shift);
@@ -1019,7 +1031,6 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	struct vfio_dma *dma;
 	struct rb_node *n;
 	unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
-	size_t pgsize = (size_t)1 << pgshift;
 	int ret;
 
 	/*
@@ -1055,7 +1066,7 @@ static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 		 * pages which are marked dirty by vfio_dma_rw()
 		 */
 		bitmap_clear(dma->bitmap, 0, dma->size >> pgshift);
-		vfio_dma_populate_bitmap(dma, pgsize);
+		vfio_dma_populate_bitmap(iommu, dma);
 	}
 	return 0;
 }
-- 
2.19.1
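
For context, the sketch below shows roughly how userspace drives the
dirty-tracking interface this patch touches (the VFIO_IOMMU_DIRTY_PAGES
uapi added by the Fixes commit). It assumes "container" is an already
configured VFIO type1 container fd with an existing DMA mapping;
MAP_IOVA, MAP_SIZE and PGSIZE are hypothetical values, and error
handling and container setup are elided.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

#define MAP_IOVA 0x100000000ULL
#define MAP_SIZE (16ULL << 20)
#define PGSIZE   4096ULL

static void start_dirty_tracking(int container)
{
	struct vfio_iommu_type1_dirty_bitmap d = {
		.argsz = sizeof(d),
		.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
	};

	/* With this patch, the per-vfio_dma bitmaps are populated here,
	 * not deferred until the first GET_BITMAP. */
	ioctl(container, VFIO_IOMMU_DIRTY_PAGES, &d);
}

static void get_dirty_bitmap(int container, uint64_t *data, uint64_t bytes)
{
	size_t argsz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		       sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
	struct vfio_iommu_type1_dirty_bitmap *hdr = calloc(1, argsz);
	struct vfio_iommu_type1_dirty_bitmap_get *get = (void *)hdr->data;

	hdr->argsz = argsz;
	hdr->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	get->iova = MAP_IOVA;
	get->size = MAP_SIZE;
	get->bitmap.pgsize = PGSIZE;   /* must be a size in iommu->pgsize_bitmap */
	get->bitmap.size = bytes;      /* bitmap buffer size in bytes */
	get->bitmap.data = (void *)data; /* one bit per PGSIZE page */

	ioctl(container, VFIO_IOMMU_DIRTY_PAGES, hdr);
	free(hdr);
}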