From: Ard Biesheuvel <ardb@kernel.org>
To: Mike Rapoport <rppt@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	David Hildenbrand <david@redhat.com>,
	Marc Zyngier <maz@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Mike Rapoport <rppt@linux.ibm.com>, Will Deacon <will@kernel.org>,
	kvmarm <kvmarm@lists.cs.columbia.edu>,
	Linux ARM <linux-arm-kernel@lists.infradead.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	Linux Memory Management List <linux-mm@kvack.org>
Subject: Re: [PATCH v4 3/4] arm64: decouple check whether pfn is in linear map from pfn_valid()
Date: Tue, 11 May 2021 12:25:09 +0200	[thread overview]
Message-ID: <CAMj1kXGE=mu4P3sL-2zuGMrV1QwkXJc+-Y4Pj4RV7YEvdTDPzw@mail.gmail.com> (raw)
In-Reply-To: <20210511100550.28178-4-rppt@kernel.org>

On Tue, 11 May 2021 at 12:06, Mike Rapoport <rppt@kernel.org> wrote:
>
> From: Mike Rapoport <rppt@linux.ibm.com>
>
> The intended semantics of pfn_valid() are to verify whether there is a
> struct page for the pfn in question, and nothing else.
>
> Yet, on arm64 it is used to distinguish memory areas that are mapped in
> the linear map from those that require ioremap() to access them.
>
> Introduce a dedicated pfn_is_map_memory() wrapper for
> memblock_is_map_memory() to perform this check, and use it where
> appropriate.
>
> Using a wrapper allows us to avoid cyclic include dependencies.
>
> While here, also update the style of the pfn_valid() declaration so that
> the pfn_valid() and pfn_is_map_memory() declarations are consistent.
>
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> Acked-by: David Hildenbrand <david@redhat.com>

Acked-by: Ard Biesheuvel <ardb@kernel.org>
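
To make the semantic split concrete: pfn_valid() answers "does this pfn
have a struct page?", while pfn_is_map_memory() answers "is this pfn
covered by the kernel linear map?". For a MEMBLOCK_NOMAP range (e.g. some
firmware-reserved regions) the two can disagree. A minimal sketch of a
hypothetical caller follows; the helper name is invented for illustration
and is not part of the patch:

#include <linux/mm.h>

/*
 * Illustration only: return a linear-map alias for a pfn, or NULL.
 * A NOMAP pfn can have a struct page (so pfn_valid() is true) while
 * being deliberately left out of the linear map, so the mapping must
 * be checked before the result of page_address() is dereferenced.
 */
static void *linear_alias_or_null(unsigned long pfn)
{
        if (!pfn_valid(pfn))
                return NULL;    /* no struct page at all */
        if (!pfn_is_map_memory(pfn))
                return NULL;    /* struct page, but no linear mapping */
        return page_address(pfn_to_page(pfn));
}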

> ---
>  arch/arm64/include/asm/memory.h |  2 +-
>  arch/arm64/include/asm/page.h   |  3 ++-
>  arch/arm64/kvm/mmu.c            |  2 +-
>  arch/arm64/mm/init.c            | 12 ++++++++++++
>  arch/arm64/mm/ioremap.c         |  4 ++--
>  arch/arm64/mm/mmu.c             |  2 +-
>  6 files changed, 19 insertions(+), 6 deletions(-)
>
> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
> index 87b90dc27a43..9027b7e16c4c 100644
> --- a/arch/arm64/include/asm/memory.h
> +++ b/arch/arm64/include/asm/memory.h
> @@ -369,7 +369,7 @@ static inline void *phys_to_virt(phys_addr_t x)
>
>  #define virt_addr_valid(addr)  ({                                      \
>         __typeof__(addr) __addr = __tag_reset(addr);                    \
> -       __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));      \
> +       __is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr));      \
>  })
>
>  void dump_mem_limit(void);
> diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
> index 012cffc574e8..75ddfe671393 100644
> --- a/arch/arm64/include/asm/page.h
> +++ b/arch/arm64/include/asm/page.h
> @@ -37,7 +37,8 @@ void copy_highpage(struct page *to, struct page *from);
>
>  typedef struct page *pgtable_t;
>
> -extern int pfn_valid(unsigned long);
> +int pfn_valid(unsigned long pfn);
> +int pfn_is_map_memory(unsigned long pfn);
>
>  #include <asm/memory.h>
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index c5d1f3c87dbd..470070073085 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -85,7 +85,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
>
>  static bool kvm_is_device_pfn(unsigned long pfn)
>  {
> -       return !pfn_valid(pfn);
> +       return !pfn_is_map_memory(pfn);
>  }
>
>  static void *stage2_memcache_zalloc_page(void *arg)
> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
> index 16a2b2b1c54d..798f74f501d5 100644
> --- a/arch/arm64/mm/init.c
> +++ b/arch/arm64/mm/init.c
> @@ -255,6 +255,18 @@ int pfn_valid(unsigned long pfn)
>  }
>  EXPORT_SYMBOL(pfn_valid);
>
> +int pfn_is_map_memory(unsigned long pfn)
> +{
> +       phys_addr_t addr = PFN_PHYS(pfn);
> +
> +       /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
> +       if (PHYS_PFN(addr) != pfn)
> +               return 0;
> +
> +       return memblock_is_map_memory(addr);
> +}
> +EXPORT_SYMBOL(pfn_is_map_memory);
> +
>  static phys_addr_t memory_limit = PHYS_ADDR_MAX;
>
>  /*
> diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
> index b5e83c46b23e..b7c81dacabf0 100644
> --- a/arch/arm64/mm/ioremap.c
> +++ b/arch/arm64/mm/ioremap.c
> @@ -43,7 +43,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
>         /*
>          * Don't allow RAM to be mapped.
>          */
> -       if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
> +       if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
>                 return NULL;
>
>         area = get_vm_area_caller(size, VM_IOREMAP, caller);
> @@ -84,7 +84,7 @@ EXPORT_SYMBOL(iounmap);
>  void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
>  {
>         /* For normal memory we already have a cacheable mapping. */
> -       if (pfn_valid(__phys_to_pfn(phys_addr)))
> +       if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
>                 return (void __iomem *)__phys_to_virt(phys_addr);
>
>         return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 6dd9369e3ea0..ab5914cebd3c 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -82,7 +82,7 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
>  pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
>                               unsigned long size, pgprot_t vma_prot)
>  {
> -       if (!pfn_valid(pfn))
> +       if (!pfn_is_map_memory(pfn))
>                 return pgprot_noncached(vma_prot);
>         else if (file->f_flags & O_SYNC)
>                 return pgprot_writecombine(vma_prot);
> --
> 2.28.0
>
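
The round-trip guard in the new pfn_is_map_memory() (PHYS_PFN(addr) !=
pfn) mirrors the one referenced in pfn_valid(): PFN_PHYS() shifts the pfn
left by PAGE_SHIFT, so a bogus pfn with bits set above the physical
address range loses them in the shift, and shifting back down no longer
reproduces the input. A stand-alone user-space sketch of the mechanism,
assuming 4 KiB pages and re-defining the kernel macros locally:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel macros, assuming 4 KiB pages. */
#define PAGE_SHIFT      12
#define PFN_PHYS(pfn)   ((uint64_t)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(addr)  ((uint64_t)(addr) >> PAGE_SHIFT)

int main(void)
{
        uint64_t good  = 0x80000;       /* pfn of the page at 2 GiB */
        uint64_t bogus = 1ULL << 60;    /* high bits overflow the shift */

        printf("good : %d\n", PHYS_PFN(PFN_PHYS(good)) == good);   /* 1 */
        printf("bogus: %d\n", PHYS_PFN(PFN_PHYS(bogus)) == bogus); /* 0 */
        return 0;
}

With the guard in place, memblock_is_map_memory() is only consulted for
addresses that genuinely correspond to the queried pfn.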

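The ioremap.c hunks encode an attribute-aliasing rule: linear-map RAM
already has a cacheable mapping, and the ARM architecture does not allow
the same physical memory to be mapped with mismatched attributes. An
illustrative sketch of the caller-visible behaviour after this patch,
using a made-up physical address that is assumed to be RAM covered by the
linear map:

#include <linux/io.h>
#include <linux/sizes.h>

/* Hypothetical example; 0x80000000 is a made-up address assumed to
 * be RAM that is in the cacheable linear map. */
static void __iomem *cacheable_alias_example(void)
{
        phys_addr_t ram = 0x80000000;
        void __iomem *p = ioremap(ram, SZ_4K);

        /* With this patch, p is NULL here (and a WARN has fired),
         * because a Device alias of cacheable RAM would be an
         * attribute mismatch. */
        if (!p)
                return ioremap_cache(ram, SZ_4K); /* existing alias */
        return p;
}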

Thread overview: 16 messages

2021-05-11 10:05 [PATCH v4 0/4] arm64: drop pfn_valid_within() and simplify pfn_valid() Mike Rapoport
2021-05-11 10:05 ` [PATCH v4 1/4] include/linux/mmzone.h: add documentation for pfn_valid() Mike Rapoport
2021-05-11 10:22   ` Ard Biesheuvel
2021-05-11 10:05 ` [PATCH v4 2/4] memblock: update initialization of reserved pages Mike Rapoport
2021-05-11 10:23   ` Ard Biesheuvel
2021-05-11 10:05 ` [PATCH v4 3/4] arm64: decouple check whether pfn is in linear map from pfn_valid() Mike Rapoport
2021-05-11 10:25   ` Ard Biesheuvel [this message]
2021-05-11 10:05 ` [PATCH v4 4/4] arm64: drop pfn_valid_within() and simplify pfn_valid() Mike Rapoport
2021-05-11 10:26   ` Ard Biesheuvel
2021-05-11 23:40   ` Andrew Morton
2021-05-12  5:31     ` Mike Rapoport
2021-05-12  3:13 ` [PATCH v4 0/4] " Kefeng Wang
2021-05-12  7:00 ` Ard Biesheuvel
2021-05-12  7:33   ` Mike Rapoport
2021-05-12  7:59     ` Ard Biesheuvel
2021-05-12  8:32       ` Mike Rapoport
