
highmem: Don't disable preemption on RT in kmap_atomic()

Message ID 20210810091116.pocdmaatdcogvdso@linutronix.de (mailing list archive)
State New
Series highmem: Don't disable preemption on RT in kmap_atomic()

Commit Message

Sebastian Andrzej Siewior Aug. 10, 2021, 9:11 a.m. UTC
kmap_atomic() disables preemption and pagefaults for historical
reasons. The conversion to kmap_local(), which only disables
migration, cannot be done wholesale because quite a few call sites
need to be updated to accommodate the changed semantics.

On PREEMPT_RT enabled kernels the kmap_atomic() semantics are
problematic due to the implicit disabling of preemption which makes it
impossible to acquire 'sleeping' spinlocks within the kmap atomic
sections.

PREEMPT_RT has replaced the preempt_disable() with a migrate_disable()
here for more than a decade. It could be argued that this justifies
making the change unconditionally, but PREEMPT_RT covers only a limited
number of architectures and disables some functionality, which limits
the coverage further.

Limit the replacement to PREEMPT_RT for now.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/highmem-internal.h | 27 ++++++++++++++++++++++-----
 1 file changed, 22 insertions(+), 5 deletions(-)
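To illustrate the semantic difference the commit message describes, here is
a rough sketch (not part of the patch; the identifiers example_lock and
example_update are invented for illustration) of a call site that takes a
spinlock_t, which is a sleeping lock on PREEMPT_RT, inside a kmap_atomic()
section. With the old unconditional preempt_disable() this is invalid on RT;
with migrate_disable() it becomes possible because the task may still sleep.

#include <linux/highmem.h>
#include <linux/spinlock.h>

/* Invented names, for illustration only. */
static DEFINE_SPINLOCK(example_lock);

static void example_update(struct page *page, int val)
{
	/*
	 * Disables pagefaults; after this patch it disables preemption
	 * only on !PREEMPT_RT and migration on PREEMPT_RT.
	 */
	int *slot = kmap_atomic(page);

	/*
	 * spinlock_t is a sleeping lock on PREEMPT_RT. Acquiring it here
	 * only works because preemption stays enabled on RT; with the
	 * previous unconditional preempt_disable() this would be a
	 * sleeping function called from invalid context.
	 */
	spin_lock(&example_lock);
	slot[0] = val;
	spin_unlock(&example_lock);

	/* Re-enables pagefaults and preemption (or migration on RT). */
	kunmap_atomic(slot);
}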

Comments

Vlastimil Babka Aug. 10, 2021, 10:33 a.m. UTC | #1
On 8/10/21 11:11 AM, Sebastian Andrzej Siewior wrote:
> kmap_atomic() disables preemption and pagefaults for historical
> reasons. The conversion to kmap_local(), which only disables
> migration, cannot be done wholesale because quite a few call sites
> need to be updated to accommodate the changed semantics.
> 
> On PREEMPT_RT enabled kernels the kmap_atomic() semantics are
> problematic due to the implicit disabling of preemption which makes it
> impossible to acquire 'sleeping' spinlocks within the kmap atomic
> sections.
> 
> PREEMPT_RT has replaced the preempt_disable() with a migrate_disable()
> here for more than a decade. It could be argued that this justifies
> making the change unconditionally, but PREEMPT_RT covers only a limited
> number of architectures and disables some functionality, which limits
> the coverage further.
> 
> Limit the replacement to PREEMPT_RT for now.
> 
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Note I use the same pattern in my SLUB series, but for performance reasons
(migrate_disable() is an unconditional function call, etc.). But I can guess
what the answer would be if I suggested turning this pattern into a common
shared wrapper, so I won't :P


Patch

diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 7902c7d8b55f9..4aa1031d3e4c3 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr)
 
 static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
+
 	pagefault_disable();
 	return __kmap_local_page_prot(page, prot);
 }
@@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page)
 
 static inline void *kmap_atomic_pfn(unsigned long pfn)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
+
 	pagefault_disable();
 	return __kmap_local_pfn_prot(pfn, kmap_prot);
 }
@@ -111,7 +119,10 @@ static inline void __kunmap_atomic(void *addr)
 {
 	kunmap_local_indexed(addr);
 	pagefault_enable();
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_enable();
+	else
+		preempt_enable();
 }
 
 unsigned int __nr_free_highpages(void);
@@ -179,7 +190,10 @@ static inline void __kunmap_local(void *addr)
 
 static inline void *kmap_atomic(struct page *page)
 {
-	preempt_disable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_disable();
+	else
+		preempt_disable();
 	pagefault_disable();
 	return page_address(page);
 }
@@ -200,7 +214,10 @@ static inline void __kunmap_atomic(void *addr)
 	kunmap_flush_on_unmap(addr);
 #endif
 	pagefault_enable();
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		migrate_enable();
+	else
+		preempt_enable();
 }
 
 static inline unsigned int nr_free_highpages(void) { return 0; }