
[v4,1/2] riscv: Safely remove entries from relocation list

Message ID 20231127-module_linking_freeing-v4-1-a2ca1d7027d0@rivosinc.com (mailing list archive)
State Accepted
Commit d8792a5734b0f3e58b898c2e2f910bfac48e9ee3
Series riscv: Fix issues with module loading

Checks

Context                     | Check   | Description
conchuod/vmtest-for-next-PR | success | PR summary
conchuod/patch-1-test-1     | success | .github/scripts/patches/build_rv32_defconfig.sh
conchuod/patch-1-test-2     | success | .github/scripts/patches/build_rv64_clang_allmodconfig.sh
conchuod/patch-1-test-3     | success | .github/scripts/patches/build_rv64_gcc_allmodconfig.sh
conchuod/patch-1-test-4     | success | .github/scripts/patches/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-1-test-5     | success | .github/scripts/patches/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-1-test-6     | success | .github/scripts/patches/checkpatch.sh
conchuod/patch-1-test-7     | success | .github/scripts/patches/dtb_warn_rv64.sh
conchuod/patch-1-test-8     | success | .github/scripts/patches/header_inline.sh
conchuod/patch-1-test-9     | success | .github/scripts/patches/kdoc.sh
conchuod/patch-1-test-10    | success | .github/scripts/patches/module_param.sh
conchuod/patch-1-test-11    | success | .github/scripts/patches/verify_fixes.sh
conchuod/patch-1-test-12    | success | .github/scripts/patches/verify_signedoff.sh

Commit Message

Charlie Jenkins Nov. 27, 2023, 10:04 p.m. UTC
Use the safe versions of list and hlist iteration to safely remove
entries from the module relocation lists. To allow multiple threads to
load modules concurrently, move relocation list pointers onto the stack
rather than using global variables.

Fixes: 8fd6c5142395 ("riscv: Add remaining module relocations")
Reported-by: Ron Economos <re@w6rz.net>
Closes: https://lore.kernel.org/linux-riscv/444de86a-7e7c-4de7-5d1d-c1c40eefa4ba@w6rz.net
Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
---
 arch/riscv/kernel/module.c | 110 +++++++++++++++++++++++++++++++++------------
 1 file changed, 82 insertions(+), 28 deletions(-)
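
The commit message relies on two standard kernel list idioms. As an illustrative
aside (not part of the patch; "struct reloc_item" and the function names below
are made up), a minimal sketch of both: the *_safe iterators cache the next node
so the current entry can be freed mid-walk, and a list head declared on the
caller's stack gives each module load its own private list instead of a shared
global.

#include <linux/list.h>
#include <linux/slab.h>

struct reloc_item {
        struct list_head head;
        long value;
};

/* The list head is owned by the caller rather than being a file-scope global. */
static void free_reloc_items(struct list_head *items)
{
        struct reloc_item *iter, *iter_tmp;

        /*
         * list_for_each_entry() would read iter->head.next after kfree(iter);
         * the _safe variant caches the next entry in iter_tmp first, so
         * entries can be deleted and freed while the list is being walked.
         */
        list_for_each_entry_safe(iter, iter_tmp, items, head) {
                list_del(&iter->head);
                kfree(iter);
        }
}

static void demo_caller(void)
{
        /* On-stack list head: no state shared between concurrent module loads. */
        LIST_HEAD(items);
        struct reloc_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

        if (item) {
                item->value = 42;
                list_add(&item->head, &items);
        }

        free_reloc_items(&items);
}

In the patch itself, apply_relocate_add() plays the role of demo_caller(): it
keeps relocation_hashtable and used_buckets_list as locals and passes them down
to add_relocation_to_accumulate() and process_accumulated_relocations().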

Comments

Björn Töpel Nov. 29, 2023, 7:29 a.m. UTC | #1
Charlie Jenkins <charlie@rivosinc.com> writes:

> Use the safe versions of list and hlist iteration to safely remove
> entries from the module relocation lists. To allow multiple threads to
> load modules concurrently, move relocation list pointers onto the stack
> rather than using global variables.
>
> Fixes: 8fd6c5142395 ("riscv: Add remaining module relocations")
> Reported-by: Ron Economos <re@w6rz.net>
> Closes: https://lore.kernel.org/linux-riscv/444de86a-7e7c-4de7-5d1d-c1c40eefa4ba@w6rz.net
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> ---
>  arch/riscv/kernel/module.c | 110 +++++++++++++++++++++++++++++++++------------
>  1 file changed, 82 insertions(+), 28 deletions(-)
>
> diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
> index 56a8c78e9e21..53593fe58cd8 100644
> --- a/arch/riscv/kernel/module.c
> +++ b/arch/riscv/kernel/module.c
> @@ -40,15 +40,6 @@ struct relocation_handlers {
>  				  long buffer);
>  };
>  
> -unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
> -void process_accumulated_relocations(struct module *me);
> -int add_relocation_to_accumulate(struct module *me, int type, void *location,
> -				 unsigned int hashtable_bits, Elf_Addr v);
> -
> -struct hlist_head *relocation_hashtable;
> -
> -struct list_head used_buckets_list;
> -
>  /*
>   * The auipc+jalr instruction pair can reach any PC-relative offset
>   * in the range [-2^31 - 2^11, 2^31 - 2^11)
> @@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = {
>  	/* 192-255 nonstandard ABI extensions  */
>  };
>  
> -void process_accumulated_relocations(struct module *me)
> +static void
> +process_accumulated_relocations(struct module *me,

Nit/breaks my workflow ;-): Don't bother if you're not respinning for
other reasons. The line break after the return type makes it harder to
grep the code (and is also not in line with the layout of the rest of
this file).
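
For illustration (hypothetical helpers, not code from module.c): a search
for "static void do_thing_split" finds nothing for the first definition
below, because the split layout puts the return type and the name on
different lines, while the single-line layout matches as expected.

static void
do_thing_split(void)            /* "static void do_thing_split" does not hit this */
{
}

static void do_thing_single(void)       /* "static void do_thing_single" hits this line */
{
}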


Björn
Lad, Prabhakar Dec. 4, 2023, 8:30 p.m. UTC | #2
On Mon, Nov 27, 2023 at 10:08 PM Charlie Jenkins <charlie@rivosinc.com> wrote:
>
> Use the safe versions of list and hlist iteration to safely remove
> entries from the module relocation lists. To allow multiple threads to
> load modules concurrently, move relocation list pointers onto the stack
> rather than using global variables.
>
> Fixes: 8fd6c5142395 ("riscv: Add remaining module relocations")
> Reported-by: Ron Economos <re@w6rz.net>
> Closes: https://lore.kernel.org/linux-riscv/444de86a-7e7c-4de7-5d1d-c1c40eefa4ba@w6rz.net
> Signed-off-by: Charlie Jenkins <charlie@rivosinc.com>
> ---
>  arch/riscv/kernel/module.c | 110 +++++++++++++++++++++++++++++++++------------
>  1 file changed, 82 insertions(+), 28 deletions(-)
>
This fixes issues seen on RZ/Five SMARC EVK.

Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # On RZ/Five SMARC EVK

Cheers,
Prabhakar


Patch

diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 56a8c78e9e21..53593fe58cd8 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -40,15 +40,6 @@  struct relocation_handlers {
 				  long buffer);
 };
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
-void process_accumulated_relocations(struct module *me);
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-				 unsigned int hashtable_bits, Elf_Addr v);
-
-struct hlist_head *relocation_hashtable;
-
-struct list_head used_buckets_list;
-
 /*
  * The auipc+jalr instruction pair can reach any PC-relative offset
  * in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -604,7 +595,10 @@  static const struct relocation_handlers reloc_handlers[] = {
 	/* 192-255 nonstandard ABI extensions  */
 };
 
-void process_accumulated_relocations(struct module *me)
+static void
+process_accumulated_relocations(struct module *me,
+				struct hlist_head **relocation_hashtable,
+				struct list_head *used_buckets_list)
 {
 	/*
 	 * Only ADD/SUB/SET/ULEB128 should end up here.
@@ -624,18 +618,25 @@  void process_accumulated_relocations(struct module *me)
 	 *	- Each relocation entry for a location address
 	 */
 	struct used_bucket *bucket_iter;
+	struct used_bucket *bucket_iter_tmp;
 	struct relocation_head *rel_head_iter;
+	struct hlist_node *rel_head_iter_tmp;
 	struct relocation_entry *rel_entry_iter;
+	struct relocation_entry *rel_entry_iter_tmp;
 	int curr_type;
 	void *location;
 	long buffer;
 
-	list_for_each_entry(bucket_iter, &used_buckets_list, head) {
-		hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+	list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
+				 used_buckets_list, head) {
+		hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
+					  bucket_iter->bucket, node) {
 			buffer = 0;
 			location = rel_head_iter->location;
-			list_for_each_entry(rel_entry_iter,
-					    rel_head_iter->rel_entry, head) {
+			list_for_each_entry_safe(rel_entry_iter,
+						 rel_entry_iter_tmp,
+						 rel_head_iter->rel_entry,
+						 head) {
 				curr_type = rel_entry_iter->type;
 				reloc_handlers[curr_type].reloc_handler(
 					me, &buffer, rel_entry_iter->value);
@@ -648,11 +649,14 @@  void process_accumulated_relocations(struct module *me)
 		kfree(bucket_iter);
 	}
 
-	kfree(relocation_hashtable);
+	kfree(*relocation_hashtable);
 }
 
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-				 unsigned int hashtable_bits, Elf_Addr v)
+static int add_relocation_to_accumulate(struct module *me, int type,
+					void *location,
+					unsigned int hashtable_bits, Elf_Addr v,
+					struct hlist_head *relocation_hashtable,
+					struct list_head *used_buckets_list)
 {
 	struct relocation_entry *entry;
 	struct relocation_head *rel_head;
@@ -661,6 +665,10 @@  int add_relocation_to_accumulate(struct module *me, int type, void *location,
 	unsigned long hash;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry)
+		return -ENOMEM;
+
 	INIT_LIST_HEAD(&entry->head);
 	entry->type = type;
 	entry->value = v;
@@ -669,7 +677,10 @@  int add_relocation_to_accumulate(struct module *me, int type, void *location,
 
 	current_head = &relocation_hashtable[hash];
 
-	/* Find matching location (if any) */
+	/*
+	 * Search for the relocation_head for the relocations that happen at the
+	 * provided location
+	 */
 	bool found = false;
 	struct relocation_head *rel_head_iter;
 
@@ -681,19 +692,45 @@  int add_relocation_to_accumulate(struct module *me, int type, void *location,
 		}
 	}
 
+	/*
+	 * If there has not yet been any relocations at the provided location,
+	 * create a relocation_head for that location and populate it with this
+	 * relocation_entry.
+	 */
 	if (!found) {
 		rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+
+		if (!rel_head) {
+			kfree(entry);
+			return -ENOMEM;
+		}
+
 		rel_head->rel_entry =
 			kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+		if (!rel_head->rel_entry) {
+			kfree(entry);
+			kfree(rel_head);
+			return -ENOMEM;
+		}
+
 		INIT_LIST_HEAD(rel_head->rel_entry);
 		rel_head->location = location;
 		INIT_HLIST_NODE(&rel_head->node);
 		if (!current_head->first) {
 			bucket =
 				kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+
+			if (!bucket) {
+				kfree(entry);
+				kfree(rel_head);
+				kfree(rel_head->rel_entry);
+				return -ENOMEM;
+			}
+
 			INIT_LIST_HEAD(&bucket->head);
 			bucket->bucket = current_head;
-			list_add(&bucket->head, &used_buckets_list);
+			list_add(&bucket->head, used_buckets_list);
 		}
 		hlist_add_head(&rel_head->node, current_head);
 	}
@@ -704,7 +741,9 @@  int add_relocation_to_accumulate(struct module *me, int type, void *location,
 	return 0;
 }
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+static unsigned int
+initialize_relocation_hashtable(unsigned int num_relocations,
+				struct hlist_head **relocation_hashtable)
 {
 	/* Can safely assume that bits is not greater than sizeof(long) */
 	unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
@@ -720,12 +759,13 @@  unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
 
 	hashtable_size <<= should_double_size;
 
-	relocation_hashtable = kmalloc_array(hashtable_size,
-					     sizeof(*relocation_hashtable),
-					     GFP_KERNEL);
-	__hash_init(relocation_hashtable, hashtable_size);
+	*relocation_hashtable = kmalloc_array(hashtable_size,
+					      sizeof(*relocation_hashtable),
+					      GFP_KERNEL);
+	if (!*relocation_hashtable)
+		return -ENOMEM;
 
-	INIT_LIST_HEAD(&used_buckets_list);
+	__hash_init(*relocation_hashtable, hashtable_size);
 
 	return hashtable_bits;
 }
@@ -742,7 +782,17 @@  int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 	Elf_Addr v;
 	int res;
 	unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
-	unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
+	struct hlist_head *relocation_hashtable;
+	struct list_head used_buckets_list;
+	unsigned int hashtable_bits;
+
+	hashtable_bits = initialize_relocation_hashtable(num_relocations,
+							 &relocation_hashtable);
+
+	if (hashtable_bits < 0)
+		return hashtable_bits;
+
+	INIT_LIST_HEAD(&used_buckets_list);
 
 	pr_debug("Applying relocate section %u to %u\n", relsec,
 	       sechdrs[relsec].sh_info);
@@ -823,14 +873,18 @@  int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 		}
 
 		if (reloc_handlers[type].accumulate_handler)
-			res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+			res = add_relocation_to_accumulate(me, type, location,
+							   hashtable_bits, v,
+							   relocation_hashtable,
+							   &used_buckets_list);
 		else
 			res = handler(me, location, v);
 		if (res)
 			return res;
 	}
 
-	process_accumulated_relocations(me);
+	process_accumulated_relocations(me, &relocation_hashtable,
+					&used_buckets_list);
 
 	return 0;
 }