
exec: make de_thread alloc new signal struct earlier

Message ID AM6PR03MB5170B44BAD4B9402F29B8B58E4E10@AM6PR03MB5170.eurprd03.prod.outlook.com (mailing list archive)
State New, archived
Series exec: make de_thread alloc new signal struct earlier

Commit Message

Bernd Edlinger March 8, 2020, 12:58 p.m. UTC
It was pointed out that de_thread may return -ENOMEM
after it has already terminated threads, and that
returning an error from execve at that point, except
when a fatal signal is being delivered, is no longer
an option.

Allocate the memory for the signal table earlier,
and make sure that -ENOMEM is returned before the
unrecoverable actions are started.

Signed-off-by: Bernd Edlinger <bernd.edlinger@hotmail.de>
---
Eric, what do you think: might this be helpful for
moving the "point of no return" lower and simplifying
your patch?

 fs/exec.c | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

Comments

Eric W. Biederman March 8, 2020, 6:12 p.m. UTC | #1
Bernd Edlinger <bernd.edlinger@hotmail.de> writes:

> It was pointed out that de_thread may return -ENOMEM
> after it has already terminated threads, and that
> returning an error from execve at that point, except
> when a fatal signal is being delivered, is no longer
> an option.
>
> Allocate the memory for the signal table earlier,
> and make sure that -ENOMEM is returned before the
> unrecoverable actions are started.
>
> Signed-off-by: Bernd Edlinger <bernd.edlinger@hotmail.de>
> ---
> Eric, what do you think: might this be helpful for
> moving the "point of no return" lower and simplifying
> your patch?

Good thinking, but no.  In this case it is possible to move the entire
allocation lower, as well as the posix timer cleanup.  That code is
actually much clearer outside of de_thread.  I will post a patch in that
direction in a moment.
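
[Editor's illustration, not part of the thread: one way to read "moving it
out of de_thread" is to factor the sighand-switching tail of de_thread()
into its own helper that exec calls separately, after de_thread() has
succeeded.  The body below is essentially the existing tail of de_thread();
the helper name unshare_sighand() and its call site are illustrative
assumptions, not Eric's actual follow-up patch.]

static int unshare_sighand(struct task_struct *tsk)
{
	struct sighand_struct *oldsighand = tsk->sighand;

	if (refcount_read(&oldsighand->count) != 1) {
		struct sighand_struct *newsighand;
		/*
		 * This ->sighand is shared with the CLONE_SIGHAND
		 * but not CLONE_THREAD task, switch to the new one.
		 */
		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
		if (!newsighand)
			return -ENOMEM;

		refcount_set(&newsighand->count, 1);
		memcpy(newsighand->action, oldsighand->action,
		       sizeof(newsighand->action));

		/* Publish the new table under the usual locks. */
		write_lock_irq(&tasklist_lock);
		spin_lock(&oldsighand->siglock);
		rcu_assign_pointer(tsk->sighand, newsighand);
		spin_unlock(&oldsighand->siglock);
		write_unlock_irq(&tasklist_lock);

		__cleanup_sighand(oldsighand);
	}
	return 0;
}

[With such a split, de_thread() itself no longer allocates anything, and
the rarely-needed sighand allocation sits right where it is used; how a
failure at that point is reported was the subject of the wider discussion.]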

It is something of a bad idea to move the new sighand allocation sooner,
because in practice that allocation is almost never needed.  It only
exists to support the CLONE_VM | CLONE_SIGHAND without CLONE_THREAD
case, which is not used by the modern posix thread libraries.
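
[Background illustration from the editor: the legacy combination meant here
is a task created with CLONE_VM | CLONE_SIGHAND but without CLONE_THREAD,
i.e. it shares the signal handler table yet forms its own thread group.  A
minimal userspace sketch, assuming Linux with the glibc clone() wrapper and
a downward-growing stack; child_fn and the stack handling are hypothetical.]

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int child_fn(void *arg)
{
	/* Shares the parent's memory and signal handler table, but is a
	 * separate thread group leader with its own PID. */
	pause();
	return 0;
}

int main(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);

	if (!stack)
		return 1;

	/* CLONE_SIGHAND requires CLONE_VM; CLONE_THREAD is deliberately
	 * omitted, which is the old pre-NPTL style combination. */
	pid_t pid = clone(child_fn, stack + stack_size,
			  CLONE_VM | CLONE_SIGHAND | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return 1;
	}

	printf("child %d shares sighand but not the thread group\n", pid);
	return 0;
}

[If such a child later calls execve(), its sighand refcount is greater than
one at exec time, which is the only situation where de_thread()'s
newsighand allocation is actually taken.]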

There are just enough old executables floating around out there that I
don't think we can remove the CLONE_SIGHAND case in general, but I keep
dreaming about it.  We carry a lot of complexity in the code to support
something that no one really does anymore.

Eric

>  fs/exec.c | 31 +++++++++++++++++++++++--------
>  1 file changed, 23 insertions(+), 8 deletions(-)
>
> diff --git a/fs/exec.c b/fs/exec.c
> index 74d88da..a0328dc 100644
> --- a/fs/exec.c
> +++ b/fs/exec.c
> @@ -1057,16 +1057,26 @@ static int exec_mmap(struct mm_struct *mm)
>   * disturbing other processes.  (Other processes might share the signal
>   * table via the CLONE_SIGHAND option to clone().)
>   */
> -static int de_thread(struct task_struct *tsk)
> +static int de_thread(void)
>  {
> +	struct task_struct *tsk = current;
>  	struct signal_struct *sig = tsk->signal;
>  	struct sighand_struct *oldsighand = tsk->sighand;
>  	spinlock_t *lock = &oldsighand->siglock;
> +	struct sighand_struct *newsighand = NULL;
>  
>  	if (thread_group_empty(tsk))
>  		goto no_thread_group;
>  
>  	/*
> +	 * This is the last time for an out of memory error.
> +	 * After this point only fatal signals are okay.
> +	 */
> +	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
> +	if (!newsighand)
> +		return -ENOMEM;
> +
> +	/*
>  	 * Kill all other threads in the thread group.
>  	 */
>  	spin_lock_irq(lock);
> @@ -1076,7 +1086,7 @@ static int de_thread(struct task_struct *tsk)
>  		 * return so that the signal is processed.
>  		 */
>  		spin_unlock_irq(lock);
> -		return -EAGAIN;
> +		goto err_free;
>  	}
>  
>  	sig->group_exit_task = tsk;
> @@ -1191,14 +1201,16 @@ static int de_thread(struct task_struct *tsk)
>  #endif
>  
>  	if (refcount_read(&oldsighand->count) != 1) {
> -		struct sighand_struct *newsighand;
>  		/*
>  		 * This ->sighand is shared with the CLONE_SIGHAND
>  		 * but not CLONE_THREAD task, switch to the new one.
>  		 */
> -		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
> -		if (!newsighand)
> -			return -ENOMEM;
> +		if (!newsighand) {
> +			newsighand = kmem_cache_alloc(sighand_cachep,
> +						      GFP_KERNEL);
> +			if (!newsighand)
> +				return -ENOMEM;
> +		}
>  
>  		refcount_set(&newsighand->count, 1);
>  		memcpy(newsighand->action, oldsighand->action,
> @@ -1211,7 +1223,8 @@ static int de_thread(struct task_struct *tsk)
>  		write_unlock_irq(&tasklist_lock);
>  
>  		__cleanup_sighand(oldsighand);
> -	}
> +	} else if (newsighand)
> +		kmem_cache_free(sighand_cachep, newsighand);
>  
>  	BUG_ON(!thread_group_leader(tsk));
>  	return 0;
> @@ -1222,6 +1235,8 @@ static int de_thread(struct task_struct *tsk)
>  	sig->group_exit_task = NULL;
>  	sig->notify_count = 0;
>  	read_unlock(&tasklist_lock);
> +err_free:
> +	kmem_cache_free(sighand_cachep, newsighand);
>  	return -EAGAIN;
>  }
>  
> @@ -1262,7 +1277,7 @@ int flush_old_exec(struct linux_binprm * bprm)
>  	 * Make sure we have a private signal table and that
>  	 * we are unassociated from the previous thread group.
>  	 */
> -	retval = de_thread(current);
> +	retval = de_thread();
>  	if (retval)
>  		goto out;

Patch

diff --git a/fs/exec.c b/fs/exec.c
index 74d88da..a0328dc 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1057,16 +1057,26 @@  static int exec_mmap(struct mm_struct *mm)
  * disturbing other processes.  (Other processes might share the signal
  * table via the CLONE_SIGHAND option to clone().)
  */
-static int de_thread(struct task_struct *tsk)
+static int de_thread(void)
 {
+	struct task_struct *tsk = current;
 	struct signal_struct *sig = tsk->signal;
 	struct sighand_struct *oldsighand = tsk->sighand;
 	spinlock_t *lock = &oldsighand->siglock;
+	struct sighand_struct *newsighand = NULL;
 
 	if (thread_group_empty(tsk))
 		goto no_thread_group;
 
 	/*
+	 * This is the last time for an out of memory error.
+	 * After this point only fatal signals are okay.
+	 */
+	newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
+	if (!newsighand)
+		return -ENOMEM;
+
+	/*
 	 * Kill all other threads in the thread group.
 	 */
 	spin_lock_irq(lock);
@@ -1076,7 +1086,7 @@  static int de_thread(struct task_struct *tsk)
 		 * return so that the signal is processed.
 		 */
 		spin_unlock_irq(lock);
-		return -EAGAIN;
+		goto err_free;
 	}
 
 	sig->group_exit_task = tsk;
@@ -1191,14 +1201,16 @@  static int de_thread(struct task_struct *tsk)
 #endif
 
 	if (refcount_read(&oldsighand->count) != 1) {
-		struct sighand_struct *newsighand;
 		/*
 		 * This ->sighand is shared with the CLONE_SIGHAND
 		 * but not CLONE_THREAD task, switch to the new one.
 		 */
-		newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
-		if (!newsighand)
-			return -ENOMEM;
+		if (!newsighand) {
+			newsighand = kmem_cache_alloc(sighand_cachep,
+						      GFP_KERNEL);
+			if (!newsighand)
+				return -ENOMEM;
+		}
 
 		refcount_set(&newsighand->count, 1);
 		memcpy(newsighand->action, oldsighand->action,
@@ -1211,7 +1223,8 @@  static int de_thread(struct task_struct *tsk)
 		write_unlock_irq(&tasklist_lock);
 
 		__cleanup_sighand(oldsighand);
-	}
+	} else if (newsighand)
+		kmem_cache_free(sighand_cachep, newsighand);
 
 	BUG_ON(!thread_group_leader(tsk));
 	return 0;
@@ -1222,6 +1235,8 @@  static int de_thread(struct task_struct *tsk)
 	sig->group_exit_task = NULL;
 	sig->notify_count = 0;
 	read_unlock(&tasklist_lock);
+err_free:
+	kmem_cache_free(sighand_cachep, newsighand);
 	return -EAGAIN;
 }
 
@@ -1262,7 +1277,7 @@  int flush_old_exec(struct linux_binprm * bprm)
 	 * Make sure we have a private signal table and that
 	 * we are unassociated from the previous thread group.
 	 */
-	retval = de_thread(current);
+	retval = de_thread();
 	if (retval)
 		goto out;