[v4,03/25] notifier: Add atomic/blocking_notifier_has_unique_priority()

Message ID: 20211126180101.27818-4-digetx@gmail.com
State: New, archived
Series: Introduce power-off+restart call chain API

Commit Message

Dmitry Osipenko Nov. 26, 2021, 6 p.m. UTC
Add atomic/blocking_notifier_has_unique_priority() helpers, which return
true if the given handler has a unique priority.

Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
---
 include/linux/notifier.h |  5 +++
 kernel/notifier.c        | 69 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)

Comments

Rafael J. Wysocki Dec. 10, 2021, 6:19 p.m. UTC | #1
On Fri, Nov 26, 2021 at 7:02 PM Dmitry Osipenko <digetx@gmail.com> wrote:
>
> Add atomic/blocking_notifier_has_unique_priority() helpers, which return
> true if the given handler has a unique priority.
>
> Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
> ---
>  include/linux/notifier.h |  5 +++
>  kernel/notifier.c        | 69 ++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 74 insertions(+)
>
> diff --git a/include/linux/notifier.h b/include/linux/notifier.h
> index 924c9d7c8e73..2c4036f225e1 100644
> --- a/include/linux/notifier.h
> +++ b/include/linux/notifier.h
> @@ -175,6 +175,11 @@ int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
>
>  bool blocking_notifier_call_chain_is_empty(struct blocking_notifier_head *nh);
>
> +bool atomic_notifier_has_unique_priority(struct atomic_notifier_head *nh,
> +               struct notifier_block *nb);
> +bool blocking_notifier_has_unique_priority(struct blocking_notifier_head *nh,
> +               struct notifier_block *nb);
> +
>  #define NOTIFY_DONE            0x0000          /* Don't care */
>  #define NOTIFY_OK              0x0001          /* Suits me */
>  #define NOTIFY_STOP_MASK       0x8000          /* Don't call further */
> diff --git a/kernel/notifier.c b/kernel/notifier.c
> index b20cb7b9b1f0..7a325b742104 100644
> --- a/kernel/notifier.c
> +++ b/kernel/notifier.c
> @@ -122,6 +122,19 @@ static int notifier_call_chain_robust(struct notifier_block **nl,
>         return ret;
>  }
>
> +static bool notifier_has_unique_priority(struct notifier_block **nl,
> +                                       struct notifier_block *n)
> +{
> +       while (*nl && (*nl)->priority >= n->priority) {
> +               if ((*nl)->priority == n->priority && *nl != n)
> +                       return false;
> +
> +               nl = &((*nl)->next);
> +       }
> +
> +       return true;
> +}
> +
>  /*
>   *     Atomic notifier chain routines.  Registration and unregistration
>   *     use a spinlock, and call_chain is synchronized by RCU (no locks).
> @@ -203,6 +216,30 @@ int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
>  EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
>  NOKPROBE_SYMBOL(atomic_notifier_call_chain);
>
> +/**
> + *     atomic_notifier_has_unique_priority - Checks whether notifier's priority is unique
> + *     @nh: Pointer to head of the atomic notifier chain
> + *     @n: Entry in notifier chain to check
> + *
> + *     Checks whether there is another notifier in the chain with the same priority.
> + *     Must be called in process context.
> + *
> + *     Returns true if priority is unique, false otherwise.
> + */
> +bool atomic_notifier_has_unique_priority(struct atomic_notifier_head *nh,
> +               struct notifier_block *n)
> +{
> +       unsigned long flags;
> +       bool ret;
> +
> +       spin_lock_irqsave(&nh->lock, flags);
> +       ret = notifier_has_unique_priority(&nh->head, n);
> +       spin_unlock_irqrestore(&nh->lock, flags);

This only works if the caller can prevent new entries from being added
to the list at this point or if the caller knows that they cannot be
added for some reason, but the kerneldoc doesn't mention this
limitation.
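To illustrate the limitation, a minimal sketch of a racy caller
(hypothetical code, not part of this patch):

	/* CPU A: register first, then check -- hypothetical usage */
	atomic_notifier_chain_register(nh, &my_nb);

	if (atomic_notifier_has_unique_priority(nh, &my_nb)) {
		/*
		 * nh->lock was dropped when the check returned, so
		 * nothing stops CPU B from registering another entry
		 * with the same priority right here; acting on the
		 * result is only safe if the caller excludes
		 * concurrent registration by other means.
		 */
	}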

> +
> +       return ret;
> +}
> +EXPORT_SYMBOL_GPL(atomic_notifier_has_unique_priority);
> +
>  /*
>   *     Blocking notifier chain routines.  All access to the chain is
>   *     synchronized by an rwsem.
> @@ -336,6 +373,38 @@ bool blocking_notifier_call_chain_is_empty(struct blocking_notifier_head *nh)
>  }
>  EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_is_empty);
>
> +/**
> + *     blocking_notifier_has_unique_priority - Checks whether notifier's priority is unique
> + *     @nh: Pointer to head of the blocking notifier chain
> + *     @n: Entry in notifier chain to check
> + *
> + *     Checks whether there is another notifier in the chain with the same priority.
> + *     Must be called in process context.
> + *
> + *     Returns true if priority is unique, false otherwise.
> + */
> +bool blocking_notifier_has_unique_priority(struct blocking_notifier_head *nh,
> +               struct notifier_block *n)
> +{
> +       bool ret;
> +
> +       /*
> +        * This code gets used during boot-up, when task switching is
> +        * not yet working and interrupts must remain disabled. At such
> +        * times we must not call down_read().
> +        */
> +       if (system_state != SYSTEM_BOOTING)

No, please don't do this, it makes the whole thing error-prone.

> +               down_read(&nh->rwsem);
> +
> +       ret = notifier_has_unique_priority(&nh->head, n);
> +
> +       if (system_state != SYSTEM_BOOTING)
> +               up_read(&nh->rwsem);

And still what if a new entry with a non-unique priority is added to
the chain at this point?

> +
> +       return ret;
> +}
> +EXPORT_SYMBOL_GPL(blocking_notifier_has_unique_priority);
> +
>  /*
>   *     Raw notifier chain routines.  There is no protection;
>   *     the caller must provide it.  Use at your own risk!
> --
> 2.33.1
>
Dmitry Osipenko Dec. 10, 2021, 6:52 p.m. UTC | #2
10.12.2021 21:19, Rafael J. Wysocki wrote:
...
>> +bool atomic_notifier_has_unique_priority(struct atomic_notifier_head *nh,
>> +               struct notifier_block *n)
>> +{
>> +       unsigned long flags;
>> +       bool ret;
>> +
>> +       spin_lock_irqsave(&nh->lock, flags);
>> +       ret = notifier_has_unique_priority(&nh->head, n);
>> +       spin_unlock_irqrestore(&nh->lock, flags);
> 
> This only works if the caller can prevent new entries from being added
> to the list at this point or if the caller knows that they cannot be
> added for some reason, but the kerneldoc doesn't mention this
> limitation.

I'll update the comment.

..
>> +bool blocking_notifier_has_unique_priority(struct blocking_notifier_head *nh,
>> +               struct notifier_block *n)
>> +{
>> +       bool ret;
>> +
>> +       /*
>> +        * This code gets used during boot-up, when task switching is
>> +        * not yet working and interrupts must remain disabled. At such
>> +        * times we must not call down_read().
>> +        */
>> +       if (system_state != SYSTEM_BOOTING)
> 
> No, please don't do this, it makes the whole thing error-prone.

What should I do then?

>> +               down_read(&nh->rwsem);
>> +
>> +       ret = notifier_has_unique_priority(&nh->head, n);
>> +
>> +       if (system_state != SYSTEM_BOOTING)
>> +               up_read(&nh->rwsem);
> 
> And still what if a new entry with a non-unique priority is added to
> the chain at this point?

If an entry with a non-unique priority is added after the check, then
obviously it won't be detected. I don't understand the question. These
down/up_read() calls are the locks that prevent the race, if that's the question.
Rafael J. Wysocki Dec. 10, 2021, 7:05 p.m. UTC | #3
On Fri, Dec 10, 2021 at 7:52 PM Dmitry Osipenko <digetx@gmail.com> wrote:
>
> 10.12.2021 21:19, Rafael J. Wysocki wrote:
> ...
> >> +bool atomic_notifier_has_unique_priority(struct atomic_notifier_head *nh,
> >> +               struct notifier_block *n)
> >> +{
> >> +       unsigned long flags;
> >> +       bool ret;
> >> +
> >> +       spin_lock_irqsave(&nh->lock, flags);
> >> +       ret = notifier_has_unique_priority(&nh->head, n);
> >> +       spin_unlock_irqrestore(&nh->lock, flags);
> >
> > This only works if the caller can prevent new entries from being added
> > to the list at this point or if the caller knows that they cannot be
> > added for some reason, but the kerneldoc doesn't mention this
> > limitation.
>
> I'll update the comment.
>
> ..
> >> +bool blocking_notifier_has_unique_priority(struct blocking_notifier_head *nh,
> >> +               struct notifier_block *n)
> >> +{
> >> +       bool ret;
> >> +
> >> +       /*
> >> +        * This code gets used during boot-up, when task switching is
> >> +        * not yet working and interrupts must remain disabled. At such
> >> +        * times we must not call down_read().
> >> +        */
> >> +       if (system_state != SYSTEM_BOOTING)
> >
> > No, please don't do this, it makes the whole thing error-prone.
>
> What should I do then?

First of all, do you know of any users who may want to call this
during early initialization?  If so, then why would they want to do
that?

Depending on the above, I would consider adding a special mechanism for them.

> >> +               down_read(&nh->rwsem);
> >> +
> >> +       ret = notifier_has_unique_priority(&nh->head, n);
> >> +
> >> +       if (system_state != SYSTEM_BOOTING)
> >> +               up_read(&nh->rwsem);
> >
> > And still what if a new entry with a non-unique priority is added to
> > the chain at this point?
>
> If an entry with a non-unique priority is added after the check, then
> obviously it won't be detected.

Why isn't this a problem?

> I don't understand the question. These
> down/up_read() calls are the locks that prevent the race, if that's the question.

Not really, they only prevent the race from occurring while
notifier_has_unique_priority() is running.

If anyone depends on this check for correctness, they need to lock the
rwsem, do the check, do the thing depending on the check while holding
the rwsem and then release the rwsem.  Otherwise it is racy.
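
Roughly along these lines (a sketch only; notifier_has_unique_priority()
is static to kernel/notifier.c in this patch, so a check-and-act sequence
like this would have to live in a new helper there):

	/* write-locked so the chain cannot change under us */
	down_write(&nh->rwsem);

	if (notifier_has_unique_priority(&nh->head, n)) {
		/* act on the result while the rwsem is still held */
	}

	up_write(&nh->rwsem);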
Dmitry Osipenko Dec. 10, 2021, 7:33 p.m. UTC | #4
10.12.2021 22:05, Rafael J. Wysocki wrote:
> On Fri, Dec 10, 2021 at 7:52 PM Dmitry Osipenko <digetx@gmail.com> wrote:
>>
>> 10.12.2021 21:19, Rafael J. Wysocki wrote:
>> ...
>>>> +bool atomic_notifier_has_unique_priority(struct atomic_notifier_head *nh,
>>>> +               struct notifier_block *n)
>>>> +{
>>>> +       unsigned long flags;
>>>> +       bool ret;
>>>> +
>>>> +       spin_lock_irqsave(&nh->lock, flags);
>>>> +       ret = notifier_has_unique_priority(&nh->head, n);
>>>> +       spin_unlock_irqrestore(&nh->lock, flags);
>>>
>>> This only works if the caller can prevent new entries from being added
>>> to the list at this point or if the caller knows that they cannot be
>>> added for some reason, but the kerneldoc doesn't mention this
>>> limitation.
>>
>> I'll update the comment.
>>
>> ..
>>>> +bool blocking_notifier_has_unique_priority(struct blocking_notifier_head *nh,
>>>> +               struct notifier_block *n)
>>>> +{
>>>> +       bool ret;
>>>> +
>>>> +       /*
>>>> +        * This code gets used during boot-up, when task switching is
>>>> +        * not yet working and interrupts must remain disabled. At such
>>>> +        * times we must not call down_read().
>>>> +        */
>>>> +       if (system_state != SYSTEM_BOOTING)
>>>
>>> No, please don't do this, it makes the whole thing error-prone.
>>
>> What should I do then?
> 
> First of all, do you know of any users who may want to call this
> during early initialization?  If so, then why would they want to do
> that?

I'll need to carefully review all those dozens of platform restart
handlers to answer this question.

> Depending on the above, I would consider adding a special mechanism for them.

Please note that every blocking_notifier_*() function has this
SYSTEM_BOOTING check; it's not my invention. The notifier API needs to
be generic.

>>>> +               down_read(&nh->rwsem);
>>>> +
>>>> +       ret = notifier_has_unique_priority(&nh->head, n);
>>>> +
>>>> +       if (system_state != SYSTEM_BOOTING)
>>>> +               up_read(&nh->rwsem);
>>>
>>> And still what if a new entry with a non-unique priority is added to
>>> the chain at this point?
>>
>> If an entry with a non-unique priority is added after the check, then
>> obviously it won't be detected.
> 
> Why isn't this a problem?
>
>> I don't understand the question. These
>> down/up_read() calls are the locks that prevent the race, if that's the question.
> 
> Not really, they only prevent the race from occurring while
> notifier_has_unique_priority() is running.
> 
> If anyone depends on this check for correctness, they need to lock the
> rwsem, do the check, do the thing depending on the check while holding
> the rwsem and then release the rwsem.  Otherwise it is racy.
> 

It's fine that it's a bit "racy" in the context of this series. We
always do the check after adding a new entry, so it's not a problem.

There are two options:

1. Use blocking_notifier_has_unique_priority() like it's done in this
patchset. Remove it after all drivers are converted to the new API and
add blocking_notifier_chain_register_unique().

2. Add blocking_notifier_chain_register_unique(), but don't let it fail
the registration of non-unique entries until all drivers are converted
to the new API.
Dmitry Osipenko Dec. 10, 2021, 8:16 p.m. UTC | #5
10.12.2021 22:33, Dmitry Osipenko wrote:
>> Not really, they only prevent the race from occurring while
>> notifier_has_unique_priority() is running.
>>
>> If anyone depends on this check for correctness, they need to lock the
>> rwsem, do the check, do the thing depending on the check while holding
>> the rwsem and then release the rwsem.  Otherwise it is racy.
>>
> It's fine that it's a bit "racy" in the context of this series. We
> always do the check after adding a new entry, so it's not a problem.
> 
> There are two options:
> 
> 1. Use blocking_notifier_has_unique_priority() like it's done in this
> patchset. Remove it after all drivers are converted to the new API and
> add blocking_notifier_chain_register_unique().
> 
> 2. Add blocking_notifier_chain_register_unique(), but don't let it fail
> the registration of non-unique entries until all drivers are converted
> to the new API.

There is a third, perhaps the best, option:

3. Add blocking_notifier_chain_register_unique() and fall back to
blocking_notifier_chain_register() if the unique registration fails; do
this until all drivers are converted to the new API.
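
A rough sketch of what option 3 might look like inside kernel/notifier.c
(hypothetical code; blocking_notifier_chain_register_unique() does not
exist at this point, and warning on a clash is only an assumed policy):

	int blocking_notifier_chain_register_unique(struct blocking_notifier_head *nh,
						    struct notifier_block *n)
	{
		int ret;

		down_write(&nh->rwsem);

		/* warn about a priority clash, but keep registering for now */
		WARN(!notifier_has_unique_priority(&nh->head, n),
		     "notifier priority %d is not unique\n", n->priority);

		ret = notifier_chain_register(&nh->head, n);
		up_write(&nh->rwsem);

		return ret;
	}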

Patch

diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 924c9d7c8e73..2c4036f225e1 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -175,6 +175,11 @@  int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
 
 bool blocking_notifier_call_chain_is_empty(struct blocking_notifier_head *nh);
 
+bool atomic_notifier_has_unique_priority(struct atomic_notifier_head *nh,
+		struct notifier_block *nb);
+bool blocking_notifier_has_unique_priority(struct blocking_notifier_head *nh,
+		struct notifier_block *nb);
+
 #define NOTIFY_DONE		0x0000		/* Don't care */
 #define NOTIFY_OK		0x0001		/* Suits me */
 #define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
diff --git a/kernel/notifier.c b/kernel/notifier.c
index b20cb7b9b1f0..7a325b742104 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -122,6 +122,19 @@  static int notifier_call_chain_robust(struct notifier_block **nl,
 	return ret;
 }
 
+static bool notifier_has_unique_priority(struct notifier_block **nl,
+					struct notifier_block *n)
+{
+	while (*nl && (*nl)->priority >= n->priority) {
+		if ((*nl)->priority == n->priority && *nl != n)
+			return false;
+
+		nl = &((*nl)->next);
+	}
+
+	return true;
+}
+
 /*
  *	Atomic notifier chain routines.  Registration and unregistration
  *	use a spinlock, and call_chain is synchronized by RCU (no locks).
@@ -203,6 +216,30 @@  int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
 EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
 NOKPROBE_SYMBOL(atomic_notifier_call_chain);
 
+/**
+ *	atomic_notifier_has_unique_priority - Checks whether notifier's priority is unique
+ *	@nh: Pointer to head of the atomic notifier chain
+ *	@n: Entry in notifier chain to check
+ *
+ *	Checks whether there is another notifier in the chain with the same priority.
+ *	Must be called in process context.
+ *
+ *	Returns true if priority is unique, false otherwise.
+ */
+bool atomic_notifier_has_unique_priority(struct atomic_notifier_head *nh,
+		struct notifier_block *n)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&nh->lock, flags);
+	ret = notifier_has_unique_priority(&nh->head, n);
+	spin_unlock_irqrestore(&nh->lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(atomic_notifier_has_unique_priority);
+
 /*
  *	Blocking notifier chain routines.  All access to the chain is
  *	synchronized by an rwsem.
@@ -336,6 +373,38 @@  bool blocking_notifier_call_chain_is_empty(struct blocking_notifier_head *nh)
 }
 EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_is_empty);
 
+/**
+ *	blocking_notifier_has_unique_priority - Checks whether notifier's priority is unique
+ *	@nh: Pointer to head of the blocking notifier chain
+ *	@n: Entry in notifier chain to check
+ *
+ *	Checks whether there is another notifier in the chain with the same priority.
+ *	Must be called in process context.
+ *
+ *	Returns true if priority is unique, false otherwise.
+ */
+bool blocking_notifier_has_unique_priority(struct blocking_notifier_head *nh,
+		struct notifier_block *n)
+{
+	bool ret;
+
+	/*
+	 * This code gets used during boot-up, when task switching is
+	 * not yet working and interrupts must remain disabled. At such
+	 * times we must not call down_read().
+	 */
+	if (system_state != SYSTEM_BOOTING)
+		down_read(&nh->rwsem);
+
+	ret = notifier_has_unique_priority(&nh->head, n);
+
+	if (system_state != SYSTEM_BOOTING)
+		up_read(&nh->rwsem);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_has_unique_priority);
+
 /*
  *	Raw notifier chain routines.  There is no protection;
  *	the caller must provide it.  Use at your own risk!
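
For completeness, a minimal usage sketch of the new helper (hypothetical
caller; restart_chain and the message text are made up for illustration):

	blocking_notifier_chain_register(&restart_chain, nb);

	if (!blocking_notifier_has_unique_priority(&restart_chain, nb))
		pr_warn("restart handler priority %d is not unique\n",
			nb->priority);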