diff mbox series

[RFC,2/2] elevator: restore the old io scheduler if failed to switch to the new one

Message ID 29281ffcdd756bdbdfcee8769cd8b2eb867b74e2.1668772991.git.nickyc975@zju.edu.cn (mailing list archive)
State New, archived
Headers show
Series elevator: restore old io scheduler on failure in elevator_switch | expand

Commit Message

Jinlong Chen Nov. 18, 2022, 12:09 p.m. UTC
If we fail to switch to the new io scheduler, we should try to restore
the old one instead of just switching to none.

This also makes elevator_switch match its documentation.

Signed-off-by: Jinlong Chen <nickyc975@zju.edu.cn>
---
 block/elevator.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)
diff mbox series

Patch

diff --git a/block/elevator.c b/block/elevator.c
index 517857a9a68f..b7bd0b8468bd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -672,6 +672,7 @@  static int __elevator_apply(struct request_queue *q, struct elevator_type *e)
  */
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
+	struct elevator_type *old_e = NULL;
 	int ret;
 
 	lockdep_assert_held(&q->sysfs_lock);
@@ -680,17 +681,37 @@  int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	blk_mq_quiesce_queue(q);
 
 	if (q->elevator) {
+		old_e = q->elevator->type;
+		/*
+		 * Keep a reference so we can fallback on failure.
+		 */
+		__elevator_get(old_e);
 		elv_unregister_queue(q);
 		elevator_exit(q);
 	}
 
 	ret = __elevator_apply(q, new_e);
-	if (ret)
-		goto out_unfreeze;
+	if (likely(!ret)) {
+		blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+	} else if (old_e) {
+		int err;
+
+		err = __elevator_apply(q, old_e);
+		if (unlikely(err)) {
+			blk_add_trace_msg(q,
+				"elv switch failed: %s (%d), fallback failed: %s (%d)",
+				new_e->elevator_name, ret, old_e->elevator_name, err
+			);
+		}
+	}
 
-	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+	if (old_e) {
+		/*
+		 * Done, release the reference we kept.
+		 */
+		elevator_put(old_e);
+	}
 
-out_unfreeze:
 	blk_mq_unquiesce_queue(q);
 	blk_mq_unfreeze_queue(q);
 	return ret;