
block and io_uring: typo fixes

Message ID 20240908164723.36468-1-constantin.pestka@c-pestka.de (mailing list archive)

Commit Message

CPestka Sept. 8, 2024, 4:47 p.m. UTC
Signed-off-by: Constantin Pestka <constantin.pestka@c-pestka.de>
---
 block/Kconfig.iosched         | 2 +-
 block/genhd.c                 | 8 ++++----
 include/uapi/linux/io_uring.h | 6 +++---
 io_uring/io_uring.c           | 8 ++++----
 io_uring/uring_cmd.c          | 4 ++--
 5 files changed, 14 insertions(+), 14 deletions(-)

Patch

diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 27f11320b8d1..1ecd19f9506b 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -20,7 +20,7 @@  config IOSCHED_BFQ
 	tristate "BFQ I/O scheduler"
 	select BLK_ICQ
 	help
-	BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
+	BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth
 	of the device among all processes according to their weights,
 	regardless of the device parameters and with any workload. It
 	also guarantees a low latency to interactive and soft
diff --git a/block/genhd.c b/block/genhd.c
index 1c05dd4c6980..8c93fb977a59 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -352,7 +352,7 @@  int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
 
 	/*
 	 * If the device is opened exclusively by current thread already, it's
-	 * safe to scan partitons, otherwise, use bd_prepare_to_claim() to
+	 * safe to scan partitions, otherwise, use bd_prepare_to_claim() to
 	 * synchronize with other exclusive openers and other partition
 	 * scanners.
 	 */
@@ -374,7 +374,7 @@  int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
 	/*
 	 * If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
 	 * and this will cause that re-assemble partitioned raid device will
-	 * creat partition for underlying disk.
+	 * create partition for underlying disk.
 	 */
 	clear_bit(GD_NEED_PART_SCAN, &disk->state);
 	if (!(mode & BLK_OPEN_EXCL))
@@ -607,7 +607,7 @@  static void __blk_mark_disk_dead(struct gendisk *disk)
  * blk_mark_disk_dead - mark a disk as dead
  * @disk: disk to mark as dead
  *
- * Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * Mark a disk as dead (e.g. surprise removed) and don't accept any new I/O
  * to this disk.
  */
 void blk_mark_disk_dead(struct gendisk *disk)
@@ -732,7 +732,7 @@  EXPORT_SYMBOL(del_gendisk);
  * invalidate_disk - invalidate the disk
  * @disk: the struct gendisk to invalidate
  *
- * A helper to invalidates the disk. It will clean the disk's associated
+ * A helper to invalidate the disk. It will clean the disk's associated
  * buffer/page caches and reset its internal states so that the disk
  * can be reused by the drivers.
  *
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index a275f91d2ac0..69cbdb1df9d4 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -318,7 +318,7 @@  enum io_uring_op {
  * ASYNC_CANCEL flags.
  *
  * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
- * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
+ * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancellation rather than the
  *				request 'user_data'
  * IORING_ASYNC_CANCEL_ANY	Match any request
  * IORING_ASYNC_CANCEL_FD_FIXED	'fd' passed in is a fixed descriptor
@@ -361,7 +361,7 @@  enum io_uring_op {
  *				result 	will be the number of buffers send, with
  *				the starting buffer ID in cqe->flags as per
  *				usual for provided buffer usage. The buffers
- *				will be	contigious from the starting buffer ID.
+ *				will be	contiguous from the starting buffer ID.
  */
 #define IORING_RECVSEND_POLL_FIRST	(1U << 0)
 #define IORING_RECV_MULTISHOT		(1U << 1)
@@ -594,7 +594,7 @@  enum io_uring_register_op {
 	IORING_REGISTER_PBUF_RING		= 22,
 	IORING_UNREGISTER_PBUF_RING		= 23,
 
-	/* sync cancelation API */
+	/* sync cancellation API */
 	IORING_REGISTER_SYNC_CANCEL		= 24,
 
 	/* register a range of fixed file slots for automatic slot allocation */
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 1aca501efaf6..41e5f00d7f01 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1137,7 +1137,7 @@  static inline void io_req_local_work_add(struct io_kiocb *req,
 	BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
 
 	/*
-	 * We don't know how many reuqests is there in the link and whether
+	 * We don't know how many requests is there in the link and whether
 	 * they can even be queued lazily, fall back to non-lazy.
 	 */
 	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
@@ -1177,7 +1177,7 @@  static inline void io_req_local_work_add(struct io_kiocb *req,
 	 * in set_current_state() on the io_cqring_wait() side. It's used
 	 * to ensure that either we see updated ->cq_wait_nr, or waiters
 	 * going to sleep will observe the work added to the list, which
-	 * is similar to the wait/wawke task state sync.
+	 * is similar to the wait/wake task state sync.
 	 */
 
 	if (!head) {
@@ -2842,7 +2842,7 @@  static __cold void io_tctx_exit_cb(struct callback_head *cb)
 	 * When @in_cancel, we're in cancellation and it's racy to remove the
 	 * node. It'll be removed by the end of cancellation, just ignore it.
 	 * tctx can be NULL if the queueing of this task_work raced with
-	 * work cancelation off the exec path.
+	 * work cancellation off the exec path.
 	 */
 	if (tctx && !atomic_read(&tctx->in_cancel))
 		io_uring_del_tctx_node((unsigned long)work->ctx);
@@ -3141,7 +3141,7 @@  __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 		if (!tctx_inflight(tctx, !cancel_all))
 			break;
 
-		/* read completions before cancelations */
+		/* read completions before cancellations */
 		inflight = tctx_inflight(tctx, false);
 		if (!inflight)
 			break;
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index 8391c7c7c1ec..b89623012d52 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -93,7 +93,7 @@  static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
 }
 
 /*
- * Mark this command as concelable, then io_uring_try_cancel_uring_cmd()
+ * Mark this command as cancellable, then io_uring_try_cancel_uring_cmd()
  * will try to cancel this issued command by sending ->uring_cmd() with
  * issue_flags of IO_URING_F_CANCEL.
  *
@@ -120,7 +120,7 @@  static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
 {
 	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
 
-	/* task_work executor checks the deffered list completion */
+	/* task_work executor checks the deferred list completion */
 	ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
 }