@@ -651,3 +651,56 @@ void rnbd_clt_destroy_sysfs_files(void)
	device_destroy(rnbd_dev_class, MKDEV(0, 0));
	class_destroy(rnbd_dev_class);
}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+void rnbd_clt_fault_inject_init(struct rnbd_clt_fault_inject *fault_inject,
+				const char *dev_name)
+{
+	rnbd_fault_inject_init(&fault_inject->fj, dev_name, -EBUSY);
+	/* injection points */
+	rnbd_fault_inject_add(fault_inject->fj.dir,
+			      "fail-request", &fault_inject->fail_request);
+	rnbd_fault_inject_add(fault_inject->fj.dir,
+			      "fail-unmap", &fault_inject->fail_unmap);
+}
+
+void rnbd_clt_fault_inject_final(struct rnbd_clt_fault_inject *fault_inject)
+{
+	rnbd_fault_inject_final(&fault_inject->fj);
+}
+
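+/*
+ * Called from the I/O completion path (msg_io_conf): when the
+ * "fail-request" switch is set and should_fail() triggers, return the
+ * configured errno so the caller can override the real completion status.
+ */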
+int rnbd_clt_should_fail_request(struct request *req)
+{
+	struct rnbd_clt_dev *dev = req->rq_disk->private_data;
+	struct rnbd_clt_fault_inject *fault_inject = &dev->fault_inject;
+
+	if (fault_inject->fail_request && should_fail(&fault_inject->fj.attr, 1))
+		return fault_inject->fj.status;
+	return 0;
+}
+
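+/*
+ * Called at the start of rnbd_clt_unmap_device(): lets userspace force
+ * the unmap to fail via the "fail-unmap" switch.
+ */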
+int rnbd_clt_should_fail_unmap(struct rnbd_clt_dev *dev)
+{
+	struct rnbd_clt_fault_inject *fault_inject = &dev->fault_inject;
+
+	if (fault_inject->fail_unmap && should_fail(&fault_inject->fj.attr, 1))
+		return fault_inject->fj.status;
+	return 0;
+}
+#else
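+/* stubs keep the callers free of #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS */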
+void rnbd_clt_fault_inject_init(struct rnbd_clt_fault_inject *fault_inject,
+				const char *dev_name)
+{
+}
+void rnbd_clt_fault_inject_final(struct rnbd_clt_fault_inject *fault_inject)
+{
+}
+int rnbd_clt_should_fail_request(struct request *req)
+{
+	return 0;
+}
+int rnbd_clt_should_fail_unmap(struct rnbd_clt_dev *dev)
+{
+	return 0;
+}
+#endif
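
The struct rnbd_fault_inject and the rnbd_fault_inject_init()/_add()/_final()
helpers used above are not defined in this section; they are presumably
provided by a shared rnbd fault-injection helper elsewhere in the series.
As a rough sketch only, such helpers could be built on the kernel's generic
fault-injection debugfs API (include/linux/fault-inject.h), in the spirit of
nvme's fault_inject.c; the struct layout, function bodies and debugfs
placement below are assumptions, not part of the patch:

#include <linux/debugfs.h>
#include <linux/fault-inject.h>

/* assumed layout; the real definition may differ */
struct rnbd_fault_inject {
	struct fault_attr attr;	/* generic knobs: probability, times, ... */
	struct dentry *dir;	/* debugfs dir holding the boolean switches */
	int status;		/* errno returned when an injection fires */
};

static DECLARE_FAULT_ATTR(rnbd_fail_default_attr);

void rnbd_fault_inject_init(struct rnbd_fault_inject *fj,
			    const char *dev_name, int status)
{
	fj->attr = rnbd_fail_default_attr;
	fj->status = status;
	/* <debugfs>/<dev_name>/ with the generic fault-injection attributes */
	fj->dir = fault_create_debugfs_attr(dev_name, NULL, &fj->attr);
}

void rnbd_fault_inject_add(struct dentry *dir, const char *name, bool *value)
{
	/* one boolean switch per injection point, e.g. "fail-request" */
	debugfs_create_bool(name, 0600, dir, value);
}

void rnbd_fault_inject_final(struct rnbd_fault_inject *fj)
{
	debugfs_remove_recursive(fj->dir);
}

With helpers along these lines, arming an injection point from userspace
would come down to tuning the generic knobs (probability, interval, times)
in the device's fault-injection directory and writing 1 to the matching
boolean such as fail-request; the exact debugfs paths depend on where the
real helpers create the directory.
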
@@ -411,6 +411,11 @@ static void msg_io_conf(void *priv, int errno)
	struct rnbd_clt_dev *dev = iu->dev;
	struct request *rq = iu->rq;
	int rw = rq_data_dir(rq);
+	int fail_err;
+
+	fail_err = rnbd_clt_should_fail_request(rq);
+	if (unlikely(fail_err)) /* override errno with the injected error */
+		errno = fail_err;

	iu->errno = errno;
@@ -1161,6 +1166,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
	}

	blk_mq_start_request(rq);
+
	err = rnbd_client_xfer_request(dev, rq, iu);
	if (likely(err == 0))
		return BLK_STS_OK;
@@ -1545,6 +1551,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
		goto send_close;
	}
+	rnbd_clt_fault_inject_init(&dev->fault_inject, dev->gd->disk_name);
+
	rnbd_clt_info(dev,
		      "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
		      dev->gd->disk_name, dev->nsectors,
@@ -1599,8 +1607,16 @@ int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
	struct rnbd_clt_session *sess = dev->sess;
	int refcount, ret = 0;
	bool was_mapped;
+	int fail_err;

	mutex_lock(&dev->lock);
+
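+	/* fail the unmap up front if the "fail-unmap" injection point fires */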
+	fail_err = rnbd_clt_should_fail_unmap(dev);
+	if (unlikely(fail_err)) {
+		ret = fail_err;
+		goto err;
+	}
+
	if (dev->dev_state == DEV_STATE_UNMAPPED) {
		rnbd_clt_info(dev, "Device is already being unmapped\n");
		ret = -EALREADY;
@@ -1618,6 +1634,7 @@ int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
	dev->dev_state = DEV_STATE_UNMAPPED;
	mutex_unlock(&dev->lock);
+	rnbd_clt_fault_inject_final(&dev->fault_inject);

	delete_dev(dev);
	destroy_sysfs(dev, sysfs_self);
	destroy_gen_disk(dev);
@@ -107,6 +107,14 @@ struct rnbd_queue {
	struct blk_mq_hw_ctx *hctx;
};
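+/*
+ * Per-device fault-injection state: rnbd_clt_fault_inject_init() creates
+ * the debugfs entries and the "fail-request"/"fail-unmap" switches that
+ * arm each injection point.  Empty when CONFIG_FAULT_INJECTION_DEBUG_FS
+ * is not set.
+ */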
+struct rnbd_clt_fault_inject {
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+	struct rnbd_fault_inject fj;
+	bool fail_unmap;
+	bool fail_request;
+#endif
+};
+
struct rnbd_clt_dev {
	struct rnbd_clt_session *sess;
	struct request_queue *queue;
@@ -139,6 +147,7 @@ struct rnbd_clt_dev {
	char *blk_symlink_name;
	refcount_t refcount;
	struct work_struct unmap_on_rmmod_work;
+	struct rnbd_clt_fault_inject fault_inject;
};
/* rnbd-clt.c */
@@ -163,4 +172,10 @@ void rnbd_clt_destroy_default_group(void);
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev);
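+
+/* fault injection (active only with CONFIG_FAULT_INJECTION_DEBUG_FS) */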
+void rnbd_clt_fault_inject_init(struct rnbd_clt_fault_inject *fault_inject,
+				const char *dev_name);
+void rnbd_clt_fault_inject_final(struct rnbd_clt_fault_inject *fault_inject);
+int rnbd_clt_should_fail_request(struct request *req);
+int rnbd_clt_should_fail_unmap(struct rnbd_clt_dev *dev);
+int rnbd_clt_should_fail_request_timeout(struct request *req);
#endif /* RNBD_CLT_H */