@@ -15,11 +15,14 @@
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include <linux/types.h>
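+/* runtime PM autosuspend delay, in milliseconds */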
+#define INTEL_DG_NVM_RPM_TIMEOUT 500
+
struct intel_dg_nvm {
struct kref refcnt;
struct mtd_info mtd;
@@ -460,6 +463,7 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
loff_t from;
size_t len;
size_t total_len;
+ int ret = 0;
if (WARN_ON(!nvm))
return -EINVAL;
@@ -474,20 +478,28 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
total_len = info->len;
addr = info->addr;
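+ /* keep the parent device resumed for the whole erase; the reference is released at "out" below */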
+ ret = pm_runtime_resume_and_get(mtd->dev.parent);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %d\n", ret);
+ return ret;
+ }
+
guard(mutex)(&nvm->lock);
while (total_len > 0) {
if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(total_len, SZ_4K)) {
dev_err(&mtd->dev, "unaligned erase %llx %zx\n", addr, total_len);
info->fail_addr = addr;
- return -ERANGE;
+ ret = -ERANGE;
+ goto out;
}
idx = idg_nvm_get_region(nvm, addr);
if (idx >= nvm->nregions) {
dev_err(&mtd->dev, "out of range");
info->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
- return -ERANGE;
+ ret = -ERANGE;
+ goto out;
}
from = addr - nvm->regions[idx].offset;
@@ -503,14 +515,18 @@ static int intel_dg_mtd_erase(struct mtd_info *mtd, struct erase_info *info)
if (bytes < 0) {
dev_dbg(&mtd->dev, "erase failed with %zd\n", bytes);
info->fail_addr += nvm->regions[idx].offset;
- return bytes;
+ ret = bytes;
+ goto out;
}
addr += len;
total_len -= len;
}
- return 0;
+out:
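+ /* record recent activity and drop the PM reference; the device may autosuspend once the delay expires */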
+ pm_runtime_mark_last_busy(mtd->dev.parent);
+ pm_runtime_put_autosuspend(mtd->dev.parent);
+ return ret;
}
static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
@@ -539,17 +555,25 @@ static int intel_dg_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
if (len > nvm->regions[idx].size - from)
len = nvm->regions[idx].size - from;
+ ret = pm_runtime_resume_and_get(mtd->dev.parent);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+ return ret;
+ }
+
guard(mutex)(&nvm->lock);
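+ /* idg_read() returns the number of bytes read or a negative error code */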
ret = idg_read(nvm, region, from, len, buf);
if (ret < 0) {
dev_dbg(&mtd->dev, "read failed with %zd\n", ret);
- return ret;
+ } else {
+ *retlen = ret;
+ ret = 0;
}
- *retlen = ret;
-
- return 0;
+ pm_runtime_mark_last_busy(mtd->dev.parent);
+ pm_runtime_put_autosuspend(mtd->dev.parent);
+ return ret;
}
static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -578,17 +602,25 @@ static int intel_dg_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
if (len > nvm->regions[idx].size - to)
len = nvm->regions[idx].size - to;
+ ret = pm_runtime_resume_and_get(mtd->dev.parent);
+ if (ret < 0) {
+ dev_err(&mtd->dev, "rpm: get failed %zd\n", ret);
+ return ret;
+ }
+
guard(mutex)(&nvm->lock);
ret = idg_write(nvm, region, to, len, buf);
if (ret < 0) {
dev_dbg(&mtd->dev, "write failed with %zd\n", ret);
- return ret;
+ } else {
+ *retlen = ret;
+ ret = 0;
}
- *retlen = ret;
-
- return 0;
+ pm_runtime_mark_last_busy(mtd->dev.parent);
+ pm_runtime_put_autosuspend(mtd->dev.parent);
+ return ret;
}
static void intel_dg_nvm_release(struct kref *kref)
@@ -720,6 +752,17 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
n++;
}
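+ /*
+  * Enable runtime PM (device-managed) with autosuspend and take a
+  * PM reference so the device stays resumed while the NVM is mapped
+  * and probed; the reference is dropped once probe completes.
+  */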
+ devm_pm_runtime_enable(device);
+
+ pm_runtime_set_autosuspend_delay(device, INTEL_DG_NVM_RPM_TIMEOUT);
+ pm_runtime_use_autosuspend(device);
+
+ ret = pm_runtime_resume_and_get(device);
+ if (ret < 0) {
+ dev_err(device, "rpm: get failed %d\n", ret);
+ goto err_norpm;
+ }
+
nvm->base = devm_ioremap_resource(device, &invm->bar);
if (IS_ERR(nvm->base)) {
dev_err(device, "mmio not mapped\n");
@@ -742,9 +785,12 @@ static int intel_dg_mtd_probe(struct auxiliary_device *aux_dev,
dev_set_drvdata(&aux_dev->dev, nvm);
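+ /* setup complete: drop the probe-time PM reference */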
+ pm_runtime_put(device);
return 0;
err:
+ pm_runtime_put(device);
+err_norpm:
kref_put(&nvm->refcnt, intel_dg_nvm_release);
return ret;
}