@@ -603,8 +603,9 @@ static int btt_freelist_init(struct arena_info *arena)
static bool ent_is_padding(struct log_entry *ent)
{
- return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
- && (ent->seq == 0);
+ return (ent->lba == 0) &&
+ (ent->old_map == 0) && (ent->new_map == 0) &&
+ (ent->seq == 0);
}
/*
@@ -1337,8 +1338,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
arena->freelist[lane].has_err = 1;
- if (mutex_is_locked(&arena->err_lock)
- || arena->freelist[lane].has_err) {
+ if (mutex_is_locked(&arena->err_lock) ||
+ arena->freelist[lane].has_err) {
nd_region_release_lane(btt->nd_region, lane);
ret = arena_clear_freelist_error(arena, lane);
@@ -189,8 +189,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
/* make sure we are in the region */
- if (ctx->phys < nd_region->ndr_start
- || (ctx->phys + ctx->cleared) > ndr_end)
+ if (ctx->phys < nd_region->ndr_start ||
+ (ctx->phys + ctx->cleared) > ndr_end)
return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512;
@@ -274,8 +274,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
- if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
- && !(flags & NVDIMM_IO_ATOMIC)) {
+ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) &&
+ !(flags & NVDIMM_IO_ATOMIC)) {
long cleared;
might_sleep();
@@ -437,10 +437,11 @@ static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
if (a == &dev_attr_security.attr) {
/* Are there any state mutation ops (make writable)? */
- if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
- || nvdimm->sec.ops->change_key
- || nvdimm->sec.ops->erase
- || nvdimm->sec.ops->overwrite)
+ if (nvdimm->sec.ops->freeze ||
+ nvdimm->sec.ops->disable ||
+ nvdimm->sec.ops->change_key ||
+ nvdimm->sec.ops->erase ||
+ nvdimm->sec.ops->overwrite)
return a->mode;
return 0444;
}
@@ -516,8 +517,9 @@ int nvdimm_security_setup_events(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- if (!nvdimm->sec.flags || !nvdimm->sec.ops
- || !nvdimm->sec.ops->overwrite)
+ if (!nvdimm->sec.flags ||
+ !nvdimm->sec.ops ||
+ !nvdimm->sec.ops->overwrite)
return 0;
nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
if (!nvdimm->sec.overwrite_state)
@@ -589,8 +591,8 @@ int alias_dpa_busy(struct device *dev, void *data)
* (i.e. BLK is allocated after all aliased PMEM).
*/
if (info->res) {
- if (info->res->start >= nd_mapping->start
- && info->res->start < map_end)
+ if (info->res->start >= nd_mapping->start &&
+ info->res->start < map_end)
/* pass */;
else
return 0;
@@ -604,9 +606,8 @@ int alias_dpa_busy(struct device *dev, void *data)
for_each_dpa_resource(ndd, res) {
if (strncmp(res->name, "pmem", 4) != 0)
continue;
- if ((res->start >= blk_start && res->start < map_end)
- || (res->end >= blk_start
- && res->end <= map_end)) {
+ if ((res->start >= blk_start && res->start < map_end) ||
+ (res->end >= blk_start && res->end <= map_end)) {
new = max(blk_start, min(map_end + 1, res->end + 1));
if (new != blk_start) {
blk_start = new;
@@ -187,8 +187,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
}
size = __le64_to_cpu(nsindex[i]->mysize);
- if (size > sizeof_namespace_index(ndd)
- || size < sizeof(struct nd_namespace_index)) {
+ if (size > sizeof_namespace_index(ndd) ||
+ size < sizeof(struct nd_namespace_index)) {
dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
continue;
}
@@ -839,8 +839,8 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
- if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
- || memcmp(nspm->uuid, label_ent->label->uuid,
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
+ memcmp(nspm->uuid, label_ent->label->uuid,
NSLABEL_UUID_LEN) == 0)
reap_victim(nd_mapping, label_ent);
}
@@ -598,8 +598,8 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
return;
/* allocation needs to be contiguous with the existing namespace */
- if (valid->start == exist->end + 1
- || valid->end == exist->start - 1)
+ if (valid->start == exist->end + 1 ||
+ valid->end == exist->start - 1)
return;
invalid:
@@ -777,9 +777,10 @@ static int merge_dpa(struct nd_region *nd_region,
struct resource *next = res->sibling;
resource_size_t end = res->start + resource_size(res);
- if (!next || strcmp(res->name, label_id->id) != 0
- || strcmp(next->name, label_id->id) != 0
- || end != next->start)
+ if (!next ||
+ strcmp(res->name, label_id->id) != 0 ||
+ strcmp(next->name, label_id->id) != 0 ||
+ end != next->start)
continue;
end += resource_size(next);
nvdimm_free_dpa(ndd, next);
@@ -1459,8 +1460,8 @@ static int btt_claim_class(struct device *dev)
loop_bitmask |= 1;
else {
/* check whether existing labels are v1.1 or v1.2 */
- if (__le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
+ if (__le16_to_cpu(nsindex->major) == 1 &&
+ __le16_to_cpu(nsindex->minor) == 1)
loop_bitmask |= 2;
else
loop_bitmask |= 4;
@@ -1658,11 +1659,12 @@ static umode_t namespace_visible(struct kobject *kobj,
return a->mode;
}
- if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
- || a == &dev_attr_holder.attr
- || a == &dev_attr_holder_class.attr
- || a == &dev_attr_force_raw.attr
- || a == &dev_attr_mode.attr)
+ if (a == &dev_attr_nstype.attr ||
+ a == &dev_attr_size.attr ||
+ a == &dev_attr_holder.attr ||
+ a == &dev_attr_holder_class.attr ||
+ a == &dev_attr_force_raw.attr ||
+ a == &dev_attr_mode.attr)
return a->mode;
return 0;
@@ -1818,9 +1820,9 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
continue;
- if (namespace_label_has(ndd, type_guid)
- && !guid_equal(&nd_set->type_guid,
- &nd_label->type_guid)) {
+ if (namespace_label_has(ndd, type_guid) &&
+ !guid_equal(&nd_set->type_guid,
+ &nd_label->type_guid)) {
dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
&nd_set->type_guid,
&nd_label->type_guid);
@@ -1882,8 +1884,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
hw_end = hw_start + nd_mapping->size;
pmem_start = __le64_to_cpu(nd_label->dpa);
pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
- if (pmem_start >= hw_start && pmem_start < hw_end
- && pmem_end <= hw_end && pmem_end > hw_start)
+ if (pmem_start >= hw_start && pmem_start < hw_end &&
+ pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
else {
dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
@@ -2049,8 +2051,8 @@ struct resource *nsblk_add_resource(struct nd_region *nd_region,
return NULL;
nsblk->res = (struct resource **) res;
for_each_dpa_resource(ndd, res)
- if (strcmp(res->name, label_id.id) == 0
- && res->start == start) {
+ if (strcmp(res->name, label_id.id) == 0 &&
+ res->start == start) {
nsblk->res[nsblk->num_resources++] = res;
return res;
}
@@ -74,14 +74,14 @@ static ssize_t mode_store(struct device *dev,
else {
size_t n = len - 1;
- if (strncmp(buf, "pmem\n", n) == 0
- || strncmp(buf, "pmem", n) == 0) {
+ if (strncmp(buf, "pmem\n", n) == 0 ||
+ strncmp(buf, "pmem", n) == 0) {
nd_pfn->mode = PFN_MODE_PMEM;
- } else if (strncmp(buf, "ram\n", n) == 0
- || strncmp(buf, "ram", n) == 0)
+ } else if (strncmp(buf, "ram\n", n) == 0 ||
+ strncmp(buf, "ram", n) == 0)
nd_pfn->mode = PFN_MODE_RAM;
- else if (strncmp(buf, "none\n", n) == 0
- || strncmp(buf, "none", n) == 0)
+ else if (strncmp(buf, "none\n", n) == 0 ||
+ strncmp(buf, "none", n) == 0)
nd_pfn->mode = PFN_MODE_NONE;
else
rc = -EINVAL;
@@ -529,8 +529,9 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -EBUSY;
}
- if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
- || !IS_ALIGNED(offset, PAGE_SIZE)) {
+ if ((align &&
+ !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) ||
+ !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
offset, align);
@@ -506,8 +506,9 @@ static int nd_pmem_probe(struct device *dev)
return pmem_attach_disk(dev, ndns);
/* if we find a valid info-block we'll come back as that personality */
- if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
- || nd_dax_probe(dev, ndns) == 0)
+ if (nd_btt_probe(dev, ndns) == 0 ||
+ nd_pfn_probe(dev, ndns) == 0 ||
+ nd_dax_probe(dev, ndns) == 0)
return -ENXIO;
/* ...otherwise we're just a raw pmem device */
@@ -16,9 +16,9 @@ static int nd_region_probe(struct device *dev)
struct nd_region_data *ndrd;
struct nd_region *nd_region = to_nd_region(dev);
- if (nd_region->num_lanes > num_online_cpus()
- && nd_region->num_lanes < num_possible_cpus()
- && !test_and_set_bit(0, &once)) {
+ if (nd_region->num_lanes > num_online_cpus() &&
+ nd_region->num_lanes < num_possible_cpus() &&
+ !test_and_set_bit(0, &once)) {
dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
num_online_cpus(), nd_region->num_lanes,
num_possible_cpus());
@@ -660,13 +660,13 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return a->mode;
}
- if (a != &dev_attr_set_cookie.attr
- && a != &dev_attr_available_size.attr)
+ if (a != &dev_attr_set_cookie.attr &&
+ a != &dev_attr_available_size.attr)
return a->mode;
- if ((type == ND_DEVICE_NAMESPACE_PMEM
- || type == ND_DEVICE_NAMESPACE_BLK)
- && a == &dev_attr_available_size.attr)
+ if ((type == ND_DEVICE_NAMESPACE_PMEM ||
+ type == ND_DEVICE_NAMESPACE_BLK) &&
+ a == &dev_attr_available_size.attr)
return a->mode;
else if (is_memory(dev) && nd_set)
return a->mode;
@@ -688,8 +688,9 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
if (!nd_set)
return 0;
- if (nsindex && __le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
+ if (nsindex &&
+ __le16_to_cpu(nsindex->major) == 1 &&
+ __le16_to_cpu(nsindex->minor) == 1)
return nd_set->cookie1;
return nd_set->cookie2;
}
@@ -1002,8 +1003,8 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
- if (test_bit(NDD_NOBLK, &nvdimm->flags)
- && dev_type == &nd_blk_device_type) {
+ if (test_bit(NDD_NOBLK, &nvdimm->flags) &&
+ dev_type == &nd_blk_device_type) {
dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
caller, dev_name(&nvdimm->dev), i);
return NULL;
@@ -1186,8 +1187,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
int i;
/* no nvdimm or pmem api == flushing capability unknown */
- if (nd_region->ndr_mappings == 0
- || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
+ if (nd_region->ndr_mappings == 0 ||
+ !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -173,8 +173,9 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->unlock ||
+ !nvdimm->sec.flags)
return -EIO;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
@@ -246,8 +247,9 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->disable ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -281,8 +283,9 @@ static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->change_key ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -330,16 +333,17 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->erase ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
if (rc)
return rc;
- if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
- && pass_type == NVDIMM_MASTER) {
+ if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags) &&
+ pass_type == NVDIMM_MASTER) {
dev_dbg(dev,
"Attempt to secure erase in wrong master state.\n");
return -EOPNOTSUPP;
@@ -371,8 +375,9 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->overwrite ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
if (dev->driver == NULL) {
@@ -427,8 +432,9 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
tmo = nvdimm->sec.overwrite_tmo;
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->query_overwrite ||
+ !nvdimm->sec.flags)
return;
rc = nvdimm->sec.ops->query_overwrite(nvdimm);
Make the logical continuation style more like the rest of the kernel.

No change in object files.

Signed-off-by: Joe Perches <joe@perches.com>
---
 drivers/nvdimm/btt.c            |  9 +++++----
 drivers/nvdimm/bus.c            |  4 ++--
 drivers/nvdimm/claim.c          |  4 ++--
 drivers/nvdimm/dimm_devs.c      | 23 ++++++++++++-----------
 drivers/nvdimm/label.c          |  8 ++++----
 drivers/nvdimm/namespace_devs.c | 40 +++++++++++++++++++++-------------------
 drivers/nvdimm/pfn_devs.c       | 17 +++++++++--------
 drivers/nvdimm/pmem.c           |  5 +++--
 drivers/nvdimm/region.c         |  6 +++---
 drivers/nvdimm/region_devs.c    | 23 ++++++++++++-----------
 drivers/nvdimm/security.c       | 34 ++++++++++++++++++++-------------
 11 files changed, 93 insertions(+), 80 deletions(-)