}
nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
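+ /* the bus lock is dropped automatically when the guard goes out of scope */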
badblocks_populate(&nvdimm_bus->badrange, bb, range);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_btt->ndns
+ guard(nvdimm_bus)(dev);
+ return sprintf(buf, "%s\n", nd_btt->ndns
? dev_name(&nd_btt->ndns->dev) : "");
- nvdimm_bus_unlock(dev);
- return rc;
}
static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc;
}
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
- nvdimm_bus_unlock(&ndns->dev);
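+ /* only the seed device allocation itself needs the bus lock */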
+ scoped_guard(nvdimm_bus, &ndns->dev)
+ btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
if (!btt_dev)
return -ENOMEM;
btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
nvdimm_bus->probe_active++;
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
wake_up(&nvdimm_bus->wait);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static int nvdimm_bus_probe(struct device *dev)
if (!ndns)
return;
- get_device(&ndns->dev);
- nvdimm_bus_lock(&ndns->dev);
+
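+ /*
+ * Cleanup runs in reverse declaration order: the guard drops the bus
+ * lock first, then __free(put_device) drops the reference, preserving
+ * the old unlock-then-put ordering.
+ */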
+ struct device *ndev __free(put_device) = get_device(&ndns->dev);
+ guard(nvdimm_bus)(ndev);
__nd_detach_ndns(dev, _ndns);
- nvdimm_bus_unlock(&ndns->dev);
- put_device(&ndns->dev);
}
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nvdimm_map *nvdimm_map = data;
struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
kref_put(&nvdimm_map->kref, nvdimm_map_release);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
/**
{
struct nvdimm_map *nvdimm_map;
- nvdimm_bus_lock(dev);
- nvdimm_map = find_nvdimm_map(dev, offset);
- if (!nvdimm_map)
- nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
- else
- kref_get(&nvdimm_map->kref);
- nvdimm_bus_unlock(dev);
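+ /* the lookup-or-allocate sequence must be atomic under the bus lock */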
+ scoped_guard(nvdimm_bus, dev) {
+ nvdimm_map = find_nvdimm_map(dev, offset);
+ if (!nvdimm_map)
+ nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
+ else
+ kref_get(&nvdimm_map->kref);
+ }
if (!nvdimm_map)
return NULL;
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- nd_dax = nd_dax_alloc(nd_region);
- dax_dev = nd_dax_devinit(nd_dax, ndns);
- nvdimm_bus_unlock(&ndns->dev);
- if (!dax_dev)
- return -ENOMEM;
+ scoped_guard(nvdimm_bus, &ndns->dev) {
+ nd_dax = nd_dax_alloc(nd_region);
+ dax_dev = nd_dax_devinit(nd_dax, ndns);
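+ /* returning from inside a scoped_guard() block still drops the lock */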
+ if (!dax_dev)
+ return -ENOMEM;
+ }
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = &nd_dax->nd_pfn;
nd_pfn->pfn_sb = pfn_sb;
{
struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
- nvdimm_bus_lock(dev);
- dev_set_drvdata(dev, NULL);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev)
+ dev_set_drvdata(dev, NULL);
put_ndd(ndd);
}
struct resource *res, *_r;
dev_dbg(dev, "trace\n");
- nvdimm_bus_lock(dev);
- for_each_dpa_resource_safe(ndd, res, _r)
- nvdimm_free_dpa(ndd, res);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ for_each_dpa_resource_safe(ndd, res, _r)
+ nvdimm_free_dpa(ndd, res);
+ }
kvfree(ndd->data);
kfree(ndd);
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
struct device *dev;
- ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
dev = ndd->dev;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
nfree = 0;
} else
nfree--;
- rc = sprintf(buf, "%d\n", nfree);
- nvdimm_bus_unlock(dev);
- return rc;
+ return sprintf(buf, "%d\n", nfree);
}
static ssize_t available_slots_show(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- ssize_t rc;
-
/*
* Require all userspace triggered security management to be
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- rc = nvdimm_security_store(dev, buf, len);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-
- return rc;
+ return nvdimm_security_store(dev, buf, len);
}
static DEVICE_ATTR_RW(security);
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
result = nvdimm->fw_ops->activate_result(nvdimm);
- nvdimm_bus_unlock(dev);
switch (result) {
case NVDIMM_FWA_RESULT_NONE:
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
state = nvdimm->fw_ops->activate_state(nvdimm);
- nvdimm_bus_unlock(dev);
switch (state) {
case NVDIMM_FWA_IDLE:
else
return -EINVAL;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
rc = nvdimm->fw_ops->arm(nvdimm, arg);
- nvdimm_bus_unlock(dev);
if (rc < 0)
return rc;
if (!nd_desc->fw_ops)
return 0;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
bool dev_put = false;
/* We are shutting down. Make state frozen artificially. */
- nvdimm_bus_lock(dev);
- set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
- if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
- dev_put = true;
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
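+ /* pending overwrite work holds a device reference; drop it below */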
+ dev_put = test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
+ }
cancel_delayed_work_sync(&nvdimm->dwork);
if (dev_put)
put_device(dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
if (rc)
return rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
if (rc >= 0)
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-
return rc < 0 ? rc : len;
}
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
- resource_size_t size;
-
- nvdimm_bus_lock(&ndns->dev);
- size = __nvdimm_namespace_capacity(ndns);
- nvdimm_bus_unlock(&ndns->dev);
-
- return size;
+ guard(nvdimm_bus)(&ndns->dev);
+ return __nvdimm_namespace_capacity(ndns);
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
} else
return -ENXIO;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
kfree(uuid);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
} else
return -ENXIO;
- device_lock(dev);
- nvdimm_bus_lock(dev);
- if (to_ndns(dev)->claim)
- rc = -EBUSY;
- if (rc >= 0)
- rc = nd_size_select_store(dev, buf, lbasize, supported);
- if (rc >= 0)
- rc = nd_namespace_label_update(nd_region, dev);
- dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
+ if (to_ndns(dev)->claim) {
+ dev_dbg(dev, "namespace %s already claimed\n", dev_name(dev));
+ return -EBUSY;
+ }
+
+ rc = nd_size_select_store(dev, buf, lbasize, supported);
+ if (rc < 0) {
+ dev_dbg(dev, "size select fail: %zd tried: %s%s", rc,
buf, buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ return rc;
+ }
+
+ rc = nd_namespace_label_update(nd_region, dev);
+ if (rc < 0) {
+ dev_dbg(dev, "label update fail: %zd tried: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "wrote: %s%s", buf, buf[len - 1] == '\n' ? "" : "\n");
- return rc ? rc : len;
+ return len;
}
static DEVICE_ATTR_RW(sector_size);
int count = 0, i;
u32 flags = 0;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
}
if (!uuid)
- goto out;
+ return sprintf(buf, "%d\n", count);
nd_label_gen_id(&label_id, uuid, flags);
for (i = 0; i < nd_region->ndr_mappings; i++) {
if (strcmp(res->name, label_id.id) == 0)
count++;
}
- out:
- nvdimm_bus_unlock(dev);
return sprintf(buf, "%d\n", count);
}
struct nd_region *nd_region = to_nd_region(dev->parent);
int rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
nd_region);
}
-int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
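+/*
+ * Scan the region's active labels and create the namespace devices for
+ * the region type; the bus lock is held only for the scan itself.
+ */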
+static int create_relevant_namespaces(struct nd_region *nd_region, int *type,
+ struct device ***devs)
{
- struct device **devs = NULL;
- int i, rc = 0, type;
+ int rc;
- *err = 0;
- nvdimm_bus_lock(&nd_region->dev);
+ guard(nvdimm_bus)(&nd_region->dev);
rc = init_active_labels(nd_region);
- if (rc) {
- nvdimm_bus_unlock(&nd_region->dev);
+ if (rc)
return rc;
- }
- type = nd_region_to_nstype(nd_region);
- switch (type) {
+ *type = nd_region_to_nstype(nd_region);
+ switch (*type) {
case ND_DEVICE_NAMESPACE_IO:
- devs = create_namespace_io(nd_region);
+ *devs = create_namespace_io(nd_region);
break;
case ND_DEVICE_NAMESPACE_PMEM:
- devs = create_namespaces(nd_region);
- break;
- default:
+ *devs = create_namespaces(nd_region);
break;
}
- nvdimm_bus_unlock(&nd_region->dev);
+
+ return 0;
+}
+
+int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
+{
+ struct device **devs = NULL;
+ int i, rc = 0, type;
+
+ *err = 0;
+ rc = create_relevant_namespaces(nd_region, &type, &devs);
+ if (rc)
+ return rc;
if (!devs)
return -ENODEV;
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
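+
+/*
+ * guard(nvdimm_bus)(dev) takes nvdimm_bus_lock(dev) and releases it
+ * automatically at the end of the enclosing scope, while
+ * scoped_guard(nvdimm_bus, dev) { ... } bounds the lock to an explicit
+ * block. A NULL device is tolerated: both lock and unlock are skipped.
+ */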
+DEFINE_GUARD(nvdimm_bus, struct device *,
+ if (_T) nvdimm_bus_lock(_T), if (_T) nvdimm_bus_unlock(_T));
+
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
+ size_t n = len - 1;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
if (dev->driver)
- rc = -EBUSY;
- else {
- size_t n = len - 1;
-
- if (strncmp(buf, "pmem\n", n) == 0
- || strncmp(buf, "pmem", n) == 0) {
- nd_pfn->mode = PFN_MODE_PMEM;
- } else if (strncmp(buf, "ram\n", n) == 0
- || strncmp(buf, "ram", n) == 0)
- nd_pfn->mode = PFN_MODE_RAM;
- else if (strncmp(buf, "none\n", n) == 0
- || strncmp(buf, "none", n) == 0)
- nd_pfn->mode = PFN_MODE_NONE;
- else
- rc = -EINVAL;
- }
+ return -EBUSY;
+
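+ /* accept each keyword with or without a trailing newline */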
+ if (strncmp(buf, "pmem\n", n) == 0
+ || strncmp(buf, "pmem", n) == 0) {
+ nd_pfn->mode = PFN_MODE_PMEM;
+ } else if (strncmp(buf, "ram\n", n) == 0
+ || strncmp(buf, "ram", n) == 0)
+ nd_pfn->mode = PFN_MODE_RAM;
+ else if (strncmp(buf, "none\n", n) == 0
+ || strncmp(buf, "none", n) == 0)
+ nd_pfn->mode = PFN_MODE_NONE;
+ else
+ rc = -EINVAL;
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments(aligns));
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_pfn->ndns
+ guard(nvdimm_bus)(dev);
+ return sprintf(buf, "%s\n", nd_pfn->ndns
? dev_name(&nd_pfn->ndns->dev) : "");
- nvdimm_bus_unlock(dev);
- return rc;
}
static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc;
}
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- nd_pfn = nd_pfn_alloc(nd_region);
- pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
- nvdimm_bus_unlock(&ndns->dev);
+ scoped_guard(nvdimm_bus, &ndns->dev) {
+ nd_pfn = nd_pfn_alloc(nd_region);
+ pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
+ }
if (!pfn_dev)
return -ENOMEM;
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
device_for_each_child(dev, NULL, child_unregister);
/* flush attribute readers and disable */
- nvdimm_bus_lock(dev);
- nd_region->ns_seed = NULL;
- nd_region->btt_seed = NULL;
- nd_region->pfn_seed = NULL;
- nd_region->dax_seed = NULL;
- dev_set_drvdata(dev, NULL);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ nd_region->ns_seed = NULL;
+ nd_region->btt_seed = NULL;
+ nd_region->pfn_seed = NULL;
+ nd_region->dax_seed = NULL;
+ dev_set_drvdata(dev, NULL);
+ }
/*
* Note, this assumes device_lock() context to not race
return 0;
}
-int nd_region_activate(struct nd_region *nd_region)
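+/*
+ * Size the flush hint table and find the minimum non-zero flush hint
+ * count across the region's dimms, under the bus lock.
+ */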
+static int get_flush_data(struct nd_region *nd_region, size_t *size, int *num_flush)
{
- int i, j, rc, num_flush = 0;
- struct nd_region_data *ndrd;
- struct device *dev = &nd_region->dev;
size_t flush_data_size = sizeof(void *);
+ int _num_flush = 0;
+ int i;
- nvdimm_bus_lock(&nd_region->dev);
+ guard(nvdimm_bus)(&nd_region->dev);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- nvdimm_bus_unlock(&nd_region->dev);
+ if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags))
return -EBUSY;
- }
/* at least one null hint slot per-dimm for the "no-hint" case */
flush_data_size += sizeof(void *);
- num_flush = min_not_zero(num_flush, nvdimm->num_flush);
+ _num_flush = min_not_zero(_num_flush, nvdimm->num_flush);
if (!nvdimm->num_flush)
continue;
flush_data_size += nvdimm->num_flush * sizeof(void *);
}
- nvdimm_bus_unlock(&nd_region->dev);
+
+ *size = flush_data_size;
+ *num_flush = _num_flush;
+
+ return 0;
+}
+
+int nd_region_activate(struct nd_region *nd_region)
+{
+ int i, j, rc, num_flush;
+ struct nd_region_data *ndrd;
+ struct device *dev = &nd_region->dev;
+ size_t flush_data_size;
+
+ rc = get_flush_data(nd_region, &flush_data_size, &num_flush);
+ if (rc)
+ return rc;
rc = nd_region_invalidate_memregion(nd_region);
if (rc)
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
nsindex));
}
}
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
if (rc)
return rc;
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long available = 0;
/*
* Flush in-flight updates and grab a snapshot of the available
* size. Of course, this value is potentially invalidated the
* moment nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- available = nd_region_available_dpa(nd_region);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
- return sprintf(buf, "%llu\n", available);
+ return sprintf(buf, "%llu\n", nd_region_available_dpa(nd_region));
}
static DEVICE_ATTR_RO(available_size);
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long available = 0;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- available = nd_region_allocatable_dpa(nd_region);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
- return sprintf(buf, "%llu\n", available);
+ return sprintf(buf, "%llu\n", nd_region_allocatable_dpa(nd_region));
}
static DEVICE_ATTR_RO(max_available_extent);
struct device_attribute *attr, char *buf)
{
struct nd_region_data *ndrd = dev_get_drvdata(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- if (ndrd)
- rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
- else
- rc = -ENXIO;
- nvdimm_bus_unlock(dev);
- return rc;
+ guard(nvdimm_bus)(dev);
+ if (!ndrd)
+ return -ENXIO;
+
+ return sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
}
static DEVICE_ATTR_RO(init_namespaces);
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->ns_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
- return rc;
+ return sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
+
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(namespace_seed);
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->btt_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(btt_seed);
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->pfn_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(pfn_seed);
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->dax_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(dax_seed);
* times ensure it does not change for the duration of the
* allocation.
*/
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
nd_region->align = val;
- nvdimm_bus_unlock(dev);
return len;
}
*/
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->ns_seed == dev) {
nd_region_create_ns_seed(nd_region);
} else if (is_nd_btt(dev)) {
if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region_create_ns_seed(nd_region);
}
- nvdimm_bus_unlock(dev);
}
/**
int nvdimm_security_unlock(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- int rc;
- nvdimm_bus_lock(dev);
- rc = __nvdimm_security_unlock(nvdimm);
- nvdimm_bus_unlock(dev);
- return rc;
+ guard(nvdimm_bus)(dev);
+ return __nvdimm_security_unlock(nvdimm);
}
static int check_security_state(struct nvdimm *nvdimm)
struct nvdimm *nvdimm =
container_of(work, typeof(*nvdimm), dwork.work);
- nvdimm_bus_lock(&nvdimm->dev);
+ guard(nvdimm_bus)(&nvdimm->dev);
__nvdimm_security_overwrite_query(nvdimm);
- nvdimm_bus_unlock(&nvdimm->dev);
}
#define OPS \