* original ptbl is freed using RCU callback.
*
* LOCKING:
- * Matching bd_mutex locked or the caller is the only user of @disk.
+ * disk->mutex locked or the caller is the only user of @disk.
*/
static void disk_replace_part_tbl(struct gendisk *disk,
struct disk_part_tbl *new_ptbl)
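For reference, the body of this helper (elided in the hunk) follows the usual RCU publish-and-free pattern; a simplified sketch under the new locking rule, where the lockdep_is_held() condition and the rcu_head member name are this sketch's assumptions rather than the patch's literal code:

static void disk_replace_part_tbl(struct gendisk *disk,
		struct disk_part_tbl *new_ptbl)
{
	struct disk_part_tbl *old_ptbl =
		rcu_dereference_protected(disk->part_tbl,
					  lockdep_is_held(&disk->mutex));

	/* publish the new table; RCU readers may still see the old one */
	rcu_assign_pointer(disk->part_tbl, new_ptbl);
	/* free the old table only after a grace period has elapsed */
	if (old_ptbl)
		kfree_rcu(old_ptbl, rcu_head);
}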
* uses RCU to allow unlocked dereferencing for stats and other stuff.
*
* LOCKING:
- * Matching bd_mutex locked or the caller is the only user of @disk.
+ * disk->mutex locked or the caller is the only user of @disk.
* Might sleep.
*
* RETURNS:
if (!disk->part0.dkstats)
goto out_bdput;
+ mutex_init(&disk->mutex);
init_rwsem(&disk->lookup_sem);
disk->node_id = node_id;
if (disk_expand_part_tbl(disk, 0)) {
* doesn't clear the events from @disk->ev.
*
* CONTEXT:
- * If @mask is non-zero must be called with bdev->bd_mutex held.
+ * If @mask is non-zero must be called with disk->mutex held.
*/
void disk_flush_events(struct gendisk *disk, unsigned int mask)
{
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
ret = bdev_disk_changed(bdev, false);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
return ret;
}
}
/*
- * Must be called either with bd_mutex held, before a disk can be opened or
+ * Must be called either with disk->mutex held, before a disk can be opened or
* after all disk users are gone.
*/
void delete_partition(struct hd_struct *part)
static DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL);
/*
- * Must be called either with bd_mutex held, before a disk can be opened or
+ * Must be called either with disk->mutex held, before a disk can be opened or
* after all disk users are gone.
*/
static struct hd_struct *add_partition(struct gendisk *disk, int partno,
{
struct hd_struct *part;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
if (partition_overlaps(bdev->bd_disk, start, length, -1)) {
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
return -EBUSY;
}
part = add_partition(bdev->bd_disk, partno, start, length,
ADDPART_FLAG_NONE, NULL);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
return PTR_ERR_OR_ZERO(part);
}
if (!bdevp)
return -ENXIO;
- mutex_lock(&bdevp->bd_mutex);
- mutex_lock_nested(&bdev->bd_mutex, 1);
+ mutex_lock(&bdev->bd_disk->mutex);
ret = -ENXIO;
part = disk_get_part(bdev->bd_disk, partno);
delete_partition(part);
ret = 0;
out_unlock:
- mutex_unlock(&bdev->bd_mutex);
- mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
bdput(bdevp);
if (part)
disk_put_part(part);
if (!bdevp)
goto out_put_part;
- mutex_lock(&bdevp->bd_mutex);
- mutex_lock_nested(&bdev->bd_mutex, 1);
+ mutex_lock(&bdev->bd_disk->mutex);
ret = -EINVAL;
if (start != part->start_sect)
ret = 0;
out_unlock:
- mutex_unlock(&bdevp->bd_mutex);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
bdput(bdevp);
out_put_part:
disk_put_part(part);
{
int rc;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
rc = bdev_disk_changed(bdev, false);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
if (rc)
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
__func__, lo->lo_number, lo->lo_file_name, rc);
mutex_unlock(&loop_ctl_mutex);
/*
* We must drop file reference outside of loop_ctl_mutex as dropping
- * the file ref can take bd_mutex which creates circular locking
+ * the file ref can take disk->mutex which creates circular locking
* dependency.
*/
fput(old_file);
mutex_unlock(&loop_ctl_mutex);
if (partscan) {
/*
- * bd_mutex has been held already in release path, so don't
+ * disk->mutex has been held already in release path, so don't
* acquire it if this function is called in such case.
*
* If the reread partition isn't from release path, lo_refcnt
* current holder is released.
*/
if (!release)
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
err = bdev_disk_changed(bdev, false);
if (!release)
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo_number, err);
* Need not hold loop_ctl_mutex to fput backing file.
* Calling fput holding loop_ctl_mutex triggers a circular
* lock dependency possibility warning as fput can take
- * bd_mutex which is usually taken before loop_ctl_mutex.
+ * disk->mutex which is usually taken before loop_ctl_mutex.
*/
if (filp)
fput(filp);
return;
}
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&info->gd->mutex);
if (bdev->bd_openers) {
xenbus_dev_error(xbdev, -EBUSY,
xenbus_frontend_closed(xbdev);
}
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&info->gd->mutex);
bdput(bdev);
}
* isn't closed yet, we let release take care of it.
*/
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&disk->mutex);
info = disk->private_data;
dev_warn(disk_to_dev(disk),
mutex_unlock(&blkfront_mutex);
}
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&disk->mutex);
bdput(bdev);
return 0;
if (!bdev)
return -ENOMEM;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&zram->disk->mutex);
if (bdev->bd_openers)
ret = -EBUSY;
else
zram_reset_device(zram);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&zram->disk->mutex);
bdput(bdev);
return ret ? ret : len;
/*
* zram is claimed so open request will be failed
*/
- bool claim; /* Protected by bdev->bd_mutex */
+ bool claim; /* Protected by disk->mutex */
struct file *backing_dev;
#ifdef CONFIG_ZRAM_WRITEBACK
spinlock_t wb_limit_lock;
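How the open path honors 'claim': a hedged sketch modeled on zram_open(), adjusted here for the renamed lock; an illustration, not a hunk from the patch:

static int zram_open(struct block_device *bdev, fmode_t mode)
{
	struct zram *zram = bdev->bd_disk->private_data;

	/* ->open() is called with disk->mutex held, which protects claim */
	WARN_ON(!mutex_is_locked(&bdev->bd_disk->mutex));

	/* the device was claimed for reset, so fail any new open */
	if (zram->claim)
		return -EBUSY;
	return 0;
}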
/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
* that we are never stopping an array while it is open.
* 'reconfig_mutex' protects all other reconfiguration.
- * These locks are separate due to conflicting interactions
- * with bdev->bd_mutex.
- * Lock ordering is:
- * reconfig_mutex -> bd_mutex
- * bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open
+ * These locks are separate due to conflicting interactions
+ * with the block layer's disk->mutex.
+ * Lock ordering is:
+ * reconfig_mutex -> disk->mutex
+ * disk->mutex -> open_mutex: e.g. __blkdev_get -> md_open
*/
struct mutex open_mutex;
struct mutex reconfig_mutex;
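The nesting the comment spells out still exists under the new name, because __blkdev_get() calls ->open() with disk->mutex held; a simplified sketch of md_open() showing why open_mutex always nests inside it (the real function also handles mddev lookup races, omitted here):

static int md_open(struct block_device *bdev, fmode_t mode)
{
	/* caller (__blkdev_get) holds disk->mutex: disk->mutex -> open_mutex */
	struct mddev *mddev = bdev->bd_disk->private_data;
	int err;

	err = mutex_lock_interruptible(&mddev->open_mutex);
	if (err)
		return err;
	err = mddev->gendisk ? 0 : -ENODEV;
	if (!err)
		atomic_inc(&mddev->openers);
	mutex_unlock(&mddev->open_mutex);
	return err;
}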
return -ENODEV;
}
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
rc = bdev_disk_changed(bdev, false);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
if (rc)
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, rc %d", rc);
bdev = block->bdev;
block->bdev = NULL;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
blk_drop_partitions(bdev);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
blkdev_put(bdev, FMODE_READ);
* In the latter case @inode and @filp carry an abridged amount
* of information as noted above.
*
- * Locking: called with bdev->bd_mutex held.
+ * Locking: called with bdev->bd_disk->mutex held.
**/
static int sd_open(struct block_device *bdev, fmode_t mode)
{
* Note: may block (uninterruptible) if error recovery is underway
* on this disk.
*
- * Locking: called with bdev->bd_mutex held.
+ * Locking: called with bdev->bd_disk->mutex held.
**/
static void sd_release(struct gendisk *disk, fmode_t mode)
{
struct block_device *bdev = &ei->bdev;
memset(bdev, 0, sizeof(*bdev));
- mutex_init(&bdev->bd_mutex);
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
struct bd_holder_disk *holder;
int ret = 0;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
WARN_ON_ONCE(!bdev->bd_holder);
out_free:
kfree(holder);
out_unlock:
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
{
struct bd_holder_disk *holder;
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
holder = bd_find_holder_disk(bdev, disk);
kfree(holder);
}
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
#endif
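For context, typical holder usage as seen from a stacking driver; a hedged sketch modeled on device-mapper attaching an underlying device (dm_disk() is real, the surrounding error handling is illustrative):

/* attach: link our disk as a holder of the underlying device */
int err = bd_link_disk_holder(bdev, dm_disk(md));
if (err) {
	blkdev_put(bdev, mode);
	return err;
}

/* detach: unlink before dropping the device */
bd_unlink_disk_holder(bdev, dm_disk(md));
blkdev_put(bdev, mode);

With this patch both calls serialize on bdev->bd_disk->mutex rather than the old per-bdev bd_mutex.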
struct gendisk *disk = bdev->bd_disk;
int ret;
- lockdep_assert_held(&bdev->bd_mutex);
+ lockdep_assert_held(&bdev->bd_disk->mutex);
clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
return -ENXIO;
}
-/*
- * bd_mutex locking:
- *
- * mutex_lock(part->bd_mutex)
- * mutex_lock_nested(whole->bd_mutex, 1)
- */
-
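+ /*
+ * disk->mutex locking:
+ *
+ * The whole device and all its partitions share a single per-gendisk
+ * mutex. Only the outermost opener or closer (for_part == 0) takes
+ * it; recursive for_part calls on the whole device run under the
+ * lock the outer call already holds and must not take it again.
+ */
+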
static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder,
int for_part)
{
if (ret)
goto out;
- if (!for_part && (mode & FMODE_EXCL)) {
- WARN_ON_ONCE(!holder);
- ret = bd_prepare_to_claim(bdev, holder);
- if (ret)
- goto out_put_disk;
+ if (!for_part) {
+ if (mode & FMODE_EXCL) {
+ WARN_ON_ONCE(!holder);
+ ret = bd_prepare_to_claim(bdev, holder);
+ if (ret)
+ goto out_put_disk;
+ }
+
+ disk_block_events(disk);
+ mutex_lock(&disk->mutex);
}
- disk_block_events(disk);
- mutex_lock_nested(&bdev->bd_mutex, for_part);
if (!bdev->bd_openers) {
first_open = true;
unblock_events = false;
}
}
- mutex_unlock(&bdev->bd_mutex);
- if (unblock_events)
- disk_unblock_events(disk);
+ if (!for_part) {
+ mutex_unlock(&disk->mutex);
+
+ if (unblock_events)
+ disk_unblock_events(disk);
+ }
+
/* only one opener holds the module reference */
if (!first_open)
if (bdev_is_partition(bdev))
__blkdev_put(bdev_whole(bdev), mode, 1);
out_unlock_bdev:
- if (!for_part && (mode & FMODE_EXCL))
- bd_abort_claiming(bdev, holder);
- mutex_unlock(&bdev->bd_mutex);
- disk_unblock_events(disk);
+ if (!for_part) {
+ if (mode & FMODE_EXCL)
+ bd_abort_claiming(bdev, holder);
+ mutex_unlock(&disk->mutex);
+ disk_unblock_events(disk);
+ }
out_put_disk:
module_put(disk->fops->owner);
if (need_restart)
if (bdev->bd_openers == 1)
sync_blockdev(bdev);
- mutex_lock_nested(&bdev->bd_mutex, for_part);
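+ /*
+ * for_part puts always run under the disk->mutex taken by the
+ * outermost opener or closer, so only top-level puts lock it here.
+ */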
if (for_part)
bdev->bd_part_count--;
+ else
+ mutex_lock(&disk->mutex);
if (!--bdev->bd_openers) {
WARN_ON_ONCE(bdev->bd_holders);
module_put(disk->fops->owner);
}
- mutex_unlock(&bdev->bd_mutex);
+ /* put the whole device while we still hold its disk->mutex */
+ if (victim)
+ __blkdev_put(victim, mode, 1);
+ if (!for_part)
+ mutex_unlock(&disk->mutex);
bdput(bdev);
- if (victim)
- __blkdev_put(victim, mode, 1);
void blkdev_put(struct block_device *bdev, fmode_t mode)
{
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock(&bdev->bd_disk->mutex);
if (mode & FMODE_EXCL) {
struct block_device *whole = bdev_whole(bdev);
/*
* Release a claim on the device. The holder fields
- * are protected with bdev_lock. bd_mutex is to
+ * are protected with bdev_lock. disk->mutex is to
* synchronize disk_holder unlinking.
*/
spin_lock(&bdev_lock);
*/
disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
- mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdev->bd_disk->mutex);
__blkdev_put(bdev, mode, 0);
}
old_inode = inode;
bdev = I_BDEV(inode);
- mutex_lock(&bdev->bd_mutex);
- if (bdev->bd_openers)
- func(bdev, arg);
- mutex_unlock(&bdev->bd_mutex);
+ /* bd_disk is only set while the device is open in this tree */
+ if (bdev->bd_disk) {
+ mutex_lock(&bdev->bd_disk->mutex);
+ if (bdev->bd_openers)
+ func(bdev, arg);
+ mutex_unlock(&bdev->bd_disk->mutex);
+ }
spin_lock(&blockdev_superblock->s_inode_list_lock);
}
lockdep_assert_held(&uuid_mutex);
/*
* The device_list_mutex cannot be taken here in case opening the
- * underlying device takes further locks like bd_mutex.
+ * underlying device takes further locks like disk->mutex.
*
* We also don't need the lock here as this is called during mount and
* exclusion is provided by uuid_mutex
}
/*
- * s_umount nests inside bd_mutex during
+ * s_umount nests inside disk->mutex during
* __invalidate_device(). blkdev_put() acquires
- * bd_mutex and can't be called under s_umount. Drop
+ * disk->mutex and can't be called under s_umount. Drop
* s_umount temporarily. This is safe as we're
* holding an active reference.
*/
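Both copies of this comment annotate the same drop-and-reacquire pattern in mount_bdev() and get_tree_bdev(); the code itself is elided from this excerpt, so for context:

	up_write(&s->s_umount);
	blkdev_put(bdev, mode);	/* may take disk->mutex safely here */
	down_write(&s->s_umount);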
}
/*
- * s_umount nests inside bd_mutex during
+ * s_umount nests inside disk->mutex during
* __invalidate_device(). blkdev_put() acquires
- * bd_mutex and can't be called under s_umount. Drop
+ * disk->mutex and can't be called under s_umount. Drop
* s_umount temporarily. This is safe as we're
* holding an active reference.
*/
int bd_openers;
struct inode * bd_inode; /* will die */
struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
void * bd_claiming;
void * bd_holder;
int bd_holders;
unsigned long state;
#define GD_NEED_PART_SCAN 0
struct rw_semaphore lookup_sem;
+ struct mutex mutex; /* open/close mutex; also protects the partition table and holder list */
struct kobject *slave_dir;
struct timer_rand_state *random;