dm: cleanup open_table_device

JIRA: https://issues.redhat.com/browse/RHEL-29262
Conflicts: context difference because we have ported commit e405cd2d35
	("block: use the holder as indication for exclusive opens")

commit b9a785d2dc6567b2fd9fc60057a6a945a276927a
Author: Christoph Hellwig <hch@lst.de>
Date:   Tue Nov 15 22:10:47 2022 +0800

    dm: cleanup open_table_device

    Move all the logic for allocation the table_device and linking it into
    the list into the open_table_device.  This keeps the code tidy and
    ensures that the table_devices only exist in fully initialized state.

    Signed-off-by: Christoph Hellwig <hch@lst.de>
    Signed-off-by: Yu Kuai <yukuai3@huawei.com>
    Reviewed-by: Mike Snitzer <snitzer@kernel.org>
    Link: https://lore.kernel.org/r/20221115141054.1051801-4-yukuai1@huaweicloud.com
    Signed-off-by: Jens Axboe <axboe@kernel.dk>

Signed-off-by: Ming Lei <ming.lei@redhat.com>
This commit is contained in:
Ming Lei 2022-11-15 22:10:47 +08:00
parent 07e1f83214
commit d76d74a0e4
1 changed file with 28 additions and 30 deletions

View File

@@ -720,28 +720,41 @@ static char *_dm_claim_ptr = "I belong to device-mapper";
/*
* Open a table device so we can use it as a map destination.
*/
static int open_table_device(struct table_device *td, dev_t dev,
struct mapped_device *md)
static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, fmode_t mode)
{
struct table_device *td;
struct block_device *bdev;
u64 part_off;
int r;
BUG_ON(td->dm_dev.bdev);
td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
if (!td)
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
bdev = blkdev_get_by_dev(dev, td->dm_dev.mode, _dm_claim_ptr, NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
r = bd_link_disk_holder(bdev, dm_disk(md));
if (r) {
blkdev_put(bdev, _dm_claim_ptr);
return r;
bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
if (IS_ERR(bdev)) {
r = PTR_ERR(bdev);
goto out_free_td;
}
r = bd_link_disk_holder(bdev, dm_disk(md));
if (r)
goto out_blkdev_put;
td->dm_dev.mode = mode;
td->dm_dev.bdev = bdev;
td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
return 0;
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
blkdev_put(bdev, _dm_claim_ptr);
out_free_td:
kfree(td);
return ERR_PTR(r);
}
/*
@@ -774,31 +787,16 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
struct dm_dev **result)
{
int r;
struct table_device *td;
mutex_lock(&md->table_devices_lock);
td = find_table_device(&md->table_devices, dev, mode);
if (!td) {
td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
if (!td) {
td = open_table_device(md, dev, mode);
if (IS_ERR(td)) {
mutex_unlock(&md->table_devices_lock);
return -ENOMEM;
return PTR_ERR(td);
}
td->dm_dev.mode = mode;
td->dm_dev.bdev = NULL;
if ((r = open_table_device(td, dev, md))) {
mutex_unlock(&md->table_devices_lock);
kfree(td);
return r;
}
format_dev_t(td->dm_dev.name, dev);
refcount_set(&td->count, 1);
list_add(&td->list, &md->table_devices);
} else {
refcount_inc(&td->count);
}