Split struct dm_dev in two and publish the part that other targets need in
include/linux/device-mapper.h.
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
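---
For orientation, a minimal user-space sketch (not part of the patch) of the layout this split produces and of the container_of() round trip that dm_put_device() now performs. The kernel types are replaced by simple stand-ins so the fragment compiles on its own; the member names follow the patch.

/* Stand-alone illustration: struct dm_dev is the published part a target
 * sees, struct dm_dev_internal is the wrapper dm core keeps on its list.
 */
#include <stddef.h>
#include <stdio.h>

struct dm_dev {				/* published in device-mapper.h */
	void *bdev;			/* stand-in for struct block_device * */
	int mode;
	char name[16];
};

struct dm_dev_internal {		/* private to drivers/md/dm.h */
	struct dm_dev_internal *next;	/* stand-in for struct list_head */
	int count;			/* stand-in for atomic_t */
	struct dm_dev dm_dev;
};

/* Same idea as the kernel's container_of(): step back from the embedded
 * member to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct dm_dev_internal dd = { .count = 1 };
	struct dm_dev *d = &dd.dm_dev;	/* what a target is handed */

	/* What dm core now does inside dm_put_device(ti, d): */
	struct dm_dev_internal *back =
		container_of(d, struct dm_dev_internal, dm_dev);

	printf("round trip ok: %d\n", back == &dd);	/* prints 1 */
	return 0;
}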
unsigned int count = 0;
struct list_head *tmp;
size_t len, needed;
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct dm_target_deps *deps;
deps = get_result_buffer(param, param_size, &len);
deps->count = count;
count = 0;
list_for_each_entry (dd, dm_table_get_devices(table), list)
- deps->dev[count++] = huge_encode_dev(dd->bdev->bd_dev);
+ deps->dev[count++] = huge_encode_dev(dd->dm_dev.bdev->bd_dev);
param->data_size = param->data_start + needed;
}
struct list_head *tmp, *next;
list_for_each_safe(tmp, next, devices) {
- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ struct dm_dev_internal *dd =
+ list_entry(tmp, struct dm_dev_internal, list);
/*
* See if we've already got a device in the list.
*/
-static struct dm_dev *find_device(struct list_head *l, dev_t dev)
+static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
list_for_each_entry (dd, l, list)
- if (dd->bdev->bd_dev == dev)
+ if (dd->dm_dev.bdev->bd_dev == dev)
/*
* Open a device so we can use it as a map destination.
*/
-static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
+static int open_dev(struct dm_dev_internal *d, dev_t dev,
+ struct mapped_device *md)
{
static char *_claim_ptr = "I belong to device-mapper";
struct block_device *bdev;
int r;
- BUG_ON(d->bdev);
+ BUG_ON(d->dm_dev.bdev);
- bdev = open_by_devnum(dev, d->mode);
+ bdev = open_by_devnum(dev, d->dm_dev.mode);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
blkdev_put(bdev);
else
- d->bdev = bdev;
+ d->dm_dev.bdev = bdev;
return r;
}
/*
* Close a device that we've been using.
*/
-static void close_dev(struct dm_dev *d, struct mapped_device *md)
+static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
- bd_release_from_disk(d->bdev, dm_disk(md));
- blkdev_put(d->bdev);
- d->bdev = NULL;
+ bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
+ blkdev_put(d->dm_dev.bdev);
+ d->dm_dev.bdev = NULL;
}
/*
* If possible, this checks an area of a destination device is valid.
*/
-static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
+static int check_device_area(struct dm_dev_internal *dd, sector_t start,
+ sector_t len)
- sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+ sector_t dev_size = dd->dm_dev.bdev->bd_inode->i_size >> SECTOR_SHIFT;
* careful to leave things as they were if we fail to reopen the
* device.
*/
-static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
+static int upgrade_mode(struct dm_dev_internal *dd, int new_mode,
+ struct mapped_device *md)
- struct dm_dev dd_copy;
- dev_t dev = dd->bdev->bd_dev;
+ struct dm_dev_internal dd_copy;
+ dev_t dev = dd->dm_dev.bdev->bd_dev;
- dd->mode |= new_mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode |= new_mode;
+ dd->dm_dev.bdev = NULL;
r = open_dev(dd, dev, md);
if (!r)
close_dev(&dd_copy, md);
{
int r;
dev_t uninitialized_var(dev);
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
unsigned int major, minor;
BUG_ON(!t);
- dd->mode = mode;
- dd->bdev = NULL;
+ dd->dm_dev.mode = mode;
+ dd->dm_dev.bdev = NULL;
if ((r = open_dev(dd, dev, t->md))) {
kfree(dd);
return r;
}
- format_dev_t(dd->name, dev);
+ format_dev_t(dd->dm_dev.name, dev);
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
- } else if (dd->mode != (mode | dd->mode)) {
+ } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
r = upgrade_mode(dd, mode, t->md);
if (r)
return r;
if (!check_device_area(dd, start, len)) {
DMWARN("device %s too small for target", path);
- dm_put_device(ti, dd);
+ dm_put_device(ti, &dd->dm_dev);
/*
* Decrement a device's use count and remove it if necessary.
*/
-void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *d)
+ struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
+ dm_dev);
+
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd, ti->table->md);
list_del(&dd->list);
int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
int r = 0;
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
}
void dm_table_unplug_all(struct dm_table *t)
{
- struct dm_dev *dd;
+ struct dm_dev_internal *dd;
struct list_head *devices = dm_table_get_devices(t);
list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
/*
* List of devices that a metadevice uses and should open/close.
*/
-struct dm_dev {
+struct dm_dev_internal {
struct list_head list;
atomic_t count;
- int mode;
- struct block_device *bdev;
- char name[16];
+ struct dm_dev dm_dev;
};
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;
*/
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
+struct dm_dev {
+ struct block_device *bdev;
+ int mode;
+ char name[16];
+};
+
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
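
The comment above refers to dm_get_device()/dm_put_device(). As a hedged sketch of how a target constructor consumes the newly published struct dm_dev: the example target below is hypothetical, and the dm_get_device() signature used (path, start, len, mode, result pointer) is the one already in the tree at the time of this patch, not something the patch introduces.

#include <linux/device-mapper.h>

/* Hypothetical target constructor: dm core opens the device and keeps its
 * bookkeeping in the private struct dm_dev_internal; the target only ever
 * sees the published struct dm_dev. */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Device lookup failed";
		return r;
	}

	/* The published fields can be used directly. */
	DMINFO("example: using %s (mode %d)", dev->name, dev->mode);

	ti->private = dev;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	/* Drop the reference taken by dm_get_device() in the constructor. */
	dm_put_device(ti, ti->private);
}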