Lines Matching +full:c +full:-version +full:-name (free-text search hits in the Linux device-mapper ioctl code, drivers/md/dm-ioctl.c; each hit shows the source line number, the matching line, and the enclosing function)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
4 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
9 #include "dm-core.h"
10 #include "dm-ima.h"
19 #include <linux/dm-ioctl.h>
28 #define DM_DRIVER_EMAIL "dm-devel@lists.linux.dev"
39 *---------------------------------------------------------------
41 * name or uuid.
42 *---------------------------------------------------------------
50 char *name; member
75 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
85 *---------------------------------------------------------------
86 * Code for looking up a device by name
87 *---------------------------------------------------------------
95 int c; in __get_name_cell() local
97 c = strcmp(hc->name, str); in __get_name_cell()
98 if (!c) { in __get_name_cell()
99 dm_get(hc->md); in __get_name_cell()
102 n = c >= 0 ? n->rb_left : n->rb_right; in __get_name_cell()
114 int c; in __get_uuid_cell() local
116 c = strcmp(hc->uuid, str); in __get_uuid_cell()
117 if (!c) { in __get_uuid_cell()
118 dm_get(hc->md); in __get_uuid_cell()
121 n = c >= 0 ? n->rb_left : n->rb_right; in __get_uuid_cell()
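The __get_name_cell()/__get_uuid_cell() hits above are the string-keyed red-black-tree lookup: strcmp() against the key stored in each node picks the left or right child, and a match takes a dm_get() reference on the mapped device before returning. A minimal sketch of the same lookup pattern follows; struct cell, its key field, cell_lookup() and name_tree are illustrative names, not identifiers from dm-ioctl.c.

#include <linux/rbtree.h>
#include <linux/string.h>

struct cell {
	struct rb_node node;
	char *key;
};

static struct rb_root name_tree = RB_ROOT;

/* Descend the tree, comparing the search string with each node's key. */
static struct cell *cell_lookup(const char *str)
{
	struct rb_node *n = name_tree.rb_node;

	while (n) {
		struct cell *c = rb_entry(n, struct cell, node);
		int cmp = strcmp(c->key, str);

		if (!cmp)
			return c;	/* dm-ioctl.c additionally takes a dm_get() reference here */
		n = cmp >= 0 ? n->rb_left : n->rb_right;
	}
	return NULL;
}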
129 if (hc->name_set) { in __unlink_name()
130 hc->name_set = false; in __unlink_name()
131 rb_erase(&hc->name_node, &name_rb_tree); in __unlink_name()
137 if (hc->uuid_set) { in __unlink_uuid()
138 hc->uuid_set = false; in __unlink_uuid()
139 rb_erase(&hc->uuid_node, &uuid_rb_tree); in __unlink_uuid()
149 new_hc->name_set = true; in __link_name()
156 int c; in __link_name() local
158 c = strcmp(hc->name, new_hc->name); in __link_name()
159 BUG_ON(!c); in __link_name()
161 n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right; in __link_name()
164 rb_link_node(&new_hc->name_node, parent, n); in __link_name()
165 rb_insert_color(&new_hc->name_node, &name_rb_tree); in __link_name()
174 new_hc->uuid_set = true; in __link_uuid()
181 int c; in __link_uuid() local
183 c = strcmp(hc->uuid, new_hc->uuid); in __link_uuid()
184 BUG_ON(!c); in __link_uuid()
186 n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right; in __link_uuid()
189 rb_link_node(&new_hc->uuid_node, parent, n); in __link_uuid()
190 rb_insert_color(&new_hc->uuid_node, &uuid_rb_tree); in __link_uuid()
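__link_name()/__link_uuid() above are the matching insertions: walk struct rb_node ** slots down to the leaf position, then rb_link_node() attaches the new node and rb_insert_color() rebalances. The BUG_ON(!c) hits assert that a duplicate key was already ruled out before linking. A sketch of that insertion, reusing the hypothetical struct cell and name_tree from the lookup sketch:

/* Insert new_c into name_tree; assumes its key is not already present. */
static void cell_insert(struct cell *new_c)
{
	struct rb_node **n = &name_tree.rb_node, *parent = NULL;

	while (*n) {
		struct cell *c = rb_entry(*n, struct cell, node);
		int cmp = strcmp(c->key, new_c->key);

		parent = *n;
		n = cmp >= 0 ? &(*n)->rb_left : &(*n)->rb_right;
	}

	rb_link_node(&new_c->node, parent, n);		/* hook in as a leaf */
	rb_insert_color(&new_c->node, &name_tree);	/* then rebalance/recolor */
}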
212 *---------------------------------------------------------------
214 *---------------------------------------------------------------
216 static struct hash_cell *alloc_cell(const char *name, const char *uuid, in alloc_cell() argument
225 hc->name = kstrdup(name, GFP_KERNEL); in alloc_cell()
226 if (!hc->name) { in alloc_cell()
232 hc->uuid = NULL; in alloc_cell()
235 hc->uuid = kstrdup(uuid, GFP_KERNEL); in alloc_cell()
236 if (!hc->uuid) { in alloc_cell()
237 kfree(hc->name); in alloc_cell()
243 hc->name_set = hc->uuid_set = false; in alloc_cell()
244 hc->md = md; in alloc_cell()
245 hc->new_map = NULL; in alloc_cell()
252 kfree(hc->name); in free_cell()
253 kfree(hc->uuid); in free_cell()
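alloc_cell()/free_cell() above show the usual kstrdup()-based construction with reverse-order unwinding when a later allocation fails (the uuid is optional). A sketch of that shape, assuming the hypothetical struct cell from the earlier sketches also carries a char *uuid member:

#include <linux/slab.h>

static struct cell *cell_alloc(const char *name, const char *uuid)
{
	struct cell *c = kmalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->key = kstrdup(name, GFP_KERNEL);
	if (!c->key)
		goto bad_key;

	c->uuid = NULL;				/* uuid may legitimately be absent */
	if (uuid) {
		c->uuid = kstrdup(uuid, GFP_KERNEL);
		if (!c->uuid)
			goto bad_uuid;
	}
	return c;

bad_uuid:
	kfree(c->key);				/* undo in reverse order */
bad_key:
	kfree(c);
	return NULL;
}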
262 static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) in dm_hash_insert() argument
269 cell = alloc_cell(name, uuid, md); in dm_hash_insert()
271 return -ENOMEM; in dm_hash_insert()
277 hc = __get_name_cell(name); in dm_hash_insert()
279 dm_put(hc->md); in dm_hash_insert()
289 dm_put(hc->md); in dm_hash_insert()
305 return -EBUSY; in dm_hash_insert()
319 dm_set_mdptr(hc->md, NULL); in __hash_remove()
322 table = dm_get_live_table(hc->md, &srcu_idx); in __hash_remove()
325 dm_put_live_table(hc->md, srcu_idx); in __hash_remove()
328 if (hc->new_map) in __hash_remove()
329 table = hc->new_map; in __hash_remove()
330 dm_put(hc->md); in __hash_remove()
351 md = hc->md; in dm_hash_remove_all()
397 hc->uuid = new_uuid; in __set_cell_uuid()
404 * Changes the name of a hash_cell and returns the old name for
412 * Rename and move the name cell. in __change_cell_name()
415 old_name = hc->name; in __change_cell_name()
418 hc->name = new_name; in __change_cell_name()
433 unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; in dm_hash_rename()
441 return ERR_PTR(-ENOMEM); in dm_hash_rename()
455 change_uuid ? "uuid" : "name", in dm_hash_rename()
456 param->name, new); in dm_hash_rename()
457 dm_put(hc->md); in dm_hash_rename()
460 return ERR_PTR(-EBUSY); in dm_hash_rename()
466 hc = __get_name_cell(param->name); in dm_hash_rename()
468 DMERR("Unable to rename non-existent device, %s to %s%s", in dm_hash_rename()
469 param->name, change_uuid ? "uuid " : "", new); in dm_hash_rename()
472 return ERR_PTR(-ENXIO); in dm_hash_rename()
478 if (change_uuid && hc->uuid) { in dm_hash_rename()
481 param->name, new, hc->uuid); in dm_hash_rename()
482 dm_put(hc->md); in dm_hash_rename()
485 return ERR_PTR(-EINVAL); in dm_hash_rename()
496 table = dm_get_live_table(hc->md, &srcu_idx); in dm_hash_rename()
499 dm_put_live_table(hc->md, srcu_idx); in dm_hash_rename()
501 if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr, false)) in dm_hash_rename()
502 param->flags |= DM_UEVENT_GENERATED_FLAG; in dm_hash_rename()
504 md = hc->md; in dm_hash_rename()
520 *---------------------------------------------------------------
522 *---------------------------------------------------------------
532 dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false); in remove_all()
533 param->data_size = 0; in remove_all()
538 * Round up the ptr to an 8-byte boundary.
557 param->data_start = align_ptr(param + 1) - (void *) param; in get_result_buffer()
559 if (param->data_start < param_size) in get_result_buffer()
560 *len = param_size - param->data_start; in get_result_buffer()
564 return ((void *) param) + param->data_start; in get_result_buffer()
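get_result_buffer() above ("Round up the ptr to an 8-byte boundary") points data_start just past the struct dm_ioctl header, rounded up so the records that follow stay naturally aligned, and reports how much of the caller's buffer remains for results. The rounding itself is the standard add-and-mask trick; a small sketch (align_up_8() is an illustrative name):

#include <stdint.h>

/* Round ptr up to the next 8-byte boundary (a no-op if already aligned). */
static inline void *align_up_8(void *ptr)
{
	return (void *)(((uintptr_t)ptr + 7) & ~(uintptr_t)7);
}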
572 val = hc->name; in filter_device()
580 val = hc->uuid ? hc->uuid : ""; in filter_device()
608 if (!filter_device(hc, param->name, param->uuid)) in list_devices()
610 needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1); in list_devices()
612 if (param->flags & DM_UUID_FLAG && hc->uuid) in list_devices()
613 needed += align_val(strlen(hc->uuid) + 1); in list_devices()
620 if (len < needed || len < sizeof(nl->dev)) { in list_devices()
621 param->flags |= DM_BUFFER_FULL_FLAG; in list_devices()
624 param->data_size = param->data_start + needed; in list_devices()
626 nl->dev = 0; /* Flags no data */ in list_devices()
635 if (!filter_device(hc, param->name, param->uuid)) in list_devices()
638 old_nl->next = (uint32_t) ((void *) nl - in list_devices()
640 disk = dm_disk(hc->md); in list_devices()
641 nl->dev = huge_encode_dev(disk_devt(disk)); in list_devices()
642 nl->next = 0; in list_devices()
643 strcpy(nl->name, hc->name); in list_devices()
646 event_nr = align_ptr(nl->name + strlen(hc->name) + 1); in list_devices()
647 event_nr[0] = dm_get_event_nr(hc->md); in list_devices()
650 if (param->flags & DM_UUID_FLAG) { in list_devices()
651 if (hc->uuid) { in list_devices()
653 strcpy(uuid_ptr, hc->uuid); in list_devices()
654 uuid_ptr = align_ptr(uuid_ptr + strlen(hc->uuid) + 1); in list_devices()
665 BUG_ON((char *)nl - (char *)orig_nl != needed); in list_devices()
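The list_devices() hits above define the result layout: the first struct dm_name_list record sits at data_start, nl->dev == 0 means "no devices", each record's next field is the byte offset from that record to the following one, and 0 ends the chain (event numbers, and uuids when DM_UUID_FLAG was passed, are packed after each name). A hedged userspace sketch of walking that layout via the DM_LIST_DEVICES ioctl follows; error handling is trimmed and the buffer size is an arbitrary choice, so treat it as an illustration of the record chain rather than a reference client.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	size_t bufsz = 16 * 1024;
	struct dm_ioctl *dmi = calloc(1, bufsz);
	int fd = open("/dev/mapper/control", O_RDWR);

	if (!dmi || fd < 0)
		return 1;

	dmi->version[0] = DM_VERSION_MAJOR;	/* interface version handshake */
	dmi->version[1] = DM_VERSION_MINOR;
	dmi->version[2] = DM_VERSION_PATCHLEVEL;
	dmi->data_size = bufsz;			/* total buffer, header included */

	if (ioctl(fd, DM_LIST_DEVICES, dmi) < 0)
		return 1;

	struct dm_name_list *nl = (void *)dmi + dmi->data_start;
	if (nl->dev) {				/* dev == 0 flags "no data" */
		for (;;) {
			printf("%s\n", nl->name);
			if (!nl->next)
				break;
			nl = (void *)nl + nl->next;	/* offset to the next record */
		}
	}
	free(dmi);
	return 0;
}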
677 *needed += strlen(tt->name) + 1; in list_version_get_needed()
685 /* Check space - it might have changed since the first iteration */ in list_version_get_info()
686 if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > info->end) { in list_version_get_info()
687 info->flags = DM_BUFFER_FULL_FLAG; in list_version_get_info()
691 if (info->old_vers) in list_version_get_info()
692 info->old_vers->next = (uint32_t) ((void *)info->vers - (void *)info->old_vers); in list_version_get_info()
694 info->vers->version[0] = tt->version[0]; in list_version_get_info()
695 info->vers->version[1] = tt->version[1]; in list_version_get_info()
696 info->vers->version[2] = tt->version[2]; in list_version_get_info()
697 info->vers->next = 0; in list_version_get_info()
698 strcpy(info->vers->name, tt->name); in list_version_get_info()
700 info->old_vers = info->vers; in list_version_get_info()
701 info->vers = align_ptr((void *)(info->vers + 1) + strlen(tt->name) + 1); in list_version_get_info()
704 static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name) in __list_versions() argument
711 if (name) { in __list_versions()
712 tt = dm_get_target_type(name); in __list_versions()
714 return -EINVAL; in __list_versions()
731 param->flags |= DM_BUFFER_FULL_FLAG; in __list_versions()
734 param->data_size = param->data_start + needed; in __list_versions()
749 param->flags |= iter_info.flags; in __list_versions()
764 return __list_versions(param, param_size, param->name); in get_target_version()
767 static int check_name(const char *name) in check_name() argument
769 if (strchr(name, '/')) { in check_name()
770 DMERR("device name cannot contain '/'"); in check_name()
771 return -EINVAL; in check_name()
774 if (strcmp(name, DM_CONTROL_NODE) == 0 || in check_name()
775 strcmp(name, ".") == 0 || in check_name()
776 strcmp(name, "..") == 0) { in check_name()
777 DMERR("device name cannot be \"%s\", \".\", or \"..\"", DM_CONTROL_NODE); in check_name()
778 return -EINVAL; in check_name()
804 table = hc->new_map; in dm_get_inactive_table()
816 return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? in dm_get_live_or_inactive_table()
830 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | in __dev_status()
834 param->flags |= DM_SUSPEND_FLAG; in __dev_status()
837 param->flags |= DM_INTERNAL_SUSPEND_FLAG; in __dev_status()
840 param->flags |= DM_DEFERRED_REMOVE; in __dev_status()
842 param->dev = huge_encode_dev(disk_devt(disk)); in __dev_status()
849 param->open_count = dm_open_count(md); in __dev_status()
851 param->event_nr = dm_get_event_nr(md); in __dev_status()
852 param->target_count = 0; in __dev_status()
856 if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { in __dev_status()
858 param->flags |= DM_READONLY_FLAG; in __dev_status()
859 param->target_count = table->num_targets; in __dev_status()
862 param->flags |= DM_ACTIVE_PRESENT_FLAG; in __dev_status()
866 if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) { in __dev_status()
872 param->flags |= DM_READONLY_FLAG; in __dev_status()
873 param->target_count = table->num_targets; in __dev_status()
884 r = check_name(param->name); in dev_create()
888 if (param->flags & DM_PERSISTENT_DEV_FLAG) in dev_create()
889 m = MINOR(huge_decode_dev(param->dev)); in dev_create()
895 r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md); in dev_create()
902 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; in dev_create()
912 * Always use UUID for lookups if it's present, otherwise use name or dev.
918 if (*param->uuid) { in __find_device_hash_cell()
919 if (*param->name || param->dev) { in __find_device_hash_cell()
920 DMERR("Invalid ioctl structure: uuid %s, name %s, dev %llx", in __find_device_hash_cell()
921 param->uuid, param->name, (unsigned long long)param->dev); in __find_device_hash_cell()
925 hc = __get_uuid_cell(param->uuid); in __find_device_hash_cell()
928 } else if (*param->name) { in __find_device_hash_cell()
929 if (param->dev) { in __find_device_hash_cell()
930 DMERR("Invalid ioctl structure: name %s, dev %llx", in __find_device_hash_cell()
931 param->name, (unsigned long long)param->dev); in __find_device_hash_cell()
935 hc = __get_name_cell(param->name); in __find_device_hash_cell()
938 } else if (param->dev) { in __find_device_hash_cell()
939 hc = __get_dev_cell(param->dev); in __find_device_hash_cell()
946 * Sneakily write in both the name and the uuid in __find_device_hash_cell()
949 strscpy(param->name, hc->name, sizeof(param->name)); in __find_device_hash_cell()
950 if (hc->uuid) in __find_device_hash_cell()
951 strscpy(param->uuid, hc->uuid, sizeof(param->uuid)); in __find_device_hash_cell()
953 param->uuid[0] = '\0'; in __find_device_hash_cell()
955 if (hc->new_map) in __find_device_hash_cell()
956 param->flags |= DM_INACTIVE_PRESENT_FLAG; in __find_device_hash_cell()
958 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; in __find_device_hash_cell()
971 md = hc->md; in find_device()
990 return -ENXIO; in dev_remove()
993 md = hc->md; in dev_remove()
998 r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false); in dev_remove()
1000 if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) { in dev_remove()
1005 DMDEBUG_LIMIT("unable to remove open device %s", hc->name); in dev_remove()
1019 param->flags &= ~DM_DEFERRED_REMOVE; in dev_remove()
1023 if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr, false)) in dev_remove()
1024 param->flags |= DM_UEVENT_GENERATED_FLAG; in dev_remove()
1041 return -EINVAL; in invalid_str()
1047 char *new_data = (char *) param + param->data_start; in dev_rename()
1049 unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; in dev_rename()
1051 if (new_data < param->data || in dev_rename()
1053 strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { in dev_rename()
1054 DMERR("Invalid new mapped device name or uuid string supplied."); in dev_rename()
1055 return -EINVAL; in dev_rename()
1076 int r = -EINVAL, x; in dev_set_geometry()
1080 char *geostr = (char *) param + param->data_start; in dev_set_geometry()
1085 return -ENXIO; in dev_set_geometry()
1087 if (geostr < param->data || in dev_set_geometry()
1093 x = sscanf(geostr, "%lu %lu %lu %lu%c", indata, in dev_set_geometry()
1113 param->data_size = 0; in dev_set_geometry()
1128 return -ENXIO; in do_suspend()
1130 if (param->flags & DM_SKIP_LOCKFS_FLAG) in do_suspend()
1132 if (param->flags & DM_NOFLUSH_FLAG) in do_suspend()
1164 return -ENXIO; in do_resume()
1167 md = hc->md; in do_resume()
1169 new_map = hc->new_map; in do_resume()
1170 hc->new_map = NULL; in do_resume()
1171 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; in do_resume()
1180 if (param->flags & DM_SKIP_LOCKFS_FLAG) in do_resume()
1182 if (param->flags & DM_NOFLUSH_FLAG) in do_resume()
1189 if (hc && !hc->new_map) { in do_resume()
1190 hc->new_map = new_map; in do_resume()
1193 r = -ENXIO; in do_resume()
1228 if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr, need_resize_uevent)) in do_resume()
1229 param->flags |= DM_UEVENT_GENERATED_FLAG; in do_resume()
1235 * read-side critical section already. in do_resume()
1253 if (param->flags & DM_SUSPEND_FLAG) in dev_suspend()
1269 return -ENXIO; in dev_status()
1292 if (param->flags & DM_STATUS_TABLE_FLAG) in retrieve_status()
1294 else if (param->flags & DM_IMA_MEASUREMENT_FLAG) in retrieve_status()
1300 num_targets = table->num_targets; in retrieve_status()
1305 remaining = len - (outptr - outbuf); in retrieve_status()
1307 param->flags |= DM_BUFFER_FULL_FLAG; in retrieve_status()
1313 spec->status = 0; in retrieve_status()
1314 spec->sector_start = ti->begin; in retrieve_status()
1315 spec->length = ti->len; in retrieve_status()
1316 strscpy_pad(spec->target_type, ti->type->name, in retrieve_status()
1317 sizeof(spec->target_type)); in retrieve_status()
1320 remaining = len - (outptr - outbuf); in retrieve_status()
1322 param->flags |= DM_BUFFER_FULL_FLAG; in retrieve_status()
1327 if (ti->type->status) { in retrieve_status()
1328 if (param->flags & DM_NOFLUSH_FLAG) in retrieve_status()
1330 ti->type->status(ti, type, status_flags, outptr, remaining); in retrieve_status()
1336 param->flags |= DM_BUFFER_FULL_FLAG; in retrieve_status()
1341 used = param->data_start + (outptr - outbuf); in retrieve_status()
1344 spec->next = outptr - outbuf; in retrieve_status()
1348 param->data_size = used; in retrieve_status()
1350 param->target_count = num_targets; in retrieve_status()
1365 return -ENXIO; in dev_wait()
1370 if (dm_wait_event(md, param->event_nr)) { in dev_wait()
1371 r = -ERESTARTSYS; in dev_wait()
1399 struct dm_file *priv = filp->private_data; in dev_arm_poll()
1401 priv->global_event_nr = atomic_read(&dm_global_event_nr); in dev_arm_poll()
1410 if (param->flags & DM_READONLY_FLAG) in get_mode()
1420 "struct dm_target_spec must not require more than 8-byte alignment"); in next_target()
1427 size_t remaining = end - (char *)last; in next_target()
1431 * NUL-terminator of the target itself. in next_target()
1433 if (remaining - sizeof(struct dm_target_spec) <= next) { in next_target()
1435 return -EINVAL; in next_target()
1439 DMERR("Next dm_target_spec (offset %u) is not %zu-byte aligned", in next_target()
1441 return -EINVAL; in next_target()
1456 uint32_t next = param->data_start; in populate_table()
1461 if (!param->target_count) { in populate_table()
1463 return -EINVAL; in populate_table()
1466 for (i = 0; i < param->target_count; i++) { in populate_table()
1472 return -EINVAL; in populate_table()
1481 nul_terminator = memchr(target_params, 0, (size_t)(end - target_params)); in populate_table()
1483 DMERR("%s: target parameters not NUL-terminated", __func__); in populate_table()
1484 return -EINVAL; in populate_table()
1488 min_size = (size_t)(nul_terminator - (const char *)spec) + 1; in populate_table()
1490 r = dm_table_add_target(table, spec->target_type, in populate_table()
1491 (sector_t) spec->sector_start, in populate_table()
1492 (sector_t) spec->length, in populate_table()
1499 next = spec->next; in populate_table()
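populate_table()/next_target() above parse the table that userspace hands to DM_TABLE_LOAD: target_count records, the first at data_start relative to the struct dm_ioctl, each consisting of a struct dm_target_spec immediately followed by a NUL-terminated parameter string, with spec->next giving the 8-byte-aligned offset from the current spec to the next one. A hedged sketch of emitting one such record into the buffer; emit_target() and its calling convention are my own framing of that layout, not helpers from dm-ioctl.c, and the caller still has to fill in data_start, data_size and target_count and should start from a zero-initialized buffer.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/dm-ioctl.h>

/*
 * Write one dm_target_spec plus its parameter string at 'offset' bytes
 * into the ioctl buffer and return the offset where the next record
 * would start.  spec->next is the distance from this spec to that one.
 */
static size_t emit_target(struct dm_ioctl *dmi, size_t offset,
			  uint64_t start, uint64_t len,
			  const char *type, const char *params)
{
	struct dm_target_spec *spec = (void *)dmi + offset;
	size_t used = sizeof(*spec) + strlen(params) + 1;

	spec->sector_start = start;
	spec->length = len;
	spec->status = 0;
	strncpy(spec->target_type, type, sizeof(spec->target_type) - 1);
	strcpy((char *)(spec + 1), params);	/* params follow the spec directly */

	used = (used + 7) & ~(size_t)7;		/* keep the next spec 8-byte aligned */
	spec->next = used;			/* ignored for the final target */
	return offset + used;
}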
1524 return -ENXIO; in table_load()
1526 r = dm_table_create(&t, get_mode(param), param->target_count, md); in table_load()
1530 /* Protect md->type and md->queue against concurrent table loads. */ in table_load()
1543 immutable_target_type->name); in table_load()
1544 r = -EINVAL; in table_load()
1549 /* setup md->queue to reflect md's type (may block) */ in table_load()
1558 r = -EINVAL; in table_load()
1570 r = -ENXIO; in table_load()
1574 if (hc->new_map) in table_load()
1575 old_map = hc->new_map; in table_load()
1576 hc->new_map = t; in table_load()
1579 param->flags |= DM_INACTIVE_PRESENT_FLAG; in table_load()
1614 return -ENXIO; in table_clear()
1617 if (hc->new_map) { in table_clear()
1618 old_map = hc->new_map; in table_clear()
1619 hc->new_map = NULL; in table_clear()
1623 md = hc->md; in table_clear()
1626 param->flags &= ~DM_INACTIVE_PRESENT_FLAG; in table_clear()
1651 down_read(&table->devices_lock); in retrieve_deps()
1666 param->flags |= DM_BUFFER_FULL_FLAG; in retrieve_deps()
1673 deps->count = count; in retrieve_deps()
1676 deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev); in retrieve_deps()
1678 param->data_size = param->data_start + needed; in retrieve_deps()
1681 up_read(&table->devices_lock); in retrieve_deps()
1692 return -ENXIO; in table_deps()
1718 return -ENXIO; in table_status()
1733 * Process device-mapper dependent messages. Messages prefixed with '@'
1749 return -EINVAL; in message_for_md()
1759 return -EINVAL; in message_for_md()
1772 struct dm_target_msg *tmsg = (void *) param + param->data_start; in target_message()
1779 return -ENXIO; in target_message()
1781 if (tmsg < (struct dm_target_msg *) param->data || in target_message()
1782 invalid_str(tmsg->message, (void *) param + param_size)) { in target_message()
1784 r = -EINVAL; in target_message()
1788 r = dm_split_args(&argc, &argv, tmsg->message); in target_message()
1796 r = -EINVAL; in target_message()
1809 r = -ENXIO; in target_message()
1813 ti = dm_table_find_target(table, tmsg->sector); in target_message()
1816 r = -EINVAL; in target_message()
1817 } else if (ti->type->message) in target_message()
1818 r = ti->type->message(ti, argc, argv, result, maxlen); in target_message()
1821 r = -EINVAL; in target_message()
1833 param->flags |= DM_DATA_OUT_FLAG; in target_message()
1835 param->flags |= DM_BUFFER_FULL_FLAG; in target_message()
1837 param->data_size = param->data_start + strlen(result) + 1; in target_message()
1855 *---------------------------------------------------------------
1857 *---------------------------------------------------------------
1866 {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ in lookup_ioctl()
1900 * As well as checking the version compatibility this always
1901 * copies the kernel interface version out.
1908 /* Make certain version is first member of dm_ioctl struct */ in check_version()
1909 BUILD_BUG_ON(offsetof(struct dm_ioctl, version) != 0); in check_version()
1911 if (copy_from_user(kernel_params->version, user->version, sizeof(kernel_params->version))) in check_version()
1912 return -EFAULT; in check_version()
1914 if ((kernel_params->version[0] != DM_VERSION_MAJOR) || in check_version()
1915 (kernel_params->version[1] > DM_VERSION_MINOR)) { in check_version()
1919 kernel_params->version[0], in check_version()
1920 kernel_params->version[1], in check_version()
1921 kernel_params->version[2], in check_version()
1923 r = -EINVAL; in check_version()
1927 * Fill in the kernel version. in check_version()
1929 kernel_params->version[0] = DM_VERSION_MAJOR; in check_version()
1930 kernel_params->version[1] = DM_VERSION_MINOR; in check_version()
1931 kernel_params->version[2] = DM_VERSION_PATCHLEVEL; in check_version()
1932 if (copy_to_user(user->version, kernel_params->version, sizeof(kernel_params->version))) in check_version()
1933 return -EFAULT; in check_version()
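check_version() above implements the handshake every dm ioctl starts with: the caller's three-part version is read from the start of struct dm_ioctl, rejected if the major differs or the minor is newer than the kernel's, and the kernel's own DM_VERSION_* triple is always copied back out. A userspace sketch of that handshake using the bare DM_VERSION command (which, per the ctl_ioctl() hits further down, returns right after this check):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	struct dm_ioctl dmi;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&dmi, 0, sizeof(dmi));
	dmi.version[0] = DM_VERSION_MAJOR;	/* must match the kernel's major */
	dmi.version[1] = DM_VERSION_MINOR;	/* must not be newer than the kernel's minor */
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size = sizeof(dmi);

	if (ioctl(fd, DM_VERSION, &dmi) < 0)
		return 1;

	/* The kernel has overwritten version[] with its own interface version. */
	printf("dm ioctl interface %u.%u.%u\n",
	       dmi.version[0], dmi.version[1], dmi.version[2]);
	return 0;
}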
1957 /* check_version() already copied version from userspace, avoid TOCTOU */ in copy_params()
1958 if (copy_from_user((char *)param_kernel + sizeof(param_kernel->version), in copy_params()
1959 (char __user *)user + sizeof(param_kernel->version), in copy_params()
1960 minimum_data_size - sizeof(param_kernel->version))) in copy_params()
1961 return -EFAULT; in copy_params()
1963 if (unlikely(param_kernel->data_size < minimum_data_size) || in copy_params()
1964 unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) { in copy_params()
1966 param_kernel->data_size); in copy_params()
1967 return -EINVAL; in copy_params()
1970 secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG; in copy_params()
1976 dmi->data_size = minimum_data_size; in copy_params()
1986 dmi = kvmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH); in copy_params()
1989 if (secure_data && clear_user(user, param_kernel->data_size)) in copy_params()
1990 return -EFAULT; in copy_params()
1991 return -ENOMEM; in copy_params()
1999 if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size, in copy_params()
2000 param_kernel->data_size - minimum_data_size)) in copy_params()
2004 if (secure_data && clear_user(user, param_kernel->data_size)) in copy_params()
2011 free_params(dmi, param_kernel->data_size, *param_flags); in copy_params()
2013 return -EFAULT; in copy_params()
2019 param->flags &= ~DM_BUFFER_FULL_FLAG; in validate_params()
2020 param->flags &= ~DM_UEVENT_GENERATED_FLAG; in validate_params()
2021 param->flags &= ~DM_SECURE_DATA_FLAG; in validate_params()
2022 param->flags &= ~DM_DATA_OUT_FLAG; in validate_params()
2031 if (!*param->name) { in validate_params()
2032 DMERR("name not supplied when creating device"); in validate_params()
2033 return -EINVAL; in validate_params()
2035 } else if (*param->uuid && *param->name) { in validate_params()
2036 DMERR("only supply one of name or uuid, cmd(%u)", cmd); in validate_params()
2037 return -EINVAL; in validate_params()
2041 param->name[DM_NAME_LEN - 1] = '\0'; in validate_params()
2042 param->uuid[DM_UUID_LEN - 1] = '\0'; in validate_params()
2060 return -EACCES; in ctl_ioctl()
2063 return -ENOTTY; in ctl_ioctl()
2068 * Check the interface version passed in. This also in ctl_ioctl()
2069 * writes out the kernel's interface version. in ctl_ioctl()
2076 * Nothing more to do for the version command. in ctl_ioctl()
2084 return -ENOTTY; in ctl_ioctl()
2095 input_param_size = param->data_size; in ctl_ioctl()
2100 param->data_size = offsetof(struct dm_ioctl, data); in ctl_ioctl()
2103 if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) && in ctl_ioctl()
2113 if (!r && copy_to_user(user, param, param->data_size)) in ctl_ioctl()
2114 r = -EFAULT; in ctl_ioctl()
2144 priv = filp->private_data = kmalloc(sizeof(struct dm_file), GFP_KERNEL); in dm_open()
2146 return -ENOMEM; in dm_open()
2148 priv->global_event_nr = atomic_read(&dm_global_event_nr); in dm_open()
2155 kfree(filp->private_data); in dm_release()
2161 struct dm_file *priv = filp->private_data; in dm_poll()
2166 if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0) in dm_poll()
2184 .name = DM_NAME,
2218 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
2220 * @name: Buffer (size DM_NAME_LEN) for name
2223 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) in dm_copy_name_and_uuid() argument
2229 return -ENXIO; in dm_copy_name_and_uuid()
2234 r = -ENXIO; in dm_copy_name_and_uuid()
2238 if (name) in dm_copy_name_and_uuid()
2239 strcpy(name, hc->name); in dm_copy_name_and_uuid()
2241 strcpy(uuid, hc->uuid ? : ""); in dm_copy_name_and_uuid()
2251 * dm_early_create - create a mapped device in early boot.
2263 * @dmi->target_count.
2276 if (!dmi->target_count) in dm_early_create()
2277 return -EINVAL; in dm_early_create()
2279 r = check_name(dmi->name); in dm_early_create()
2283 if (dmi->flags & DM_PERSISTENT_DEV_FLAG) in dm_early_create()
2284 m = MINOR(huge_decode_dev(dmi->dev)); in dm_early_create()
2292 r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md); in dm_early_create()
2297 r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md); in dm_early_create()
2302 for (i = 0; i < dmi->target_count; i++) { in dm_early_create()
2303 r = dm_table_add_target(t, spec_array[i]->target_type, in dm_early_create()
2304 (sector_t) spec_array[i]->sector_start, in dm_early_create()
2305 (sector_t) spec_array[i]->length, in dm_early_create()
2318 /* setup md->queue to reflect md's type (may block) */ in dm_early_create()
2332 set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG)); in dm_early_create()
2339 DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name); in dm_early_create()
2347 (void) __hash_remove(__get_name_cell(dmi->name)); in dm_early_create()