1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * RDMA Network Block Driver
4 *
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8 */
9
10 #undef pr_fmt
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12
13 #include <linux/types.h>
14 #include <linux/ctype.h>
15 #include <linux/parser.h>
16 #include <linux/module.h>
17 #include <linux/in6.h>
18 #include <linux/fs.h>
19 #include <linux/uaccess.h>
20 #include <linux/device.h>
21 #include <rdma/ib.h>
22 #include <rdma/rdma_cm.h>
23
24 #include "rnbd-clt.h"
25
/* "ctl" device and its class, /sys/class/rnbd-client/ctl */
static struct device *rnbd_dev;
static const struct class rnbd_dev_class = {
	.name = "rnbd-client",
};
/* Parent kobject of the "devices" dir that holds per-device symlinks */
static struct kobject *rnbd_devs_kobj;

/*
 * map_device option flags.  Kept as a bitmask so that, after parsing,
 * the presence of each mandatory option can be checked in opt_mask.
 * NOTE(review): bits 1 << 4 and 1 << 5 are unused — presumably left by
 * removed options; confirm before reusing them.
 */
enum {
	RNBD_OPT_ERR		= 0,
	RNBD_OPT_DEST_PORT	= 1 << 0,
	RNBD_OPT_PATH		= 1 << 1,
	RNBD_OPT_DEV_PATH	= 1 << 2,
	RNBD_OPT_ACCESS_MODE	= 1 << 3,
	RNBD_OPT_SESSNAME	= 1 << 6,
	RNBD_OPT_NR_POLL_QUEUES	= 1 << 7,
};

/* Options that must be present in every map_device request */
static const unsigned int rnbd_opt_mandatory[] = {
	RNBD_OPT_DEV_PATH,
	RNBD_OPT_SESSNAME,
};

/* Token table for match_token(); keys are "<name>=<value>" */
static const match_table_t rnbd_opt_tokens = {
	{RNBD_OPT_PATH,			"path=%s"		},
	{RNBD_OPT_DEV_PATH,		"device_path=%s"	},
	{RNBD_OPT_DEST_PORT,		"dest_port=%d"		},
	{RNBD_OPT_ACCESS_MODE,		"access_mode=%s"	},
	{RNBD_OPT_SESSNAME,		"sessname=%s"		},
	{RNBD_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{RNBD_OPT_ERR,			NULL			},
};

/*
 * Out-parameters for rnbd_clt_parse_map_options(); every member points
 * at storage owned by the caller and is filled in as the corresponding
 * token is parsed.
 */
struct rnbd_map_options {
	char *sessname;
	struct rtrs_addr *paths;
	size_t *path_cnt;
	char *pathname;
	u16 *dest_port;
	enum rnbd_access_mode *access_mode;
	u32 *nr_poll_queues;
};
66
/*
 * rnbd_clt_parse_map_options() - parse a map_device option string
 * @buf:		user-supplied, space-separated "key=value" options
 * @max_path_cnt:	capacity of opt->paths[]
 * @opt:		out-parameters (see struct rnbd_map_options); filled
 *			in as the corresponding tokens are encountered
 *
 * On return *opt->path_cnt holds the number of paths successfully parsed;
 * it is set even on error so the caller can release partially parsed
 * address state.
 *
 * Return: 0 on success, -EINVAL on bad input or missing mandatory options,
 * -ENOMEM on allocation failure (also used for "too many paths").
 */
static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
				      struct rnbd_map_options *opt)
{
	char *options, *sep_opt;
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int nr_poll_queues = 0;
	int dest_port = 0;
	int p_cnt = 0;
	int i;

	/* Work on a writable copy: strstrip()/strsep() modify the string */
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = strstrip(options);
	while ((p = strsep(&sep_opt, " ")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, rnbd_opt_tokens, args);
		/* Accumulate seen options for the mandatory check below */
		opt_mask |= token;

		switch (token) {
		case RNBD_OPT_SESSNAME:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) > NAME_MAX) {
				pr_err("map_device: sessname too long\n");
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			/*
			 * NOTE(review): a name of exactly NAME_MAX chars
			 * passes the check above but is truncated to
			 * NAME_MAX - 1 by strscpy() — confirm intended.
			 */
			strscpy(opt->sessname, p, NAME_MAX);
			kfree(p);
			break;

		case RNBD_OPT_PATH:
			if (p_cnt >= max_path_cnt) {
				pr_err("map_device: too many (> %zu) paths provided\n",
				       max_path_cnt);
				ret = -ENOMEM;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}

			/*
			 * Uses the dest_port parsed so far: a dest_port=
			 * token only affects path= tokens that follow it.
			 */
			ret = rtrs_addr_to_sockaddr(p, strlen(p),
						    *opt->dest_port,
						    &opt->paths[p_cnt]);
			if (ret) {
				pr_err("Can't parse path %s: %d\n", p, ret);
				kfree(p);
				goto out;
			}

			p_cnt++;

			kfree(p);
			break;

		case RNBD_OPT_DEV_PATH:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) > NAME_MAX) {
				pr_err("map_device: Device path too long\n");
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			strscpy(opt->pathname, p, NAME_MAX);
			kfree(p);
			break;

		case RNBD_OPT_DEST_PORT:
			/* Valid TCP/RDMA port range: 0..65535 */
			if (match_int(args, &dest_port) || dest_port < 0 ||
			    dest_port > 65535) {
				pr_err("bad destination port number parameter '%d'\n",
				       dest_port);
				ret = -EINVAL;
				goto out;
			}
			*opt->dest_port = dest_port;
			break;

		case RNBD_OPT_ACCESS_MODE:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}

			if (!strcmp(p, "ro")) {
				*opt->access_mode = RNBD_ACCESS_RO;
			} else if (!strcmp(p, "rw")) {
				*opt->access_mode = RNBD_ACCESS_RW;
			} else if (!strcmp(p, "migration")) {
				*opt->access_mode = RNBD_ACCESS_MIGRATION;
			} else {
				pr_err("map_device: Invalid access_mode: '%s'\n",
				       p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}

			kfree(p);
			break;

		case RNBD_OPT_NR_POLL_QUEUES:
			/* -1 means "one poll queue per possible CPU" */
			if (match_int(args, &nr_poll_queues) || nr_poll_queues < -1 ||
			    nr_poll_queues > (int)nr_cpu_ids) {
				pr_err("bad nr_poll_queues parameter '%d'\n",
				       nr_poll_queues);
				ret = -EINVAL;
				goto out;
			}
			if (nr_poll_queues == -1)
				nr_poll_queues = nr_cpu_ids;
			*opt->nr_poll_queues = nr_poll_queues;
			break;

		default:
			pr_err("map_device: Unknown parameter or missing value '%s'\n",
			       p);
			ret = -EINVAL;
			goto out;
		}
	}

	/* All mandatory options must have been seen at least once */
	for (i = 0; i < ARRAY_SIZE(rnbd_opt_mandatory); i++) {
		if ((opt_mask & rnbd_opt_mandatory[i])) {
			ret = 0;
		} else {
			pr_err("map_device: Parameters missing\n");
			ret = -EINVAL;
			break;
		}
	}

out:
	/* Report parsed path count even on failure (partial cleanup) */
	*opt->path_cnt = p_cnt;
	kfree(options);
	return ret;
}
224
state_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)225 static ssize_t state_show(struct kobject *kobj,
226 struct kobj_attribute *attr, char *page)
227 {
228 struct rnbd_clt_dev *dev;
229
230 dev = container_of(kobj, struct rnbd_clt_dev, kobj);
231
232 switch (dev->dev_state) {
233 case DEV_STATE_INIT:
234 return sysfs_emit(page, "init\n");
235 case DEV_STATE_MAPPED:
236 /* TODO fix cli tool before changing to proper state */
237 return sysfs_emit(page, "open\n");
238 case DEV_STATE_MAPPED_DISCONNECTED:
239 /* TODO fix cli tool before changing to proper state */
240 return sysfs_emit(page, "closed\n");
241 case DEV_STATE_UNMAPPED:
242 return sysfs_emit(page, "unmapped\n");
243 default:
244 return sysfs_emit(page, "unknown\n");
245 }
246 }
247
248 static struct kobj_attribute rnbd_clt_state_attr = __ATTR_RO(state);
249
nr_poll_queues_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)250 static ssize_t nr_poll_queues_show(struct kobject *kobj,
251 struct kobj_attribute *attr, char *page)
252 {
253 struct rnbd_clt_dev *dev;
254
255 dev = container_of(kobj, struct rnbd_clt_dev, kobj);
256
257 return sysfs_emit(page, "%d\n", dev->nr_poll_queues);
258 }
259
260 static struct kobj_attribute rnbd_clt_nr_poll_queues =
261 __ATTR_RO(nr_poll_queues);
262
mapping_path_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)263 static ssize_t mapping_path_show(struct kobject *kobj,
264 struct kobj_attribute *attr, char *page)
265 {
266 struct rnbd_clt_dev *dev;
267
268 dev = container_of(kobj, struct rnbd_clt_dev, kobj);
269
270 return sysfs_emit(page, "%s\n", dev->pathname);
271 }
272
273 static struct kobj_attribute rnbd_clt_mapping_path_attr =
274 __ATTR_RO(mapping_path);
275
access_mode_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)276 static ssize_t access_mode_show(struct kobject *kobj,
277 struct kobj_attribute *attr, char *page)
278 {
279 struct rnbd_clt_dev *dev;
280
281 dev = container_of(kobj, struct rnbd_clt_dev, kobj);
282
283 return sysfs_emit(page, "%s\n", rnbd_access_modes[dev->access_mode].str);
284 }
285
286 static struct kobj_attribute rnbd_clt_access_mode =
287 __ATTR_RO(access_mode);
288
rnbd_clt_unmap_dev_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)289 static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
290 struct kobj_attribute *attr, char *page)
291 {
292 return sysfs_emit(page, "Usage: echo <normal|force> > %s\n",
293 attr->attr.name);
294 }
295
/*
 * rnbd_clt_unmap_dev_store() - sysfs write handler for "unmap_device"
 *
 * Accepts "normal" or "force".  "force" is passed through to
 * rnbd_clt_unmap_device(); from this function's view it only selects the
 * flag — the unmap semantics live in rnbd_clt_unmap_device().
 *
 * Return: @count on success, negative errno otherwise.
 */
static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	struct rnbd_clt_dev *dev;
	char *opt, *options;
	bool force;
	int err;

	/* Copy: strstrip() modifies the buffer and sysfs gives us const */
	opt = kstrdup(buf, GFP_KERNEL);
	if (!opt)
		return -ENOMEM;

	options = strstrip(opt);
	dev = container_of(kobj, struct rnbd_clt_dev, kobj);
	if (sysfs_streq(options, "normal")) {
		force = false;
	} else if (sysfs_streq(options, "force")) {
		force = true;
	} else {
		rnbd_clt_err(dev,
			     "unmap_device: Invalid value: %s\n",
			     options);
		err = -EINVAL;
		goto out;
	}

	rnbd_clt_info(dev, "Unmapping device, option: %s.\n",
		      force ? "force" : "normal");

	/*
	 * We take explicit module reference only for one reason: do not
	 * race with lockless rnbd_destroy_sessions().
	 */
	if (!try_module_get(THIS_MODULE)) {
		err = -ENODEV;
		goto out;
	}
	err = rnbd_clt_unmap_device(dev, force, &attr->attr);
	if (err) {
		/* -EALREADY means someone else is already unmapping: quiet */
		if (err != -EALREADY)
			rnbd_clt_err(dev, "unmap_device: %d\n", err);
		goto module_put;
	}

	/*
	 * Here device can be vanished!
	 */

	err = count;

module_put:
	module_put(THIS_MODULE);
out:
	kfree(opt);

	return err;
}

static struct kobj_attribute rnbd_clt_unmap_device_attr =
	__ATTR(unmap_device, 0644, rnbd_clt_unmap_dev_show,
	       rnbd_clt_unmap_dev_store);
358
rnbd_clt_resize_dev_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)359 static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
360 struct kobj_attribute *attr,
361 char *page)
362 {
363 return sysfs_emit(page, "Usage: echo <new size in sectors> > %s\n",
364 attr->attr.name);
365 }
366
rnbd_clt_resize_dev_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)367 static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
368 struct kobj_attribute *attr,
369 const char *buf, size_t count)
370 {
371 int ret;
372 unsigned long sectors;
373 struct rnbd_clt_dev *dev;
374
375 dev = container_of(kobj, struct rnbd_clt_dev, kobj);
376
377 ret = kstrtoul(buf, 0, §ors);
378 if (ret)
379 return ret;
380
381 ret = rnbd_clt_resize_disk(dev, sectors);
382 if (ret)
383 return ret;
384
385 return count;
386 }
387
388 static struct kobj_attribute rnbd_clt_resize_dev_attr =
389 __ATTR(resize, 0644, rnbd_clt_resize_dev_show,
390 rnbd_clt_resize_dev_store);
391
rnbd_clt_remap_dev_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)392 static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
393 struct kobj_attribute *attr, char *page)
394 {
395 return sysfs_emit(page, "Usage: echo <1> > %s\n", attr->attr.name);
396 }
397
rnbd_clt_remap_dev_store(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)398 static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
399 struct kobj_attribute *attr,
400 const char *buf, size_t count)
401 {
402 struct rnbd_clt_dev *dev;
403 char *opt, *options;
404 int err;
405
406 opt = kstrdup(buf, GFP_KERNEL);
407 if (!opt)
408 return -ENOMEM;
409
410 options = strstrip(opt);
411 dev = container_of(kobj, struct rnbd_clt_dev, kobj);
412 if (!sysfs_streq(options, "1")) {
413 rnbd_clt_err(dev,
414 "remap_device: Invalid value: %s\n",
415 options);
416 err = -EINVAL;
417 goto out;
418 }
419 err = rnbd_clt_remap_device(dev);
420 if (likely(!err))
421 err = count;
422
423 out:
424 kfree(opt);
425
426 return err;
427 }
428
429 static struct kobj_attribute rnbd_clt_remap_device_attr =
430 __ATTR(remap_device, 0644, rnbd_clt_remap_dev_show,
431 rnbd_clt_remap_dev_store);
432
session_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)433 static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
434 char *page)
435 {
436 struct rnbd_clt_dev *dev;
437
438 dev = container_of(kobj, struct rnbd_clt_dev, kobj);
439
440 return sysfs_emit(page, "%s\n", dev->sess->sessname);
441 }
442
443 static struct kobj_attribute rnbd_clt_session_attr =
444 __ATTR_RO(session);
445
/* Per-device attributes, exposed under the device's "rnbd" kobject */
static struct attribute *rnbd_dev_attrs[] = {
	&rnbd_clt_unmap_device_attr.attr,
	&rnbd_clt_resize_dev_attr.attr,
	&rnbd_clt_remap_device_attr.attr,
	&rnbd_clt_mapping_path_attr.attr,
	&rnbd_clt_state_attr.attr,
	&rnbd_clt_session_attr.attr,
	&rnbd_clt_access_mode.attr,
	&rnbd_clt_nr_poll_queues.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rnbd_dev);
458
/* Remove the per-device symlink under "devices/" and free its name. */
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
{
	/*
	 * Module unload (rnbd_client_exit) races with a manual unmap of the
	 * last device via sysfs, i.e. rnbd_clt_unmap_dev_store(): removing
	 * the sysfs link twice triggers a sysfs warning.  try_module_get()
	 * succeeds only while the module is still alive, so the link is
	 * removed at most once.
	 */
	if (dev->blk_symlink_name) {
		if (try_module_get(THIS_MODULE)) {
			sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
			module_put(THIS_MODULE);
		}
		/* The name buffer must be freed on either path. */
		kfree(dev->blk_symlink_name);
		dev->blk_symlink_name = NULL;
	}
}
477
rnbd_dev_release(struct kobject * kobj)478 static void rnbd_dev_release(struct kobject *kobj)
479 {
480 struct rnbd_clt_dev *dev = container_of(kobj, struct rnbd_clt_dev, kobj);
481
482 kfree(dev);
483 }
484
485 static const struct kobj_type rnbd_dev_ktype = {
486 .sysfs_ops = &kobj_sysfs_ops,
487 .default_groups = rnbd_dev_groups,
488 .release = rnbd_dev_release,
489 };
490
/*
 * Create the "rnbd" kobject under the gendisk's sysfs directory and
 * announce the disk with a KOBJ_ONLINE uevent.
 *
 * Return: 0 on success, negative errno from kobject_init_and_add().
 */
static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
{
	int ret;
	struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;

	ret = kobject_init_and_add(&dev->kobj, &rnbd_dev_ktype, gd_kobj, "%s",
				   "rnbd");
	if (ret) {
		rnbd_clt_err(dev, "Failed to create device sysfs dir, err: %d\n",
			     ret);
		/* kobject_init_and_add() took a ref even on failure */
		kobject_put(&dev->kobj);
	}
	/*
	 * NOTE(review): the uevent is emitted even when kobject creation
	 * failed above — looks deliberate (disk exists regardless), but
	 * confirm against the unmap path.
	 */
	kobject_uevent(gd_kobj, KOBJ_ONLINE);

	return ret;
}
507
rnbd_clt_map_device_show(struct kobject * kobj,struct kobj_attribute * attr,char * page)508 static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
509 struct kobj_attribute *attr,
510 char *page)
511 {
512 return sysfs_emit(page,
513 "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
514 attr->attr.name);
515 }
516
/*
 * rnbd_clt_get_path_name() - build "<pathname>@<sessname>" into @buf
 * @dev: device whose pathname/session name are combined
 * @buf: destination buffer
 * @len: size of @buf
 *
 * '/' is not allowed in a sysfs entry name, so every '/' in the device
 * path is replaced with '!' (idiomatic: strreplace() instead of the
 * original hand-rolled strchr() loop — identical result).
 *
 * Return: 0 on success, -ENAMETOOLONG if the result would not fit.
 */
static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
				  size_t len)
{
	int ret;
	char pathname[NAME_MAX];

	strscpy(pathname, dev->pathname, sizeof(pathname));
	/* sysfs names must not contain '/', substitute with '!' */
	strreplace(pathname, '/', '!');

	ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
	if (ret >= len)
		return -ENAMETOOLONG;

	return 0;
}
533
rnbd_clt_add_dev_symlink(struct rnbd_clt_dev * dev)534 static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
535 {
536 struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
537 int ret, len;
538
539 len = strlen(dev->pathname) + strlen(dev->sess->sessname) + 2;
540 dev->blk_symlink_name = kzalloc(len, GFP_KERNEL);
541 if (!dev->blk_symlink_name) {
542 rnbd_clt_err(dev, "Failed to allocate memory for blk_symlink_name\n");
543 return -ENOMEM;
544 }
545
546 ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name,
547 len);
548 if (ret) {
549 rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n",
550 ret);
551 goto out_err;
552 }
553
554 ret = sysfs_create_link(rnbd_devs_kobj, gd_kobj,
555 dev->blk_symlink_name);
556 if (ret) {
557 rnbd_clt_err(dev, "Creating /sys/block symlink failed, err: %d\n",
558 ret);
559 goto out_err;
560 }
561
562 return 0;
563
564 out_err:
565 kfree(dev->blk_symlink_name);
566 dev->blk_symlink_name = NULL ;
567 return ret;
568 }
569
/*
 * rnbd_clt_map_device_store() - sysfs write handler for "map_device"
 *
 * Parses the option string, maps the remote device over the rtrs
 * session and wires up the per-device sysfs kobject and symlink.
 * Supports up to ARRAY_SIZE(paths) (= 6) paths per mapping.
 *
 * Return: @count on success, negative errno otherwise.
 */
static ssize_t rnbd_clt_map_device_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	struct rnbd_clt_dev *dev;
	struct rnbd_map_options opt;
	int ret;
	char pathname[NAME_MAX];
	char sessname[NAME_MAX];
	enum rnbd_access_mode access_mode = RNBD_ACCESS_RW;
	u16 port_nr = RTRS_PORT;
	u32 nr_poll_queues = 0;

	struct sockaddr_storage *addrs;
	struct rtrs_addr paths[6];
	size_t path_cnt;

	/* Point each opt member at the stack defaults above */
	opt.sessname = sessname;
	opt.paths = paths;
	opt.path_cnt = &path_cnt;
	opt.pathname = pathname;
	opt.dest_port = &port_nr;
	opt.access_mode = &access_mode;
	opt.nr_poll_queues = &nr_poll_queues;
	/* One src + one dst sockaddr_storage per possible path */
	addrs = kzalloc_objs(*addrs, ARRAY_SIZE(paths) * 2);
	if (!addrs)
		return -ENOMEM;

	/* path_cnt doubles as loop index here; parse overwrites it below */
	for (path_cnt = 0; path_cnt < ARRAY_SIZE(paths); path_cnt++) {
		paths[path_cnt].src = &addrs[path_cnt * 2];
		paths[path_cnt].dst = &addrs[path_cnt * 2 + 1];
	}

	ret = rnbd_clt_parse_map_options(buf, ARRAY_SIZE(paths), &opt);
	if (ret)
		goto out;

	pr_info("Mapping device %s on session %s, (access_mode: %s, nr_poll_queues: %d)\n",
		pathname, sessname,
		rnbd_access_modes[access_mode].str,
		nr_poll_queues);

	dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname,
				  access_mode, nr_poll_queues);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}

	ret = rnbd_clt_add_dev_kobj(dev);
	if (ret)
		goto unmap_dev;

	ret = rnbd_clt_add_dev_symlink(dev);
	if (ret)
		goto unmap_dev;

	kfree(addrs);
	return count;

unmap_dev:
	/* Force-unmap: undo the partially created mapping */
	rnbd_clt_unmap_device(dev, true, NULL);
out:
	kfree(addrs);
	return ret;
}

static struct kobj_attribute rnbd_clt_map_device_attr =
	__ATTR(map_device, 0644,
	       rnbd_clt_map_device_show, rnbd_clt_map_device_store);
640
/* Attributes of the "ctl" device itself (currently only map_device) */
static struct attribute *default_attrs[] = {
	&rnbd_clt_map_device_attr.attr,
	NULL,
};

static struct attribute_group default_attr_group = {
	.attrs = default_attrs,
};

static const struct attribute_group *default_attr_groups[] = {
	&default_attr_group,
	NULL,
};
654
/*
 * rnbd_clt_create_sysfs_files() - register the module-level sysfs tree
 *
 * Creates, in order: the "rnbd-client" class, the "ctl" device with its
 * attribute groups, and the "devices" kobject that holds per-device
 * symlinks.  Uses goto-based unwinding on failure so each step is
 * undone in reverse order.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int rnbd_clt_create_sysfs_files(void)
{
	int err;

	err = class_register(&rnbd_dev_class);
	if (err)
		return err;

	rnbd_dev = device_create_with_groups(&rnbd_dev_class, NULL,
					     MKDEV(0, 0), NULL,
					     default_attr_groups, "ctl");
	if (IS_ERR(rnbd_dev)) {
		err = PTR_ERR(rnbd_dev);
		goto cls_destroy;
	}
	rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
	if (!rnbd_devs_kobj) {
		err = -ENOMEM;
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	device_destroy(&rnbd_dev_class, MKDEV(0, 0));
cls_destroy:
	class_unregister(&rnbd_dev_class);

	return err;
}
685
/*
 * rnbd_clt_destroy_sysfs_files() - tear down the module-level sysfs tree
 *
 * Reverse of rnbd_clt_create_sysfs_files(): remove the ctl attribute
 * group, drop the "devices" kobject (del before put, per kobject API),
 * destroy the ctl device, then unregister the class.
 */
void rnbd_clt_destroy_sysfs_files(void)
{
	sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group);
	kobject_del(rnbd_devs_kobj);
	kobject_put(rnbd_devs_kobj);
	device_destroy(&rnbd_dev_class, MKDEV(0, 0));
	class_unregister(&rnbd_dev_class);
}
694