// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/parser.h>
#include <linux/module.h>
#include <linux/in6.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <rdma/ib.h>
#include <rdma/rdma_cm.h>

#include "rnbd-clt.h"

static struct device *rnbd_dev;
static const struct class rnbd_dev_class = {
	.name = "rnbd-client",
};
static struct kobject *rnbd_devs_kobj;

enum {
	RNBD_OPT_ERR = 0,
	RNBD_OPT_DEST_PORT = 1 << 0,
	RNBD_OPT_PATH = 1 << 1,
	RNBD_OPT_DEV_PATH = 1 << 2,
	RNBD_OPT_ACCESS_MODE = 1 << 3,
	RNBD_OPT_SESSNAME = 1 << 6,
	RNBD_OPT_NR_POLL_QUEUES = 1 << 7,
};

static const unsigned int rnbd_opt_mandatory[] = {
	RNBD_OPT_DEV_PATH,
	RNBD_OPT_SESSNAME,
};

static const match_table_t rnbd_opt_tokens = {
	{RNBD_OPT_PATH,			"path=%s"		},
	{RNBD_OPT_DEV_PATH,		"device_path=%s"	},
	{RNBD_OPT_DEST_PORT,		"dest_port=%d"		},
	{RNBD_OPT_ACCESS_MODE,		"access_mode=%s"	},
	{RNBD_OPT_SESSNAME,		"sessname=%s"		},
	{RNBD_OPT_NR_POLL_QUEUES,	"nr_poll_queues=%d"	},
	{RNBD_OPT_ERR,			NULL			},
};

struct rnbd_map_options {
	char *sessname;
	struct rtrs_addr *paths;
	size_t *path_cnt;
	char *pathname;
	u16 *dest_port;
	enum rnbd_access_mode *access_mode;
	u32 *nr_poll_queues;
};

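/*
 * rnbd_clt_parse_map_options() - parse a map_device option string
 *
 * The buffer is a space-separated list of "key=value" tokens matching
 * rnbd_opt_tokens, e.g. (addresses here are purely illustrative):
 *
 *   sessname=sess1 path=ip:192.168.0.1 device_path=/dev/nullb0 access_mode=rw
 *
 * Parsed values are written through the pointers in @opt.  device_path and
 * sessname are mandatory (see rnbd_opt_mandatory); at most @max_path_cnt
 * paths are accepted.  Returns 0 on success or a negative errno.
 */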
static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
				      struct rnbd_map_options *opt)
{
	char *options, *sep_opt;
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int nr_poll_queues = 0;
	int dest_port = 0;
	int p_cnt = 0;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = strstrip(options);
	while ((p = strsep(&sep_opt, " ")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, rnbd_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case RNBD_OPT_SESSNAME:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) > NAME_MAX) {
				pr_err("map_device: sessname too long\n");
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			strscpy(opt->sessname, p, NAME_MAX);
			kfree(p);
			break;

		case RNBD_OPT_PATH:
			if (p_cnt >= max_path_cnt) {
				pr_err("map_device: too many (> %zu) paths provided\n",
				       max_path_cnt);
				ret = -ENOMEM;
				goto out;
			}
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}

			ret = rtrs_addr_to_sockaddr(p, strlen(p),
						    *opt->dest_port,
						    &opt->paths[p_cnt]);
			if (ret) {
				pr_err("Can't parse path %s: %d\n", p, ret);
				kfree(p);
				goto out;
			}

			p_cnt++;

			kfree(p);
			break;

		case RNBD_OPT_DEV_PATH:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) > NAME_MAX) {
				pr_err("map_device: Device path too long\n");
				ret = -EINVAL;
				kfree(p);
				goto out;
			}
			strscpy(opt->pathname, p, NAME_MAX);
			kfree(p);
			break;

		case RNBD_OPT_DEST_PORT:
			if (match_int(args, &dest_port) || dest_port < 0 ||
			    dest_port > 65535) {
				pr_err("bad destination port number parameter '%d'\n",
				       dest_port);
				ret = -EINVAL;
				goto out;
			}
			*opt->dest_port = dest_port;
			break;

		case RNBD_OPT_ACCESS_MODE:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}

			if (!strcmp(p, "ro")) {
				*opt->access_mode = RNBD_ACCESS_RO;
			} else if (!strcmp(p, "rw")) {
				*opt->access_mode = RNBD_ACCESS_RW;
			} else if (!strcmp(p, "migration")) {
				*opt->access_mode = RNBD_ACCESS_MIGRATION;
			} else {
				pr_err("map_device: Invalid access_mode: '%s'\n",
				       p);
				ret = -EINVAL;
				kfree(p);
				goto out;
			}

			kfree(p);
			break;

		case RNBD_OPT_NR_POLL_QUEUES:
			if (match_int(args, &nr_poll_queues) || nr_poll_queues < -1 ||
			    nr_poll_queues > (int)nr_cpu_ids) {
				pr_err("bad nr_poll_queues parameter '%d'\n",
				       nr_poll_queues);
				ret = -EINVAL;
				goto out;
			}
			if (nr_poll_queues == -1)
				nr_poll_queues = nr_cpu_ids;
			*opt->nr_poll_queues = nr_poll_queues;
			break;

		default:
			pr_err("map_device: Unknown parameter or missing value '%s'\n",
			       p);
			ret = -EINVAL;
			goto out;
		}
	}

	for (i = 0; i < ARRAY_SIZE(rnbd_opt_mandatory); i++) {
		if ((opt_mask & rnbd_opt_mandatory[i])) {
			ret = 0;
		} else {
			pr_err("map_device: Parameters missing\n");
			ret = -EINVAL;
			break;
		}
	}

out:
	*opt->path_cnt = p_cnt;
	kfree(options);
	return ret;
}

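/*
 * Report the device state.  DEV_STATE_MAPPED and
 * DEV_STATE_MAPPED_DISCONNECTED are still reported as "open" and "closed"
 * respectively (see the TODOs below) so as not to break the existing cli
 * tool.
 */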
static ssize_t state_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *page)
{
	struct rnbd_clt_dev *dev;

	dev = container_of(kobj, struct rnbd_clt_dev, kobj);

	switch (dev->dev_state) {
	case DEV_STATE_INIT:
		return sysfs_emit(page, "init\n");
	case DEV_STATE_MAPPED:
		/* TODO fix cli tool before changing to proper state */
		return sysfs_emit(page, "open\n");
	case DEV_STATE_MAPPED_DISCONNECTED:
		/* TODO fix cli tool before changing to proper state */
		return sysfs_emit(page, "closed\n");
	case DEV_STATE_UNMAPPED:
		return sysfs_emit(page, "unmapped\n");
	default:
		return sysfs_emit(page, "unknown\n");
	}
}

static struct kobj_attribute rnbd_clt_state_attr = __ATTR_RO(state);

static ssize_t nr_poll_queues_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	struct rnbd_clt_dev *dev;

	dev = container_of(kobj, struct rnbd_clt_dev, kobj);

	return sysfs_emit(page, "%d\n", dev->nr_poll_queues);
}

static struct kobj_attribute rnbd_clt_nr_poll_queues =
	__ATTR_RO(nr_poll_queues);

static ssize_t mapping_path_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *page)
{
	struct rnbd_clt_dev *dev;

	dev = container_of(kobj, struct rnbd_clt_dev, kobj);

	return sysfs_emit(page, "%s\n", dev->pathname);
}

static struct kobj_attribute rnbd_clt_mapping_path_attr =
	__ATTR_RO(mapping_path);

static ssize_t access_mode_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *page)
{
	struct rnbd_clt_dev *dev;

	dev = container_of(kobj, struct rnbd_clt_dev, kobj);

	return sysfs_emit(page, "%s\n", rnbd_access_modes[dev->access_mode].str);
}

static struct kobj_attribute rnbd_clt_access_mode =
	__ATTR_RO(access_mode);

static ssize_t rnbd_clt_unmap_dev_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return sysfs_emit(page, "Usage: echo <normal|force> > %s\n",
			  attr->attr.name);
}

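/*
 * Unmap a device: "normal" or "force" selects the force flag passed to
 * rnbd_clt_unmap_device().  The exact sysfs path depends on the block
 * device name; as a purely illustrative example:
 *
 *   echo normal > /sys/block/rnbd0/rnbd/unmap_device
 */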
static ssize_t rnbd_clt_unmap_dev_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	struct rnbd_clt_dev *dev;
	char *opt, *options;
	bool force;
	int err;

	opt = kstrdup(buf, GFP_KERNEL);
	if (!opt)
		return -ENOMEM;

	options = strstrip(opt);
	dev = container_of(kobj, struct rnbd_clt_dev, kobj);
	if (sysfs_streq(options, "normal")) {
		force = false;
	} else if (sysfs_streq(options, "force")) {
		force = true;
	} else {
		rnbd_clt_err(dev,
			     "unmap_device: Invalid value: %s\n",
			     options);
		err = -EINVAL;
		goto out;
	}

	rnbd_clt_info(dev, "Unmapping device, option: %s.\n",
		      force ? "force" : "normal");

	/*
	 * We take an explicit module reference only for one reason: to not
	 * race with the lockless rnbd_destroy_sessions().
	 */
	if (!try_module_get(THIS_MODULE)) {
		err = -ENODEV;
		goto out;
	}
	err = rnbd_clt_unmap_device(dev, force, &attr->attr);
	if (err) {
		if (err != -EALREADY)
			rnbd_clt_err(dev, "unmap_device: %d\n", err);
		goto module_put;
	}

	/*
	 * Here the device may already have vanished!
	 */

	err = count;

module_put:
	module_put(THIS_MODULE);
out:
	kfree(opt);

	return err;
}

static struct kobj_attribute rnbd_clt_unmap_device_attr =
	__ATTR(unmap_device, 0644, rnbd_clt_unmap_dev_show,
	       rnbd_clt_unmap_dev_store);

static ssize_t rnbd_clt_resize_dev_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *page)
{
	return sysfs_emit(page, "Usage: echo <new size in sectors> > %s\n",
			  attr->attr.name);
}

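/*
 * Resize the device to the given number of sectors, e.g. (size and path
 * shown here only as an example; the path depends on the block device name):
 *
 *   echo 2097152 > /sys/block/rnbd0/rnbd/resize
 */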
static ssize_t rnbd_clt_resize_dev_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long sectors;
	struct rnbd_clt_dev *dev;

	dev = container_of(kobj, struct rnbd_clt_dev, kobj);

	ret = kstrtoul(buf, 0, &sectors);
	if (ret)
		return ret;

	ret = rnbd_clt_resize_disk(dev, sectors);
	if (ret)
		return ret;

	return count;
}

static struct kobj_attribute rnbd_clt_resize_dev_attr =
	__ATTR(resize, 0644, rnbd_clt_resize_dev_show,
	       rnbd_clt_resize_dev_store);

static ssize_t rnbd_clt_remap_dev_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return sysfs_emit(page, "Usage: echo <1> > %s\n", attr->attr.name);
}

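/*
 * Trigger a remap of the device by writing "1", e.g. (illustrative path):
 *
 *   echo 1 > /sys/block/rnbd0/rnbd/remap_device
 */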
static ssize_t rnbd_clt_remap_dev_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	struct rnbd_clt_dev *dev;
	char *opt, *options;
	int err;

	opt = kstrdup(buf, GFP_KERNEL);
	if (!opt)
		return -ENOMEM;

	options = strstrip(opt);
	dev = container_of(kobj, struct rnbd_clt_dev, kobj);
	if (!sysfs_streq(options, "1")) {
		rnbd_clt_err(dev,
			     "remap_device: Invalid value: %s\n",
			     options);
		err = -EINVAL;
		goto out;
	}
	err = rnbd_clt_remap_device(dev);
	if (likely(!err))
		err = count;

out:
	kfree(opt);

	return err;
}

static struct kobj_attribute rnbd_clt_remap_device_attr =
	__ATTR(remap_device, 0644, rnbd_clt_remap_dev_show,
	       rnbd_clt_remap_dev_store);

static ssize_t session_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *page)
{
	struct rnbd_clt_dev *dev;

	dev = container_of(kobj, struct rnbd_clt_dev, kobj);

	return sysfs_emit(page, "%s\n", dev->sess->sessname);
}

static struct kobj_attribute rnbd_clt_session_attr =
	__ATTR_RO(session);

static struct attribute *rnbd_dev_attrs[] = {
	&rnbd_clt_unmap_device_attr.attr,
	&rnbd_clt_resize_dev_attr.attr,
	&rnbd_clt_remap_device_attr.attr,
	&rnbd_clt_mapping_path_attr.attr,
	&rnbd_clt_state_attr.attr,
	&rnbd_clt_session_attr.attr,
	&rnbd_clt_access_mode.attr,
	&rnbd_clt_nr_poll_queues.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rnbd_dev);

void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
{
	/*
	 * The module unload path (rnbd_client_exit) races with manual
	 * unmapping of the last device via sysfs, i.e. with
	 * rnbd_clt_unmap_dev_store(), which leads to a sysfs warning
	 * because the sysfs link has already been removed.
	 */
	if (dev->blk_symlink_name) {
		if (try_module_get(THIS_MODULE)) {
			sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
			module_put(THIS_MODULE);
		}
		/* The name must always be freed. */
		kfree(dev->blk_symlink_name);
		dev->blk_symlink_name = NULL;
	}
}

static const struct kobj_type rnbd_dev_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= rnbd_dev_groups,
};

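/*
 * Create the per-device "rnbd" sysfs directory underneath the gendisk's
 * device kobject (visible as /sys/block/<disk>/rnbd/) and announce the
 * disk with a KOBJ_ONLINE uevent.
 */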
static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
{
	int ret;
	struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;

	ret = kobject_init_and_add(&dev->kobj, &rnbd_dev_ktype, gd_kobj, "%s",
				   "rnbd");
	if (ret) {
		rnbd_clt_err(dev, "Failed to create device sysfs dir, err: %d\n",
			     ret);
		kobject_put(&dev->kobj);
	}
	kobject_uevent(gd_kobj, KOBJ_ONLINE);

	return ret;
}

static ssize_t rnbd_clt_map_device_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *page)
{
	return sysfs_emit(page,
			  "Usage: echo \"[dest_port=server port number] sessname=<name of the rtrs session> path=<[srcaddr@]dstaddr> [path=<[srcaddr@]dstaddr>] device_path=<full path on remote side> [access_mode=<ro|rw|migration>] [nr_poll_queues=<number of queues>]\" > %s\n\naddr ::= [ ip:<ipv4> | ip:<ipv6> | gid:<gid> ]\n",
			  attr->attr.name);
}

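/*
 * Build the symlink name "<pathname>@<sessname>" with every '/' in the
 * remote device path replaced by '!'.  For example, a device mapped as
 * /dev/nullb0 on session "sess1" (names purely illustrative) yields
 * "!dev!nullb0@sess1".
 */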
static int rnbd_clt_get_path_name(struct rnbd_clt_dev *dev, char *buf,
				  size_t len)
{
	int ret;
	char pathname[NAME_MAX], *s;

	strscpy(pathname, dev->pathname, sizeof(pathname));
	while ((s = strchr(pathname, '/')))
		s[0] = '!';

	ret = snprintf(buf, len, "%s@%s", pathname, dev->sess->sessname);
	if (ret >= len)
		return -ENAMETOOLONG;

	return 0;
}

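/*
 * Create a symlink to the block device under the "devices" directory of
 * the control device, named as built by rnbd_clt_get_path_name() above.
 */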
static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
{
	struct kobject *gd_kobj = &disk_to_dev(dev->gd)->kobj;
	int ret, len;

	len = strlen(dev->pathname) + strlen(dev->sess->sessname) + 2;
	dev->blk_symlink_name = kzalloc(len, GFP_KERNEL);
	if (!dev->blk_symlink_name) {
		rnbd_clt_err(dev, "Failed to allocate memory for blk_symlink_name\n");
		return -ENOMEM;
	}

	ret = rnbd_clt_get_path_name(dev, dev->blk_symlink_name,
				     len);
	if (ret) {
		rnbd_clt_err(dev, "Failed to get /sys/block symlink path, err: %d\n",
			     ret);
		goto out_err;
	}

	ret = sysfs_create_link(rnbd_devs_kobj, gd_kobj,
				dev->blk_symlink_name);
	if (ret) {
		rnbd_clt_err(dev, "Creating /sys/block symlink failed, err: %d\n",
			     ret);
		goto out_err;
	}

	return 0;

out_err:
	kfree(dev->blk_symlink_name);
	dev->blk_symlink_name = NULL;
	return ret;
}

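/*
 * Parse the map_device options, map the device over RTRS and create its
 * sysfs entries.  A typical invocation looks like this (addresses and the
 * control path are only illustrative, see rnbd_clt_map_device_show() for
 * the full syntax):
 *
 *   echo "sessname=sess1 path=ip:192.168.0.1 device_path=/dev/nullb0" > \
 *        /sys/class/rnbd-client/ctl/map_device
 */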
static ssize_t rnbd_clt_map_device_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf, size_t count)
{
	struct rnbd_clt_dev *dev;
	struct rnbd_map_options opt;
	int ret;
	char pathname[NAME_MAX];
	char sessname[NAME_MAX];
	enum rnbd_access_mode access_mode = RNBD_ACCESS_RW;
	u16 port_nr = RTRS_PORT;
	u32 nr_poll_queues = 0;

	struct sockaddr_storage *addrs;
	struct rtrs_addr paths[6];
	size_t path_cnt;

	opt.sessname = sessname;
	opt.paths = paths;
	opt.path_cnt = &path_cnt;
	opt.pathname = pathname;
	opt.dest_port = &port_nr;
	opt.access_mode = &access_mode;
	opt.nr_poll_queues = &nr_poll_queues;
	addrs = kcalloc(ARRAY_SIZE(paths) * 2, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return -ENOMEM;

	for (path_cnt = 0; path_cnt < ARRAY_SIZE(paths); path_cnt++) {
		paths[path_cnt].src = &addrs[path_cnt * 2];
		paths[path_cnt].dst = &addrs[path_cnt * 2 + 1];
	}

	ret = rnbd_clt_parse_map_options(buf, ARRAY_SIZE(paths), &opt);
	if (ret)
		goto out;

	pr_info("Mapping device %s on session %s, (access_mode: %s, nr_poll_queues: %d)\n",
		pathname, sessname,
		rnbd_access_modes[access_mode].str,
		nr_poll_queues);

	dev = rnbd_clt_map_device(sessname, paths, path_cnt, port_nr, pathname,
				  access_mode, nr_poll_queues);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}

	ret = rnbd_clt_add_dev_kobj(dev);
	if (ret)
		goto unmap_dev;

	ret = rnbd_clt_add_dev_symlink(dev);
	if (ret)
		goto unmap_dev;

	kfree(addrs);
	return count;

unmap_dev:
	rnbd_clt_unmap_device(dev, true, NULL);
out:
	kfree(addrs);
	return ret;
}

static struct kobj_attribute rnbd_clt_map_device_attr =
	__ATTR(map_device, 0644,
	       rnbd_clt_map_device_show, rnbd_clt_map_device_store);

static struct attribute *default_attrs[] = {
	&rnbd_clt_map_device_attr.attr,
	NULL,
};

static struct attribute_group default_attr_group = {
	.attrs = default_attrs,
};

static const struct attribute_group *default_attr_groups[] = {
	&default_attr_group,
	NULL,
};

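/*
 * Register the "rnbd-client" class and create the "ctl" control device
 * carrying the map_device attribute, plus the "devices" kobject that holds
 * the per-device symlinks created in rnbd_clt_add_dev_symlink().
 */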
int rnbd_clt_create_sysfs_files(void)
{
	int err;

	err = class_register(&rnbd_dev_class);
	if (err)
		return err;

	rnbd_dev = device_create_with_groups(&rnbd_dev_class, NULL,
					     MKDEV(0, 0), NULL,
					     default_attr_groups, "ctl");
	if (IS_ERR(rnbd_dev)) {
		err = PTR_ERR(rnbd_dev);
		goto cls_destroy;
	}
	rnbd_devs_kobj = kobject_create_and_add("devices", &rnbd_dev->kobj);
	if (!rnbd_devs_kobj) {
		err = -ENOMEM;
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	device_destroy(&rnbd_dev_class, MKDEV(0, 0));
cls_destroy:
	class_unregister(&rnbd_dev_class);

	return err;
}

void rnbd_clt_destroy_sysfs_files(void)
{
	sysfs_remove_group(&rnbd_dev->kobj, &default_attr_group);
	kobject_del(rnbd_devs_kobj);
	kobject_put(rnbd_devs_kobj);
	device_destroy(&rnbd_dev_class, MKDEV(0, 0));
	class_unregister(&rnbd_dev_class);
}