xref: /linux/drivers/dma/idxd/sysfs.c (revision 3621d3e57d9ec4677085bd11ebbf99bb405685b2)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

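/*
 * Configuration happens over the virtual "dsa" bus: the device, wq,
 * group, and engine conf_devs all live on dsa_bus_type, and userspace
 * drives probe/remove through the normal driver-core sysfs interface.
 * For example (names assume the first enumerated device, dsa0):
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/unbind
 *
 * The match callback below gates which devices are eligible to bind.
 */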
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

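/*
 * Probe of the dsa device itself writes the accumulated group/wq/engine
 * configuration to the hardware and enables the device; probe of a wq
 * enables just that queue.  The device therefore has to be bound and
 * enabled before any of its wqs can be.
 */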
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				goto err_client;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				goto err_client;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;

err_client:
		/*
		 * Unwind the wq enable so a failed client registration does
		 * not leave the queue half-configured.
		 */
		idxd_wq_unmap_portal(wq);
		if (idxd_wq_disable(wq) < 0)
			dev_warn(dev, "IDXD wq disable failed\n");
		mutex_unlock(&wq->wq_lock);
		return rc;
	}

	return -ENODEV;
}

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	rc = idxd_wq_disable(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

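/*
 * Example (assuming a first device dsa0): attach engine 0 to group 0
 * with
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *
 * and detach it again by writing -1.
 */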
static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
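
/*
 * Token accounting example: with max_tokens = 96 and group0.0
 * reserving 24, idxd_set_free_tokens() leaves nr_tokens = 72 for the
 * remaining groups.  The stores below enforce this: a group may never
 * reserve more than what is currently free plus its own existing
 * reservation.
 */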

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	if (!rc)
		return sprintf(tmp, "\n");

	/* Replace the trailing space from the last entry with a newline. */
	tmp[rc - 1] = '\n';
	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	if (!rc)
		return sprintf(tmp, "\n");

	/* Replace the trailing space from the last entry with a newline. */
	tmp[rc - 1] = '\n';
	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
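/*
 * A wq is configured through these attributes while it is disabled;
 * probe requires at least a group assignment and a name.  A typical
 * dedicated kernel queue setup might look like (paths assume the
 * first device, dsa0):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo kernel > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo dmaengine > /sys/bus/dsa/devices/dsa0/wq0.0/name
 */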
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}
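
/*
 * The size check in wq_size_store() works on totals: with
 * max_wq_size = 128 and 64 entries already claimed by other wqs, a wq
 * currently sized 0 can grow to at most 64.  The wq's own current
 * size is subtracted so a resize is judged by what the change would
 * actually consume.
 */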

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
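/*
 * Device-level attributes are mostly read-only capability and state
 * reports, e.g. (assuming dsa0):
 *
 *	cat /sys/bus/dsa/devices/dsa0/max_work_queues
 *	cat /sys/bus/dsa/devices/dsa0/state
 */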
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);
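
/*
 * token_limit is the only writable device-level attribute here; e.g.
 * (assuming dsa0, with a value no larger than the reported total
 * token count):
 *
 *	echo 80 > /sys/bus/dsa/devices/dsa0/token_limit
 */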

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

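/*
 * Putting the pieces together, the registered hierarchy for a first
 * device looks roughly like:
 *
 *	/sys/bus/dsa/devices/dsa0/
 *		wq0.0 ... wq0.<max_wqs - 1>
 *		engine0.0 ... engine0.<max_engines - 1>
 *		group0.0 ... group0.<max_groups - 1>
 */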
int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}
1552