xref: /linux/drivers/dma/idxd/sysfs.c (revision 6e7f3ee97bbe2c7d7a53b7dbd7a08a579e03c8c9)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine = confdev_to_engine(dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};
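
/*
 * Example usage (illustrative only; device and engine numbering depend on
 * the platform). Assign engine 0 of device dsa0 to group 0, or detach it:
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *	echo -1 > /sys/bus/dsa/devices/dsa0/engine0.0/group_id
 *
 * The store only succeeds while the device is configurable, and the group
 * id must lie within [-1, max_groups - 1].
 */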

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
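
/*
 * Worked example of the accounting above (numbers are illustrative): with
 * max_tokens = 96 and two groups reserving 24 and 8 tokens respectively,
 * nr_tokens becomes 96 - (24 + 8) = 64 tokens left to share.
 */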

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
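
/*
 * Illustrative tuning sequence for the token knobs (paths assume the
 * standard dsa bus layout; group numbering is platform dependent):
 *
 *	echo 8 > /sys/bus/dsa/devices/dsa0/group0.0/tokens_reserved
 *	echo 32 > /sys/bus/dsa/devices/dsa0/group0.0/tokens_allowed
 *	echo 1 > /sys/bus/dsa/devices/dsa0/group0.0/use_token_limit
 *
 * All three writes are rejected while the device is enabled, and IAX
 * devices do not support them at all (-EOPNOTSUPP).
 */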

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
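
/*
 * Both show routines above emit a space-separated member list; the final
 * "rc--" backs up over the trailing space so the newline replaces it.
 * Illustrative output for a group holding two engines of device 0:
 *
 *	$ cat /sys/bus/dsa/devices/dsa0/group0.0/engines
 *	engine0.0 engine0.1
 */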

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};
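
/*
 * Putting the group knobs together (illustrative; values and numbering
 * are platform dependent). Traffic classes must be in [0, 7] and can only
 * be changed while the device is disabled:
 *
 *	echo 1 > /sys/bus/dsa/devices/dsa0/group0.0/traffic_class_a
 *	echo 1 > /sys/bus/dsa/devices/dsa0/group0.0/traffic_class_b
 */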

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
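
/*
 * Illustrative mode selection (only valid while the wq is disabled, and
 * "shared" additionally requires shared wq support on the device):
 *
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo shared > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *
 * Switching to dedicated clears the threshold, since the threshold only
 * applies to shared work queues.
 */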

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
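
/*
 * Worked example of the size check (illustrative numbers): with
 * max_wq_size = 128, all wqs together currently claiming 80 entries, and
 * this wq holding 16 of them, writing 64 is accepted because
 * 64 + 80 - 16 = 128 <= 128, while writing 96 would give 160 and fail
 * with -EINVAL.
 */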

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
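
/*
 * Illustrative threshold setup for a shared wq (the value must be between
 * 1 and the wq size, and the wq must be disabled and non-dedicated):
 *
 *	echo 8 > /sys/bus/dsa/devices/dsa0/wq0.0/threshold
 */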

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
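
/*
 * __get_sysfs_u64() rounds the parsed value up to a power of two, so e.g.
 * writing 1000 stores 1024 while 4096 stays 4096; zero is rejected
 * outright.
 */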

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);
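
/*
 * occupancy is read straight from the wq's WQCFG register, so it reflects
 * how many descriptors the wq holds at the moment of the read
 * (illustrative; requires the hardware occupancy capability bit):
 *
 *	$ cat /sys/bus/dsa/devices/dsa0/wq0.0/occupancy
 *	0
 */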

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};
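
/*
 * A typical user-space configuration sequence for one wq, tying the
 * attributes above together (illustrative only; the accel-config tool is
 * the usual front end for this):
 *
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo user > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo myapp > /sys/bus/dsa/devices/dsa0/wq0.0/name
 */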

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);
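
/*
 * op_cap prints the four 64-bit words of the operation capability register
 * as a space-separated list; %#llx renders zero words as plain "0".
 * Illustrative output (the actual bitmap is device specific):
 *
 *	$ cat /sys/bus/dsa/devices/dsa0/op_cap
 *	0x40915f010f 0 0 0
 */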

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		rc = device_add(engine_confdev(engine));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/*
	 * Drop the references held by the engine that failed device_add()
	 * and by those that were never added.
	 */
	j = i;
	for (; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		put_device(engine_confdev(engine));
	}

	/* unregister engines 0 .. j - 1, which were added successfully */
	while (j--) {
		engine = idxd->engines[j];
		device_unregister(engine_confdev(engine));
	}
	return rc;
}
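
/*
 * Note on the cleanup pattern shared by the three register helpers: the
 * conf_dev objects are device_initialize()'d when the idxd device is set
 * up, so an object that never made it through device_add() only needs
 * put_device() to drop its initial reference, while one that was added
 * needs the full device_unregister().
 */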

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		rc = device_add(group_confdev(group));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/*
	 * Drop the references held by the group that failed device_add()
	 * and by those that were never added.
	 */
	j = i;
	for (; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}

	/* unregister groups 0 .. j - 1, which were added successfully */
	while (j--) {
		group = idxd->groups[j];
		device_unregister(group_confdev(group));
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		rc = device_add(wq_confdev(wq));
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/*
	 * Drop the references held by the wq that failed device_add() and
	 * by those that were never added.
	 */
	j = i;
	for (; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		put_device(wq_confdev(wq));
	}

	/* unregister wqs 0 .. j - 1, which were added successfully */
	while (j--) {
		wq = idxd->wqs[j];
		device_unregister(wq_confdev(wq));
	}
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(wq_confdev(wq));
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(engine_confdev(engine));
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(group_confdev(group));
	}
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}