xref: /linux/drivers/dma/idxd/sysfs.c (revision 08f3e0873ac203449465c2b8473d684e2f9f41d1)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/device.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <uapi/linux/idxd.h>
10 #include "registers.h"
11 #include "idxd.h"
12 
/* sysfs string names for the wq "type" attribute, indexed by enum idxd_wq_type */
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};
18 
19 /* IDXD engine attributes */
20 static ssize_t engine_group_id_show(struct device *dev,
21 				    struct device_attribute *attr, char *buf)
22 {
23 	struct idxd_engine *engine = confdev_to_engine(dev);
24 
25 	if (engine->group)
26 		return sysfs_emit(buf, "%d\n", engine->group->id);
27 	else
28 		return sysfs_emit(buf, "%d\n", -1);
29 }
30 
31 static ssize_t engine_group_id_store(struct device *dev,
32 				     struct device_attribute *attr,
33 				     const char *buf, size_t count)
34 {
35 	struct idxd_engine *engine = confdev_to_engine(dev);
36 	struct idxd_device *idxd = engine->idxd;
37 	long id;
38 	int rc;
39 	struct idxd_group *prevg;
40 
41 	rc = kstrtol(buf, 10, &id);
42 	if (rc < 0)
43 		return -EINVAL;
44 
45 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
46 		return -EPERM;
47 
48 	if (id > idxd->max_groups - 1 || id < -1)
49 		return -EINVAL;
50 
51 	if (id == -1) {
52 		if (engine->group) {
53 			engine->group->num_engines--;
54 			engine->group = NULL;
55 		}
56 		return count;
57 	}
58 
59 	prevg = engine->group;
60 
61 	if (prevg)
62 		prevg->num_engines--;
63 	engine->group = idxd->groups[id];
64 	engine->group->num_engines++;
65 
66 	return count;
67 }
68 
69 static struct device_attribute dev_attr_engine_group =
70 		__ATTR(group_id, 0644, engine_group_id_show,
71 		       engine_group_id_store);
72 
/* sysfs attributes exposed under each engine's config device */
static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* device release callback: frees the engine allocated at setup time */
static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = confdev_to_engine(dev);

	kfree(engine);
}

/* device_type shared by all engine config devices */
struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};
99 
100 /* Group attributes */
101 
102 static void idxd_set_free_tokens(struct idxd_device *idxd)
103 {
104 	int i, tokens;
105 
106 	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
107 		struct idxd_group *g = idxd->groups[i];
108 
109 		tokens += g->tokens_reserved;
110 	}
111 
112 	idxd->nr_tokens = idxd->max_tokens - tokens;
113 }
114 
115 static ssize_t group_tokens_reserved_show(struct device *dev,
116 					  struct device_attribute *attr,
117 					  char *buf)
118 {
119 	struct idxd_group *group = confdev_to_group(dev);
120 
121 	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
122 }
123 
/*
 * Set the number of tokens reserved exclusively for this group.  Only
 * allowed while the device is configurable and disabled; not supported
 * on IAX hardware at all.
 */
static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	/* IAX does not implement token configuration */
	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	/* may only grow by what is still free plus our current reservation */
	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	/* refresh idxd->nr_tokens from all groups' reservations */
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);
160 
161 static ssize_t group_tokens_allowed_show(struct device *dev,
162 					 struct device_attribute *attr,
163 					 char *buf)
164 {
165 	struct idxd_group *group = confdev_to_group(dev);
166 
167 	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
168 }
169 
/*
 * Set the maximum number of tokens this group may consume.  Must be at
 * least 4 per engine in the group and no more than the group's own
 * reservation plus the device-wide free pool.  Not supported on IAX.
 */
static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	/* lower bound: 4 tokens per engine in the group */
	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);
203 
204 static ssize_t group_use_token_limit_show(struct device *dev,
205 					  struct device_attribute *attr,
206 					  char *buf)
207 {
208 	struct idxd_group *group = confdev_to_group(dev);
209 
210 	return sysfs_emit(buf, "%u\n", group->use_token_limit);
211 }
212 
/*
 * Enable/disable enforcement of the device token limit for this group.
 * Any nonzero input enables it.  Refused on IAX, while the device is
 * enabled, or when no device-wide token limit has been configured yet.
 */
static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	/* meaningless unless a token limit has been set on the device */
	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);
245 
/*
 * List the engines assigned to this group, one "engine<dev>.<id>" token
 * per entry separated by spaces.  The trailing space after the last
 * entry is backed over (rc--) and replaced by the newline.
 */
static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	/* no members: emit nothing at all */
	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);
273 
/*
 * List the wqs assigned to this group, one "wq<dev>.<id>" token per
 * entry separated by spaces.  The trailing space after the last entry
 * is backed over (rc--) and replaced by the newline.
 */
static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group = confdev_to_group(dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	/* no members: emit nothing at all */
	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);
301 
302 static ssize_t group_traffic_class_a_show(struct device *dev,
303 					  struct device_attribute *attr,
304 					  char *buf)
305 {
306 	struct idxd_group *group = confdev_to_group(dev);
307 
308 	return sysfs_emit(buf, "%d\n", group->tc_a);
309 }
310 
311 static ssize_t group_traffic_class_a_store(struct device *dev,
312 					   struct device_attribute *attr,
313 					   const char *buf, size_t count)
314 {
315 	struct idxd_group *group = confdev_to_group(dev);
316 	struct idxd_device *idxd = group->idxd;
317 	long val;
318 	int rc;
319 
320 	rc = kstrtol(buf, 10, &val);
321 	if (rc < 0)
322 		return -EINVAL;
323 
324 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
325 		return -EPERM;
326 
327 	if (idxd->state == IDXD_DEV_ENABLED)
328 		return -EPERM;
329 
330 	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
331 		return -EPERM;
332 
333 	if (val < 0 || val > 7)
334 		return -EINVAL;
335 
336 	group->tc_a = val;
337 	return count;
338 }
339 
340 static struct device_attribute dev_attr_group_traffic_class_a =
341 		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
342 		       group_traffic_class_a_store);
343 
344 static ssize_t group_traffic_class_b_show(struct device *dev,
345 					  struct device_attribute *attr,
346 					  char *buf)
347 {
348 	struct idxd_group *group = confdev_to_group(dev);
349 
350 	return sysfs_emit(buf, "%d\n", group->tc_b);
351 }
352 
/*
 * Set traffic class B for this group (0-7).  Same constraints as
 * traffic class A: device must be configurable and disabled, and
 * pre-2.0 hardware additionally requires the tc_override parameter.
 */
static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group = confdev_to_group(dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	/* pre-2.0 hardware only accepts writes when overridden at load time */
	if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);
385 
/* sysfs attributes exposed under each group's config device */
static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* device release callback: frees the group allocated at setup time */
static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = confdev_to_group(dev);

	kfree(group);
}

/* device_type shared by all group config devices */
struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};
418 
419 /* IDXD work queue attribs */
420 static ssize_t wq_clients_show(struct device *dev,
421 			       struct device_attribute *attr, char *buf)
422 {
423 	struct idxd_wq *wq = confdev_to_wq(dev);
424 
425 	return sysfs_emit(buf, "%d\n", wq->client_count);
426 }
427 
428 static struct device_attribute dev_attr_wq_clients =
429 		__ATTR(clients, 0444, wq_clients_show, NULL);
430 
/* Current wq state as a string; "unknown" for any unexpected value. */
static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);
448 
449 static ssize_t wq_group_id_show(struct device *dev,
450 				struct device_attribute *attr, char *buf)
451 {
452 	struct idxd_wq *wq = confdev_to_wq(dev);
453 
454 	if (wq->group)
455 		return sysfs_emit(buf, "%u\n", wq->group->id);
456 	else
457 		return sysfs_emit(buf, "-1\n");
458 }
459 
460 static ssize_t wq_group_id_store(struct device *dev,
461 				 struct device_attribute *attr,
462 				 const char *buf, size_t count)
463 {
464 	struct idxd_wq *wq = confdev_to_wq(dev);
465 	struct idxd_device *idxd = wq->idxd;
466 	long id;
467 	int rc;
468 	struct idxd_group *prevg, *group;
469 
470 	rc = kstrtol(buf, 10, &id);
471 	if (rc < 0)
472 		return -EINVAL;
473 
474 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
475 		return -EPERM;
476 
477 	if (wq->state != IDXD_WQ_DISABLED)
478 		return -EPERM;
479 
480 	if (id > idxd->max_groups - 1 || id < -1)
481 		return -EINVAL;
482 
483 	if (id == -1) {
484 		if (wq->group) {
485 			wq->group->num_wqs--;
486 			wq->group = NULL;
487 		}
488 		return count;
489 	}
490 
491 	group = idxd->groups[id];
492 	prevg = wq->group;
493 
494 	if (prevg)
495 		prevg->num_wqs--;
496 	wq->group = group;
497 	group->num_wqs++;
498 	return count;
499 }
500 
501 static struct device_attribute dev_attr_wq_group_id =
502 		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);
503 
504 static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
505 			    char *buf)
506 {
507 	struct idxd_wq *wq = confdev_to_wq(dev);
508 
509 	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
510 }
511 
/*
 * Select "dedicated" or "shared" mode for the wq.  Dedicated mode also
 * clears the shared-mode threshold; shared mode is only accepted when
 * device_swq_supported() reports shared wq support.
 */
static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		/* threshold only applies to shared wqs */
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);
539 
540 static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
541 			    char *buf)
542 {
543 	struct idxd_wq *wq = confdev_to_wq(dev);
544 
545 	return sysfs_emit(buf, "%u\n", wq->size);
546 }
547 
548 static int total_claimed_wq_size(struct idxd_device *idxd)
549 {
550 	int i;
551 	int wq_size = 0;
552 
553 	for (i = 0; i < idxd->max_wqs; i++) {
554 		struct idxd_wq *wq = idxd->wqs[i];
555 
556 		wq_size += wq->size;
557 	}
558 
559 	return wq_size;
560 }
561 
/*
 * Resize the wq.  The new total across all wqs (i.e. the claimed total
 * with this wq's current size swapped for the new one) must not exceed
 * the device's max_wq_size.
 */
static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	/* subtract wq->size: this wq's current allocation is being replaced */
	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);
590 
591 static ssize_t wq_priority_show(struct device *dev,
592 				struct device_attribute *attr, char *buf)
593 {
594 	struct idxd_wq *wq = confdev_to_wq(dev);
595 
596 	return sysfs_emit(buf, "%u\n", wq->priority);
597 }
598 
599 static ssize_t wq_priority_store(struct device *dev,
600 				 struct device_attribute *attr,
601 				 const char *buf, size_t count)
602 {
603 	struct idxd_wq *wq = confdev_to_wq(dev);
604 	unsigned long prio;
605 	struct idxd_device *idxd = wq->idxd;
606 	int rc;
607 
608 	rc = kstrtoul(buf, 10, &prio);
609 	if (rc < 0)
610 		return -EINVAL;
611 
612 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
613 		return -EPERM;
614 
615 	if (wq->state != IDXD_WQ_DISABLED)
616 		return -EPERM;
617 
618 	if (prio > IDXD_MAX_PRIORITY)
619 		return -EINVAL;
620 
621 	wq->priority = prio;
622 	return count;
623 }
624 
625 static struct device_attribute dev_attr_wq_priority =
626 		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
627 
/* Whether the wq blocks (rather than errors out) on a page fault. */
static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}
635 
/*
 * Enable/disable block-on-fault for the wq.  Requires hardware support
 * (gen_cap.block_on_fault), a configurable device, and a disabled wq
 * (-ENXIO otherwise).  Input is parsed as a boolean.
 */
static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!idxd->hw.gen_cap.block_on_fault)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);
669 
670 static ssize_t wq_threshold_show(struct device *dev,
671 				 struct device_attribute *attr, char *buf)
672 {
673 	struct idxd_wq *wq = confdev_to_wq(dev);
674 
675 	return sysfs_emit(buf, "%u\n", wq->threshold);
676 }
677 
678 static ssize_t wq_threshold_store(struct device *dev,
679 				  struct device_attribute *attr,
680 				  const char *buf, size_t count)
681 {
682 	struct idxd_wq *wq = confdev_to_wq(dev);
683 	struct idxd_device *idxd = wq->idxd;
684 	unsigned int val;
685 	int rc;
686 
687 	rc = kstrtouint(buf, 0, &val);
688 	if (rc < 0)
689 		return -EINVAL;
690 
691 	if (val > wq->size || val <= 0)
692 		return -EINVAL;
693 
694 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
695 		return -EPERM;
696 
697 	if (wq->state != IDXD_WQ_DISABLED)
698 		return -ENXIO;
699 
700 	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
701 		return -EINVAL;
702 
703 	wq->threshold = val;
704 
705 	return count;
706 }
707 
708 static struct device_attribute dev_attr_wq_threshold =
709 		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
710 
711 static ssize_t wq_type_show(struct device *dev,
712 			    struct device_attribute *attr, char *buf)
713 {
714 	struct idxd_wq *wq = confdev_to_wq(dev);
715 
716 	switch (wq->type) {
717 	case IDXD_WQT_KERNEL:
718 		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
719 	case IDXD_WQT_USER:
720 		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
721 	case IDXD_WQT_NONE:
722 	default:
723 		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
724 	}
725 
726 	return -EINVAL;
727 }
728 
729 static ssize_t wq_type_store(struct device *dev,
730 			     struct device_attribute *attr, const char *buf,
731 			     size_t count)
732 {
733 	struct idxd_wq *wq = confdev_to_wq(dev);
734 	enum idxd_wq_type old_type;
735 
736 	if (wq->state != IDXD_WQ_DISABLED)
737 		return -EPERM;
738 
739 	old_type = wq->type;
740 	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
741 		wq->type = IDXD_WQT_NONE;
742 	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
743 		wq->type = IDXD_WQT_KERNEL;
744 	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
745 		wq->type = IDXD_WQT_USER;
746 	else
747 		return -EINVAL;
748 
749 	/* If we are changing queue type, clear the name */
750 	if (wq->type != old_type)
751 		memset(wq->name, 0, WQ_NAME_SIZE + 1);
752 
753 	return count;
754 }
755 
756 static struct device_attribute dev_attr_wq_type =
757 		__ATTR(type, 0644, wq_type_show, wq_type_store);
758 
759 static ssize_t wq_name_show(struct device *dev,
760 			    struct device_attribute *attr, char *buf)
761 {
762 	struct idxd_wq *wq = confdev_to_wq(dev);
763 
764 	return sysfs_emit(buf, "%s\n", wq->name);
765 }
766 
767 static ssize_t wq_name_store(struct device *dev,
768 			     struct device_attribute *attr, const char *buf,
769 			     size_t count)
770 {
771 	struct idxd_wq *wq = confdev_to_wq(dev);
772 
773 	if (wq->state != IDXD_WQ_DISABLED)
774 		return -EPERM;
775 
776 	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
777 		return -EINVAL;
778 
779 	/*
780 	 * This is temporarily placed here until we have SVM support for
781 	 * dmaengine.
782 	 */
783 	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
784 		return -EOPNOTSUPP;
785 
786 	memset(wq->name, 0, WQ_NAME_SIZE + 1);
787 	strncpy(wq->name, buf, WQ_NAME_SIZE);
788 	strreplace(wq->name, '\n', '\0');
789 	return count;
790 }
791 
792 static struct device_attribute dev_attr_wq_name =
793 		__ATTR(name, 0644, wq_name_show, wq_name_store);
794 
/*
 * Char device minor number for this wq's cdev, or -ENXIO when no cdev
 * exists.  wq_lock protects the idxd_cdev pointer against teardown.
 */
static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
813 
/*
 * Parse a u64 from a sysfs buffer into @val.  Zero is rejected, and the
 * parsed value is rounded up to the nearest power of two before being
 * returned.  Returns 0 on success, -EINVAL on bad or zero input.
 */
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
828 
/* Per-wq maximum transfer size in bytes. */
static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

/*
 * Set the per-wq maximum transfer size.  Input is rounded up to a power
 * of two by __get_sysfs_u64() and must not exceed the device maximum.
 * Only allowed while the wq is disabled.
 */
static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);
863 
/* Per-wq maximum batch descriptor size. */
static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

/*
 * Set the per-wq maximum batch size.  Input is rounded up to a power of
 * two by __get_sysfs_u64() and must not exceed the device maximum.
 * Only allowed while the wq is disabled.
 */
static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	/* bounded by max_batch_size above, so the u32 narrowing is safe */
	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
896 
/* Whether ATS (address translation services) is disabled for this wq. */
static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

/*
 * Enable/disable ATS for the wq.  Requires per-wq ATS control support
 * in hardware (wq_cap.wq_ats_support) and a disabled wq.  Input is
 * parsed as a boolean.
 */
static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
929 
/*
 * Read the wq's current occupancy from the WQCFG occupancy field in
 * MMIO space.  Only available when the hardware reports occupancy
 * support in wq_cap.
 */
static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = confdev_to_wq(dev);
	struct idxd_device *idxd = wq->idxd;
	u32 occup, offset;

	if (!idxd->hw.wq_cap.occupancy)
		return -EOPNOTSUPP;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX);
	occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK;

	return sysfs_emit(buf, "%u\n", occup);
}

static struct device_attribute dev_attr_wq_occupancy =
		__ATTR(occupancy, 0444, wq_occupancy_show, NULL);
947 
/* sysfs attributes exposed under each wq's config device */
static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	&dev_attr_wq_occupancy.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* device release callback: frees the wq config then the wq itself */
static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = confdev_to_wq(dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

/* device_type shared by all wq config devices */
struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};
989 
990 /* IDXD device attribs */
/* Read-only device capability/limit attributes, straight from idxd state. */

/* hardware version register value, in hex */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

/* maximum total size a single wq may be configured to */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

/* number of groups supported by the device */
static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

/* number of work queues supported by the device */
static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

/* number of engines supported by the device */
static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

/* NUMA node of the underlying PCI device */
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

/* device-wide maximum batch descriptor size */
static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

/* device-wide maximum transfer size in bytes */
static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);
1064 
/*
 * Emit the four 64-bit operation-capability words, space separated.
 * The trailing space after the last word is backed over (rc--) and
 * replaced by the newline.
 */
static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);
1079 
/* raw general-capability register bits, in hex */
static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

/* 1 if the device configuration is software-writable, 0 otherwise */
static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);
1097 
/*
 * Total client count summed across all wqs.  dev_lock keeps the
 * per-wq counts from changing mid-sum.
 */
static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int count = 0, i;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock(&idxd->dev_lock);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

/* 1 if the device is operating with PASID enabled */
static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
1124 
/* Current device state as a string; "unknown" for any unexpected value. */
static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

/*
 * Emit the four 64-bit software error record words under dev_lock.
 * The trailing space after the last word is backed over (out--) and
 * replaced by the newline.
 */
static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);
	int i, out = 0;

	spin_lock(&idxd->dev_lock);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock(&idxd->dev_lock);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);
1158 
1159 static ssize_t max_tokens_show(struct device *dev,
1160 			       struct device_attribute *attr, char *buf)
1161 {
1162 	struct idxd_device *idxd = confdev_to_idxd(dev);
1163 
1164 	return sysfs_emit(buf, "%u\n", idxd->max_tokens);
1165 }
1166 static DEVICE_ATTR_RO(max_tokens);
1167 
1168 static ssize_t token_limit_show(struct device *dev,
1169 				struct device_attribute *attr, char *buf)
1170 {
1171 	struct idxd_device *idxd = confdev_to_idxd(dev);
1172 
1173 	return sysfs_emit(buf, "%u\n", idxd->token_limit);
1174 }
1175 
1176 static ssize_t token_limit_store(struct device *dev,
1177 				 struct device_attribute *attr,
1178 				 const char *buf, size_t count)
1179 {
1180 	struct idxd_device *idxd = confdev_to_idxd(dev);
1181 	unsigned long val;
1182 	int rc;
1183 
1184 	rc = kstrtoul(buf, 10, &val);
1185 	if (rc < 0)
1186 		return -EINVAL;
1187 
1188 	if (idxd->state == IDXD_DEV_ENABLED)
1189 		return -EPERM;
1190 
1191 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1192 		return -EPERM;
1193 
1194 	if (!idxd->hw.group_cap.token_limit)
1195 		return -EPERM;
1196 
1197 	if (val > idxd->hw.group_cap.total_tokens)
1198 		return -EINVAL;
1199 
1200 	idxd->token_limit = val;
1201 	return count;
1202 }
1203 static DEVICE_ATTR_RW(token_limit);
1204 
1205 static ssize_t cdev_major_show(struct device *dev,
1206 			       struct device_attribute *attr, char *buf)
1207 {
1208 	struct idxd_device *idxd = confdev_to_idxd(dev);
1209 
1210 	return sysfs_emit(buf, "%u\n", idxd->major);
1211 }
1212 static DEVICE_ATTR_RO(cdev_major);
1213 
1214 static ssize_t cmd_status_show(struct device *dev,
1215 			       struct device_attribute *attr, char *buf)
1216 {
1217 	struct idxd_device *idxd = confdev_to_idxd(dev);
1218 
1219 	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
1220 }
1221 
1222 static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr,
1223 				const char *buf, size_t count)
1224 {
1225 	struct idxd_device *idxd = confdev_to_idxd(dev);
1226 
1227 	idxd->cmd_status = 0;
1228 	return count;
1229 }
1230 static DEVICE_ATTR_RW(cmd_status);
1231 
/* Attributes exported under the top-level idxd conf_dev sysfs directory. */
static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};
1254 
static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

/* NULL-terminated group list plugged into the device_types below. */
static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};
1263 
/*
 * Release callback for the idxd conf_dev: runs when the last reference
 * to the device is dropped.  Frees all per-device arrays, returns the
 * device id to the ida, and finally frees the idxd structure itself.
 */
static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = confdev_to_idxd(dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	/* idxd itself goes last; everything above lives inside/off of it. */
	kfree(idxd);
}
1276 
/* device_type for DSA devices: hooks up release and the sysfs groups. */
struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

/* device_type for IAX devices: same attributes and release handling. */
struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};
1288 
1289 static int idxd_register_engine_devices(struct idxd_device *idxd)
1290 {
1291 	struct idxd_engine *engine;
1292 	int i, j, rc;
1293 
1294 	for (i = 0; i < idxd->max_engines; i++) {
1295 		engine = idxd->engines[i];
1296 		rc = device_add(engine_confdev(engine));
1297 		if (rc < 0)
1298 			goto cleanup;
1299 	}
1300 
1301 	return 0;
1302 
1303 cleanup:
1304 	j = i - 1;
1305 	for (; i < idxd->max_engines; i++) {
1306 		engine = idxd->engines[i];
1307 		put_device(engine_confdev(engine));
1308 	}
1309 
1310 	while (j--) {
1311 		engine = idxd->engines[j];
1312 		device_unregister(engine_confdev(engine));
1313 	}
1314 	return rc;
1315 }
1316 
1317 static int idxd_register_group_devices(struct idxd_device *idxd)
1318 {
1319 	struct idxd_group *group;
1320 	int i, j, rc;
1321 
1322 	for (i = 0; i < idxd->max_groups; i++) {
1323 		group = idxd->groups[i];
1324 		rc = device_add(group_confdev(group));
1325 		if (rc < 0)
1326 			goto cleanup;
1327 	}
1328 
1329 	return 0;
1330 
1331 cleanup:
1332 	j = i - 1;
1333 	for (; i < idxd->max_groups; i++) {
1334 		group = idxd->groups[i];
1335 		put_device(group_confdev(group));
1336 	}
1337 
1338 	while (j--) {
1339 		group = idxd->groups[j];
1340 		device_unregister(group_confdev(group));
1341 	}
1342 	return rc;
1343 }
1344 
1345 static int idxd_register_wq_devices(struct idxd_device *idxd)
1346 {
1347 	struct idxd_wq *wq;
1348 	int i, rc, j;
1349 
1350 	for (i = 0; i < idxd->max_wqs; i++) {
1351 		wq = idxd->wqs[i];
1352 		rc = device_add(wq_confdev(wq));
1353 		if (rc < 0)
1354 			goto cleanup;
1355 	}
1356 
1357 	return 0;
1358 
1359 cleanup:
1360 	j = i - 1;
1361 	for (; i < idxd->max_wqs; i++) {
1362 		wq = idxd->wqs[i];
1363 		put_device(wq_confdev(wq));
1364 	}
1365 
1366 	while (j--) {
1367 		wq = idxd->wqs[j];
1368 		device_unregister(wq_confdev(wq));
1369 	}
1370 	return rc;
1371 }
1372 
/*
 * Register the top-level idxd conf_dev and then all child wq, engine
 * and group conf_devs with the driver core.  On any failure the error
 * labels unwind, in reverse order, everything registered so far and
 * the errno from the failing step is returned.
 */
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(idxd_confdev(idxd));
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

	/* Each register helper cleans up after itself on failure, so the
	 * labels only undo the steps that fully succeeded before it. */
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(wq_confdev(idxd->wqs[i]));
 err_wq:
	device_del(idxd_confdev(idxd));
	return rc;
}
1412 
1413 void idxd_unregister_devices(struct idxd_device *idxd)
1414 {
1415 	int i;
1416 
1417 	for (i = 0; i < idxd->max_wqs; i++) {
1418 		struct idxd_wq *wq = idxd->wqs[i];
1419 
1420 		device_unregister(wq_confdev(wq));
1421 	}
1422 
1423 	for (i = 0; i < idxd->max_engines; i++) {
1424 		struct idxd_engine *engine = idxd->engines[i];
1425 
1426 		device_unregister(engine_confdev(engine));
1427 	}
1428 
1429 	for (i = 0; i < idxd->max_groups; i++) {
1430 		struct idxd_group *group = idxd->groups[i];
1431 
1432 		device_unregister(group_confdev(group));
1433 	}
1434 }
1435 
/* Register the dsa bus type at module init; returns bus_register()'s errno. */
int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

/* Tear down the dsa bus type at module exit. */
void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}
1445