// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

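/* Strings accepted by, and reported through, the per-wq "type" sysfs attribute. */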
static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

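/*
 * Group, wq and engine conf_devs live in arrays owned by the parent
 * idxd_device and are freed with it in idxd_conf_device_release(), so
 * their release callback only needs to log.
 */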
static void idxd_conf_sub_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_sub_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_sub_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_sub_device_release,
};

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

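/*
 * A device on the dsa/iax bus matches the generic driver only when it is
 * ready to be enabled: the idxd device must have completed probe
 * (IDXD_DEV_CONF_READY), and a wq must additionally still be disabled.
 */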
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

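/*
 * "Probing" on this bus means enabling. For an idxd device: write the
 * configuration to the device and issue the enable command. For a wq:
 * validate the sysfs-provided configuration, enable the wq, map its portal,
 * and attach it to dmaengine or the user cdev depending on its type.
 */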
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		/* Shared WQ checks */
		if (wq_shared(wq)) {
			if (!device_swq_supported(idxd)) {
				dev_warn(dev,
					 "PASID not enabled and shared WQ.\n");
				mutex_unlock(&wq->wq_lock);
				return -ENXIO;
			}
			/*
			 * A shared wq with threshold 0 means the user either
			 * never set a threshold or switched from a dedicated
			 * wq without setting one. A value of 0 would
			 * effectively disable the shared wq, so the driver
			 * does not allow 0 to be written to threshold via
			 * sysfs.
			 */
			if (wq->threshold == 0) {
				dev_warn(dev,
					 "Shared WQ and threshold 0.\n");
				mutex_unlock(&wq->wq_lock);
				return -EINVAL;
			}
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			idxd_wq_free_resources(wq);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			idxd_wq_free_resources(wq);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			idxd_wq_free_resources(wq);
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				idxd_wq_unmap_portal(wq);
				if (idxd_wq_disable(wq) < 0)
					dev_warn(dev, "IDXD wq disable failed\n");
				idxd_wq_free_resources(wq);
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				idxd_wq_unmap_portal(wq);
				if (idxd_wq_disable(wq) < 0)
					dev_warn(dev, "IDXD wq disable failed\n");
				idxd_wq_free_resources(wq);
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

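/*
 * Quiesce and tear down an enabled wq: detach it from dmaengine or the
 * cdev, drain outstanding descriptors, reset the wq and release its
 * resources. Called on driver unbind, so a remaining client count only
 * warrants a warning.
 */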
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

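/*
 * The dsa and iax buses use the generic driver model: devices are enabled
 * and disabled from user space through bind/unbind (illustrative paths):
 *
 *   echo dsa0  > /sys/bus/dsa/drivers/dsa/bind
 *   echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 *   echo wq0.0 > /sys/bus/dsa/drivers/dsa/unbind
 */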
struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

struct bus_type iax_bus_type = {
	.name = "iax",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type,
	&iax_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver iax_drv = {
	.drv = {
		.name = "iax",
		.bus = &iax_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv,
	&iax_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else if (idxd->type == IDXD_TYPE_IAX)
		return &iax_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

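/*
 * Writing -1 detaches the engine from its group; any other valid group id
 * moves it, keeping each group's num_engines count consistent.
 */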
static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

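/*
 * Recompute how many of the device's tokens remain unreserved after a
 * group's reservation changes.
 */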
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	/* drop the trailing space only if something was emitted */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	/* drop the trailing space only if something was emitted */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
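/*
 * Typical wq configuration sequence from user space before binding the wq
 * to the driver (illustrative names and values; accel-config performs the
 * same sysfs writes):
 *
 *   echo 0         > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *   echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *   echo 16        > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *   echo 10        > /sys/bus/dsa/devices/dsa0/wq0.0/priority
 *   echo user      > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *   echo myapp     > /sys/bus/dsa/devices/dsa0/wq0.0/name
 */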
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

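/*
 * Sum the sizes currently claimed by all wqs so a store can check the new
 * total against the device's total available wq size (max_wq_size).
 */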
static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val == 0 || val > wq->size)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

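/*
 * Parse a non-zero u64 from sysfs input and round it up to the next power
 * of two, since the hardware stores these limits log2 encoded.
 */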
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

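/*
 * Final put of the idxd conf_dev: free the sub-component arrays allocated
 * at probe time, return the device id to the ida and free the idxd_device
 * itself.
 */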
static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	ida_free(idxd_ida(idxd), idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

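/*
 * A failed device_register() must be cleaned up with put_device(), while
 * entries that registered successfully are torn down with
 * device_unregister() on the cleanup path.
 */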
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

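/*
 * Add the already-initialized idxd conf_dev, then register the wq, group
 * and engine sub-devices beneath it in sysfs.
 */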
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = device_add(&idxd->conf_dev);
	if (rc < 0)
		return rc;

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}