/* xref: /linux/drivers/dma/idxd/sysfs.c (revision 87c9c16317882dd6dbbc07e349bc3223e14f3244) */
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};
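
/*
 * These strings are the values accepted and emitted by the wq "type"
 * sysfs attribute (wq_type_show()/wq_type_store() below). Illustrative
 * usage from userspace (device names assumed for the example):
 *
 *	echo user > /sys/bus/dsa/devices/wq0.0/type
 *	cat /sys/bus/dsa/devices/wq0.0/type
 */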

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}
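
/*
 * A nonzero return tells the driver core this device may bind to the
 * driver, after which idxd_config_bus_probe() runs. Binding is normally
 * initiated from userspace, e.g. (illustrative):
 *
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */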

static int enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Enabling WQ while device is not enabled.\n");
		return -EPERM;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d already enabled.\n", wq->id);
		return -EBUSY;
	}

	if (!wq->group) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ not attached to group.\n");
		return -EINVAL;
	}

	if (strlen(wq->name) == 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ name not set.\n");
		return -EINVAL;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!device_swq_supported(idxd)) {
			dev_warn(dev, "PASID not enabled and shared WQ.\n");
			mutex_unlock(&wq->wq_lock);
			return -ENXIO;
		}
		/*
		 * A shared wq with a threshold of 0 means the user either
		 * never set the threshold or transitioned from a dedicated
		 * wq without setting one. A value of 0 would effectively
		 * disable a shared wq, so reject it here; sysfs likewise
		 * refuses to store 0 for the threshold.
		 */
		if (wq->threshold == 0) {
			dev_warn(dev, "Shared WQ with threshold 0.\n");
			mutex_unlock(&wq->wq_lock);
			return -EINVAL;
		}
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ resource alloc failed\n");
		return rc;
	}

	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "Writing WQ %d config failed: %d\n", wq->id, rc);
		return rc;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		mutex_unlock(&wq->wq_lock);
		dev_warn(dev, "WQ %d enabling failed: %d\n", wq->id, rc);
		return rc;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		dev_warn(dev, "wq portal mapping failed: %d\n", rc);
		/* Preserve the mapping error; a successful disable must not mask it. */
		if (idxd_wq_disable(wq) < 0)
			dev_warn(dev, "IDXD wq disable failed\n");
		mutex_unlock(&wq->wq_lock);
		return rc;
	}

	wq->client_count = 0;

	if (wq->type == IDXD_WQT_KERNEL) {
		rc = idxd_wq_init_percpu_ref(wq);
		if (rc < 0) {
			dev_dbg(dev, "percpu_ref setup failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	}

	if (is_idxd_wq_dmaengine(wq)) {
		rc = idxd_register_dma_channel(wq);
		if (rc < 0) {
			dev_dbg(dev, "DMA channel register failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	} else if (is_idxd_wq_cdev(wq)) {
		rc = idxd_wq_add_cdev(wq);
		if (rc < 0) {
			dev_dbg(dev, "Cdev creation failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}
	}

	mutex_unlock(&wq->wq_lock);
	dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

	return 0;
}
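
/*
 * enable_wq() above encodes the required ordering: the device must be
 * enabled, and the wq must be disabled, attached to a group and named
 * before resources are allocated and the wq is enabled and mapped. A
 * minimal configuration sequence from userspace might look like this
 * (paths and values are illustrative only):
 *
 *	echo 0 > /sys/bus/dsa/devices/wq0.0/group_id
 *	echo dedicated > /sys/bus/dsa/devices/wq0.0/mode
 *	echo 16 > /sys/bus/dsa/devices/wq0.0/size
 *	echo user > /sys/bus/dsa/devices/wq0.0/type
 *	echo myapp > /sys/bus/dsa/devices/wq0.0/name
 *	echo wq0.0 > /sys/bus/dsa/drivers/dsa/bind
 */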

static int idxd_config_bus_probe(struct device *dev)
{
	int rc = 0;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
			rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		return enable_wq(wq);
	}

	return -ENODEV;
}

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (wq->type == IDXD_WQT_KERNEL)
		idxd_wq_quiesce(wq);

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
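
/*
 * Teardown mirrors enable_wq() in reverse: quiesce kernel submitters,
 * drop the dmaengine channel or cdev, unmap the portal, then drain and
 * reset the wq before freeing its resources. Draining before the reset
 * lets in-flight descriptors complete rather than be dropped.
 */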

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d while disabling %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				mutex_lock(&wq->wq_lock);
				idxd_wq_disable_cleanup(wq);
				mutex_unlock(&wq->wq_lock);
			}
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));

	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	return driver_register(&dsa_drv.drv);
}

void idxd_unregister_driver(void)
{
	driver_unregister(&dsa_drv.drv);
}
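
/*
 * dsa_drv is a single stub driver that idxd device and wq conf devices
 * on the dsa bus bind to (idxd_config_bus_match() never matches engine
 * or group devices); the actual enable/disable work happens in the bus
 * probe/remove callbacks above rather than in a per-driver probe.
 */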

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sysfs_emit(buf, "%d\n", engine->group->id);
	else
		return sysfs_emit(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

static void idxd_conf_engine_release(struct device *dev)
{
	struct idxd_engine *engine = container_of(dev, struct idxd_engine, conf_dev);

	kfree(engine);
}

struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_engine_release,
	.groups = idxd_engine_attribute_groups,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}
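
/*
 * Worked example (numbers illustrative): with max_tokens = 96 and two
 * groups reserving 24 and 8 tokens respectively, nr_tokens becomes
 * 96 - (24 + 8) = 64 tokens available for shared use.
 */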

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->data->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);
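
/*
 * Note the rc-- in group_engines_show() above and group_work_queues_show()
 * below: each loop emits a trailing space after the last entry, so the
 * cursor backs up one character and the final newline overwrites it.
 */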

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id);
	}

	if (!rc)
		return 0;
	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sysfs_emit(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

static void idxd_conf_group_release(struct device *dev)
{
	struct idxd_group *group = container_of(dev, struct idxd_group, conf_dev);

	kfree(group);
}

struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_group_release,
	.groups = idxd_group_attribute_groups,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	}

	return sysfs_emit(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sysfs_emit(buf, "%u\n", wq->group->id);
	else
		return sysfs_emit(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}
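
/*
 * wq_size_store() below checks the requested size against what every wq
 * has already claimed, net of this wq's current share. Illustrative
 * example: with max_wq_size = 128 and two wqs of 64 entries each, growing
 * one wq to 96 fails because 96 + 128 - 64 = 160 > 128.
 */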

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	if (val > wq->size || val <= 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);
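
/*
 * Changing the type clears the name (see wq_type_store() above), so
 * userspace must rewrite "name" after rewriting "type", e.g.
 * (illustrative):
 *
 *	echo user > /sys/bus/dsa/devices/wq0.0/type
 *	echo myapp > /sys/bus/dsa/devices/wq0.0/name
 */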

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	int minor = -1;

	mutex_lock(&wq->wq_lock);
	if (wq->idxd_cdev)
		minor = wq->idxd_cdev->minor;
	mutex_unlock(&wq->wq_lock);

	if (minor == -1)
		return -ENXIO;
	return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}
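
/*
 * __get_sysfs_u64() rounds its input up to a power of two: e.g. "100"
 * is stored as 128, while "0" is rejected rather than rounded.
 */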

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sysfs_emit(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sysfs_emit(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sysfs_emit(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sysfs_emit(buf, "halted\n");
	}

	return sysfs_emit(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sysfs_emit_at(buf, out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sysfs_emit(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	kfree(idxd->int_handles);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

static int idxd_register_engine_devices(struct idxd_device *idxd)
{
	int i, j, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		rc = device_add(&engine->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/*
	 * Engines [0, i) were added successfully; save j = i (not i - 1)
	 * so the while (j--) below also unregisters the last added engine.
	 */
	j = i;
	for (; i < idxd->max_engines; i++)
		put_device(&idxd->engines[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->engines[j]->conf_dev);
	return rc;
}
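
/*
 * In the cleanup paths of the three registration helpers here, conf
 * devices that were never successfully added (including the one whose
 * device_add() just failed) only need put_device() to drop their
 * reference, while devices already added must be device_unregister()ed.
 */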

static int idxd_register_group_devices(struct idxd_device *idxd)
{
	int i, j, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		rc = device_add(&group->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* As above: groups [0, i) were added, so unwind all i of them. */
	j = i;
	for (; i < idxd->max_groups; i++)
		put_device(&idxd->groups[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->groups[j]->conf_dev);
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = device_add(&wq->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/* As above: wqs [0, i) were added, so unwind all i of them. */
	j = i;
	for (; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->wqs[j]->conf_dev);
	return rc;
}

int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(&idxd->conf_dev);
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_register_engine_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Engine devices registering failed: %d\n", rc);
		goto err_engine;
	}

	rc = idxd_register_group_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Group device registering failed: %d\n", rc);
		goto err_group;
	}

	return 0;

 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		device_unregister(&idxd->engines[i]->conf_dev);
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(&idxd->wqs[i]->conf_dev);
 err_wq:
	device_del(&idxd->conf_dev);
	return rc;
}
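
/*
 * The error labels in idxd_register_devices() fall through, so a failure
 * at any stage unwinds everything registered before it: a group failure
 * unregisters all engines and wqs and then deletes the idxd conf device.
 */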

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	return bus_register(&dsa_bus_type);
}

void idxd_unregister_bus_type(void)
{
	bus_unregister(&dsa_bus_type);
}