// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static void idxd_conf_sub_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_sub_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_sub_device_release,
};

static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		/* Perform IDXD configuration and enabling */
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling wq while device is not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		/* Shared WQ checks */
		if (wq_shared(wq)) {
			if (!device_swq_supported(idxd)) {
				dev_warn(dev,
					 "Shared WQ unsupported: PASID is not enabled.\n");
				mutex_unlock(&wq->wq_lock);
				return -ENXIO;
			}
			/*
			 * Shared wq with the threshold set to 0 means the user
			 * did not set the threshold or transitioned from a
			 * dedicated wq but did not set threshold. A value
			 * of 0 would effectively disable the shared wq. The
			 * driver does not allow a value of 0 to be set for
			 * threshold via sysfs.
			 */
			if (wq->threshold == 0) {
				dev_warn(dev,
					 "Shared WQ and threshold 0.\n");
				mutex_unlock(&wq->wq_lock);
				return -EINVAL;
			}
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients still have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	idxd_wq_drain(wq);
	idxd_wq_reset(wq);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}
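
/*
 * Bus remove callback, invoked on driver unbind. Unbinding a wq device
 * disables just that wq; unbinding an idxd device first force-releases
 * any wqs that are still enabled, then disables the device itself and
 * drops the module reference taken at probe time.
 */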
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Releasing active wq %d while disabling %s.\n",
				 i, dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		rc = idxd_device_disable(idxd);
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = idxd->wqs[i];

			mutex_lock(&wq->wq_lock);
			idxd_wq_disable_cleanup(wq);
			mutex_unlock(&wq->wq_lock);
		}
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}
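
/*
 * The bus types below back the user-visible configuration interface.
 * A rough sketch of the expected flow from user space (sysfs paths and
 * names are illustrative, assuming this driver version's default layout):
 *
 *	echo 0      > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo "user" > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo myapp  > /sys/bus/dsa/devices/dsa0/wq0.0/name
 *	echo dsa0   > /sys/bus/dsa/drivers/dsa/bind	# enable the device
 *	echo wq0.0  > /sys/bus/dsa/drivers/dsa/bind	# enable the wq
 *	echo wq0.0  > /sys/bus/dsa/drivers/dsa/unbind	# disable the wq
 */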
struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

struct bus_type iax_bus_type = {
	.name = "iax",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type,
	&iax_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver iax_drv = {
	.drv = {
		.name = "iax",
		.bus = &iax_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv,
	&iax_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else if (idxd->type == IDXD_TYPE_IAX)
		return &iax_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);
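
/*
 * Worked example of the token accounting above (values illustrative):
 * with max_tokens = 96 and two groups each writing tokens_reserved = 24,
 * idxd_set_free_tokens() leaves nr_tokens = 96 - 48 = 48 in the shared
 * pool. A subsequent write to a group's tokens_allowed must then satisfy
 * 4 * num_engines <= tokens_allowed <= tokens_reserved + nr_tokens.
 */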
static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->type == IDXD_TYPE_IAX)
		return -EOPNOTSUPP;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/*
	 * Replace the trailing space, if any, with a newline. The rc
	 * check avoids writing before buf when no engine matched.
	 */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* Same trailing-space handling as group_engines_show(). */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}
static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
		clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
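
/*
 * block_on_fault selects how a page fault on a descriptor for this wq
 * is handled: when set, the operation blocks until the fault is
 * resolved; when clear, the descriptor completes with a partial
 * completion/fault status instead. (Summary of the WQCFG bit this flag
 * programs; see the DSA specification for the exact semantics.)
 */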
static ssize_t wq_block_on_fault_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
}

static ssize_t wq_block_on_fault_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool bof;
	int rc;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	rc = kstrtobool(buf, &bof);
	if (rc < 0)
		return rc;

	if (bof)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return count;
}

static struct device_attribute dev_attr_wq_block_on_fault =
		__ATTR(block_on_fault, 0644, wq_block_on_fault_show,
		       wq_block_on_fault_store);

static ssize_t wq_threshold_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->threshold);
}

static ssize_t wq_threshold_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 0, &val);
	if (rc < 0)
		return -EINVAL;

	/* val is unsigned; reject 0 explicitly rather than with "<= 0" */
	if (val == 0 || val > wq->size)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -ENXIO;

	if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
		return -EINVAL;

	wq->threshold = val;

	return count;
}

static struct device_attribute dev_attr_wq_threshold =
		__ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	/* Every case returns, so no code is needed after the switch. */
	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	/*
	 * This is temporarily placed here until we have SVM support for
	 * dmaengine.
	 */
	if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
		return -EOPNOTSUPP;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);
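
/*
 * Parse helper shared by the max_transfer_size and max_batch_size
 * stores below: rejects 0 and rounds the parsed value up to the next
 * power of two, so e.g. writing 1000 stores 1024.
 */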
static int __get_sysfs_u64(const char *buf, u64 *val)
{
	int rc;

	rc = kstrtou64(buf, 0, val);
	if (rc < 0)
		return -EINVAL;

	if (*val == 0)
		return -EINVAL;

	*val = roundup_pow_of_two(*val);
	return 0;
}

static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr,
					 char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%llu\n", wq->max_xfer_bytes);
}

static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 xfer_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &xfer_size);
	if (rc < 0)
		return rc;

	if (xfer_size > idxd->max_xfer_bytes)
		return -EINVAL;

	wq->max_xfer_bytes = xfer_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_transfer_size =
		__ATTR(max_transfer_size, 0644,
		       wq_max_transfer_size_show, wq_max_transfer_size_store);

static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->max_batch_size);
}

static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	u64 batch_size;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	rc = __get_sysfs_u64(buf, &batch_size);
	if (rc < 0)
		return rc;

	if (batch_size > idxd->max_batch_size)
		return -EINVAL;

	wq->max_batch_size = (u32)batch_size;

	return count;
}

static struct device_attribute dev_attr_wq_max_batch_size =
		__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);

static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->ats_dis);
}

static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	bool ats_dis;
	int rc;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (!idxd->hw.wq_cap.wq_ats_support)
		return -EOPNOTSUPP;

	rc = kstrtobool(buf, &ats_dis);
	if (rc < 0)
		return rc;

	wq->ats_dis = ats_dis;

	return count;
}

static struct device_attribute dev_attr_wq_ats_disable =
		__ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_block_on_fault.attr,
	&dev_attr_wq_threshold.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	&dev_attr_wq_max_transfer_size.attr,
	&dev_attr_wq_max_batch_size.attr,
	&dev_attr_wq_ats_disable.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

static void idxd_conf_wq_release(struct device *dev)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	kfree(wq->wqcfg);
	kfree(wq);
}

struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_wq_release,
	.groups = idxd_wq_attribute_groups,
};

/* IDXD device attribs */
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->hw.version);
}
static DEVICE_ATTR_RO(version);

static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);
static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, rc = 0;

	for (i = 0; i < 4; i++)
		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);

	rc--;
	rc += sysfs_emit_at(buf, rc, "\n");
	return rc;
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t pasid_enabled_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
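
/*
 * Note that "disabled" below covers both IDXD_DEV_DISABLED and
 * IDXD_DEV_CONF_READY: from sysfs, a freshly reset device and one whose
 * configuration has been written but not yet enabled look the same.
 */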
static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static ssize_t cmd_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#x\n", idxd->cmd_status);
}
static DEVICE_ATTR_RO(cmd_status);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_version.attr,
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_pasid_enabled.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	&dev_attr_cmd_status.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static void idxd_conf_device_release(struct device *dev)
{
	struct idxd_device *idxd = container_of(dev, struct idxd_device, conf_dev);

	kfree(idxd->groups);
	kfree(idxd->wqs);
	kfree(idxd->engines);
	kfree(idxd->irq_entries);
	ida_free(idxd_ida(idxd), idxd->id);
	kfree(idxd);
}

struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

struct device_type iax_device_type = {
	.name = "iax",
	.release = idxd_conf_device_release,
	.groups = idxd_attribute_groups,
};

static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_register_wq_devices(struct idxd_device *idxd)
{
	int i, rc, j;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = device_add(&wq->conf_dev);
		if (rc < 0)
			goto cleanup;
	}

	return 0;

cleanup:
	/*
	 * Devices [0, i) were added and need a full unregister; device i
	 * (whose device_add() failed) and the never-added devices after
	 * it only need their initial reference dropped. Starting the
	 * unregister count at i rather than i - 1 ensures wq i - 1 is
	 * not leaked.
	 */
	j = i;
	for (; i < idxd->max_wqs; i++)
		put_device(&idxd->wqs[i]->conf_dev);

	while (j--)
		device_unregister(&idxd->wqs[j]->conf_dev);
	return rc;
}
int idxd_register_devices(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	rc = device_add(&idxd->conf_dev);
	if (rc < 0)
		return rc;

	rc = idxd_register_wq_devices(idxd);
	if (rc < 0) {
		dev_dbg(dev, "WQ devices registering failed: %d\n", rc);
		goto err_wq;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		goto err;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		goto err;
	}

	return 0;

err:
	for (i = 0; i < idxd->max_wqs; i++)
		device_unregister(&idxd->wqs[i]->conf_dev);
err_wq:
	device_del(&idxd->conf_dev);
	return rc;
}

void idxd_unregister_devices(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}
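
/*
 * Note on lifetimes: idxd_register_bus_type() and idxd_register_driver()
 * are expected to run once at module load, while idxd_register_devices()
 * and idxd_unregister_devices() run per idxd PCI device at probe/remove
 * time, populating the bus registered above with the device and its wq,
 * group, and engine sub-devices.
 */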