1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ 3 #include <linux/init.h> 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/pci.h> 7 #include <linux/device.h> 8 #include <linux/io-64-nonatomic-lo-hi.h> 9 #include <uapi/linux/idxd.h> 10 #include "registers.h" 11 #include "idxd.h" 12 13 static char *idxd_wq_type_names[] = { 14 [IDXD_WQT_NONE] = "none", 15 [IDXD_WQT_KERNEL] = "kernel", 16 [IDXD_WQT_USER] = "user", 17 }; 18 19 /* IDXD engine attributes */ 20 static ssize_t engine_group_id_show(struct device *dev, 21 struct device_attribute *attr, char *buf) 22 { 23 struct idxd_engine *engine = confdev_to_engine(dev); 24 25 if (engine->group) 26 return sysfs_emit(buf, "%d\n", engine->group->id); 27 else 28 return sysfs_emit(buf, "%d\n", -1); 29 } 30 31 static ssize_t engine_group_id_store(struct device *dev, 32 struct device_attribute *attr, 33 const char *buf, size_t count) 34 { 35 struct idxd_engine *engine = confdev_to_engine(dev); 36 struct idxd_device *idxd = engine->idxd; 37 long id; 38 int rc; 39 struct idxd_group *prevg; 40 41 rc = kstrtol(buf, 10, &id); 42 if (rc < 0) 43 return -EINVAL; 44 45 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 46 return -EPERM; 47 48 if (id > idxd->max_groups - 1 || id < -1) 49 return -EINVAL; 50 51 if (id == -1) { 52 if (engine->group) { 53 engine->group->num_engines--; 54 engine->group = NULL; 55 } 56 return count; 57 } 58 59 prevg = engine->group; 60 61 if (prevg) 62 prevg->num_engines--; 63 engine->group = idxd->groups[id]; 64 engine->group->num_engines++; 65 66 return count; 67 } 68 69 static struct device_attribute dev_attr_engine_group = 70 __ATTR(group_id, 0644, engine_group_id_show, 71 engine_group_id_store); 72 73 static struct attribute *idxd_engine_attributes[] = { 74 &dev_attr_engine_group.attr, 75 NULL, 76 }; 77 78 static const struct attribute_group idxd_engine_attribute_group = { 79 .attrs = idxd_engine_attributes, 80 }; 81 82 static const struct attribute_group *idxd_engine_attribute_groups[] = { 83 &idxd_engine_attribute_group, 84 NULL, 85 }; 86 87 static void idxd_conf_engine_release(struct device *dev) 88 { 89 struct idxd_engine *engine = confdev_to_engine(dev); 90 91 kfree(engine); 92 } 93 94 struct device_type idxd_engine_device_type = { 95 .name = "engine", 96 .release = idxd_conf_engine_release, 97 .groups = idxd_engine_attribute_groups, 98 }; 99 100 /* Group attributes */ 101 102 static void idxd_set_free_rdbufs(struct idxd_device *idxd) 103 { 104 int i, rdbufs; 105 106 for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { 107 struct idxd_group *g = idxd->groups[i]; 108 109 rdbufs += g->rdbufs_reserved; 110 } 111 112 idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; 113 } 114 115 static ssize_t group_read_buffers_reserved_show(struct device *dev, 116 struct device_attribute *attr, 117 char *buf) 118 { 119 struct idxd_group *group = confdev_to_group(dev); 120 121 return sysfs_emit(buf, "%u\n", group->rdbufs_reserved); 122 } 123 124 static ssize_t group_tokens_reserved_show(struct device *dev, 125 struct device_attribute *attr, 126 char *buf) 127 { 128 dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n"); 129 return group_read_buffers_reserved_show(dev, attr, buf); 130 } 131 132 static ssize_t group_read_buffers_reserved_store(struct device *dev, 133 struct device_attribute *attr, 134 const char *buf, size_t count) 135 { 136 struct idxd_group *group = confdev_to_group(dev); 137 struct idxd_device *idxd = group->idxd; 138 unsigned long val; 139 int rc; 140 141 rc = kstrtoul(buf, 10, &val); 142 if (rc < 0) 143 return -EINVAL; 144 145 if (idxd->data->type == IDXD_TYPE_IAX) 146 return -EOPNOTSUPP; 147 148 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 149 return -EPERM; 150 151 if (idxd->state == IDXD_DEV_ENABLED) 152 return -EPERM; 153 154 if (val > idxd->max_rdbufs) 155 return -EINVAL; 156 157 if (val > idxd->nr_rdbufs + group->rdbufs_reserved) 158 return -EINVAL; 159 160 group->rdbufs_reserved = val; 161 idxd_set_free_rdbufs(idxd); 162 return count; 163 } 164 165 static ssize_t group_tokens_reserved_store(struct device *dev, 166 struct device_attribute *attr, 167 const char *buf, size_t count) 168 { 169 dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n"); 170 return group_read_buffers_reserved_store(dev, attr, buf, count); 171 } 172 173 static struct device_attribute dev_attr_group_tokens_reserved = 174 __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, 175 group_tokens_reserved_store); 176 177 static struct device_attribute dev_attr_group_read_buffers_reserved = 178 __ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show, 179 group_read_buffers_reserved_store); 180 181 static ssize_t group_read_buffers_allowed_show(struct device *dev, 182 struct device_attribute *attr, 183 char *buf) 184 { 185 struct idxd_group *group = confdev_to_group(dev); 186 187 return sysfs_emit(buf, "%u\n", group->rdbufs_allowed); 188 } 189 190 static ssize_t group_tokens_allowed_show(struct device *dev, 191 struct device_attribute *attr, 192 char *buf) 193 { 194 dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n"); 195 return group_read_buffers_allowed_show(dev, attr, buf); 196 } 197 198 static ssize_t group_read_buffers_allowed_store(struct device *dev, 199 struct device_attribute *attr, 200 const char *buf, size_t count) 201 { 202 struct idxd_group *group = confdev_to_group(dev); 203 struct idxd_device *idxd = group->idxd; 204 unsigned long val; 205 int rc; 206 207 rc = kstrtoul(buf, 10, &val); 208 if (rc < 0) 209 return -EINVAL; 210 211 if (idxd->data->type == IDXD_TYPE_IAX) 212 return -EOPNOTSUPP; 213 214 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 215 return -EPERM; 216 217 if (idxd->state == IDXD_DEV_ENABLED) 218 return -EPERM; 219 220 if (val < 4 * group->num_engines || 221 val > group->rdbufs_reserved + idxd->nr_rdbufs) 222 return -EINVAL; 223 224 group->rdbufs_allowed = val; 225 return count; 226 } 227 228 static ssize_t group_tokens_allowed_store(struct device *dev, 229 struct device_attribute *attr, 230 const char *buf, size_t count) 231 { 232 dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n"); 233 return group_read_buffers_allowed_store(dev, attr, buf, count); 234 } 235 236 static struct device_attribute dev_attr_group_tokens_allowed = 237 __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, 238 group_tokens_allowed_store); 239 240 static struct device_attribute dev_attr_group_read_buffers_allowed = 241 __ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show, 242 group_read_buffers_allowed_store); 243 244 static ssize_t group_use_read_buffer_limit_show(struct device *dev, 245 struct device_attribute *attr, 246 char *buf) 247 { 248 struct idxd_group *group = confdev_to_group(dev); 249 250 return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit); 251 } 252 253 static ssize_t group_use_token_limit_show(struct device *dev, 254 struct device_attribute *attr, 255 char *buf) 256 { 257 dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n"); 258 return group_use_read_buffer_limit_show(dev, attr, buf); 259 } 260 261 static ssize_t group_use_read_buffer_limit_store(struct device *dev, 262 struct device_attribute *attr, 263 const char *buf, size_t count) 264 { 265 struct idxd_group *group = confdev_to_group(dev); 266 struct idxd_device *idxd = group->idxd; 267 unsigned long val; 268 int rc; 269 270 rc = kstrtoul(buf, 10, &val); 271 if (rc < 0) 272 return -EINVAL; 273 274 if (idxd->data->type == IDXD_TYPE_IAX) 275 return -EOPNOTSUPP; 276 277 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 278 return -EPERM; 279 280 if (idxd->state == IDXD_DEV_ENABLED) 281 return -EPERM; 282 283 if (idxd->rdbuf_limit == 0) 284 return -EPERM; 285 286 group->use_rdbuf_limit = !!val; 287 return count; 288 } 289 290 static ssize_t group_use_token_limit_store(struct device *dev, 291 struct device_attribute *attr, 292 const char *buf, size_t count) 293 { 294 dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n"); 295 return group_use_read_buffer_limit_store(dev, attr, buf, count); 296 } 297 298 static struct device_attribute dev_attr_group_use_token_limit = 299 __ATTR(use_token_limit, 0644, group_use_token_limit_show, 300 group_use_token_limit_store); 301 302 static struct device_attribute dev_attr_group_use_read_buffer_limit = 303 __ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show, 304 group_use_read_buffer_limit_store); 305 306 static ssize_t group_engines_show(struct device *dev, 307 struct device_attribute *attr, char *buf) 308 { 309 struct idxd_group *group = confdev_to_group(dev); 310 int i, rc = 0; 311 struct idxd_device *idxd = group->idxd; 312 313 for (i = 0; i < idxd->max_engines; i++) { 314 struct idxd_engine *engine = idxd->engines[i]; 315 316 if (!engine->group) 317 continue; 318 319 if (engine->group->id == group->id) 320 rc += sysfs_emit_at(buf, rc, "engine%d.%d ", idxd->id, engine->id); 321 } 322 323 if (!rc) 324 return 0; 325 rc--; 326 rc += sysfs_emit_at(buf, rc, "\n"); 327 328 return rc; 329 } 330 331 static struct device_attribute dev_attr_group_engines = 332 __ATTR(engines, 0444, group_engines_show, NULL); 333 334 static ssize_t group_work_queues_show(struct device *dev, 335 struct device_attribute *attr, char *buf) 336 { 337 struct idxd_group *group = confdev_to_group(dev); 338 int i, rc = 0; 339 struct idxd_device *idxd = group->idxd; 340 341 for (i = 0; i < idxd->max_wqs; i++) { 342 struct idxd_wq *wq = idxd->wqs[i]; 343 344 if (!wq->group) 345 continue; 346 347 if (wq->group->id == group->id) 348 rc += sysfs_emit_at(buf, rc, "wq%d.%d ", idxd->id, wq->id); 349 } 350 351 if (!rc) 352 return 0; 353 rc--; 354 rc += sysfs_emit_at(buf, rc, "\n"); 355 356 return rc; 357 } 358 359 static struct device_attribute dev_attr_group_work_queues = 360 __ATTR(work_queues, 0444, group_work_queues_show, NULL); 361 362 static ssize_t group_traffic_class_a_show(struct device *dev, 363 struct device_attribute *attr, 364 char *buf) 365 { 366 struct idxd_group *group = confdev_to_group(dev); 367 368 return sysfs_emit(buf, "%d\n", group->tc_a); 369 } 370 371 static ssize_t group_traffic_class_a_store(struct device *dev, 372 struct device_attribute *attr, 373 const char *buf, size_t count) 374 { 375 struct idxd_group *group = confdev_to_group(dev); 376 struct idxd_device *idxd = group->idxd; 377 long val; 378 int rc; 379 380 rc = kstrtol(buf, 10, &val); 381 if (rc < 0) 382 return -EINVAL; 383 384 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 385 return -EPERM; 386 387 if (idxd->state == IDXD_DEV_ENABLED) 388 return -EPERM; 389 390 if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) 391 return -EPERM; 392 393 if (val < 0 || val > 7) 394 return -EINVAL; 395 396 group->tc_a = val; 397 return count; 398 } 399 400 static struct device_attribute dev_attr_group_traffic_class_a = 401 __ATTR(traffic_class_a, 0644, group_traffic_class_a_show, 402 group_traffic_class_a_store); 403 404 static ssize_t group_traffic_class_b_show(struct device *dev, 405 struct device_attribute *attr, 406 char *buf) 407 { 408 struct idxd_group *group = confdev_to_group(dev); 409 410 return sysfs_emit(buf, "%d\n", group->tc_b); 411 } 412 413 static ssize_t group_traffic_class_b_store(struct device *dev, 414 struct device_attribute *attr, 415 const char *buf, size_t count) 416 { 417 struct idxd_group *group = confdev_to_group(dev); 418 struct idxd_device *idxd = group->idxd; 419 long val; 420 int rc; 421 422 rc = kstrtol(buf, 10, &val); 423 if (rc < 0) 424 return -EINVAL; 425 426 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 427 return -EPERM; 428 429 if (idxd->state == IDXD_DEV_ENABLED) 430 return -EPERM; 431 432 if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) 433 return -EPERM; 434 435 if (val < 0 || val > 7) 436 return -EINVAL; 437 438 group->tc_b = val; 439 return count; 440 } 441 442 static struct device_attribute dev_attr_group_traffic_class_b = 443 __ATTR(traffic_class_b, 0644, group_traffic_class_b_show, 444 group_traffic_class_b_store); 445 446 static ssize_t group_desc_progress_limit_show(struct device *dev, 447 struct device_attribute *attr, 448 char *buf) 449 { 450 struct idxd_group *group = confdev_to_group(dev); 451 452 return sysfs_emit(buf, "%d\n", group->desc_progress_limit); 453 } 454 455 static ssize_t group_desc_progress_limit_store(struct device *dev, 456 struct device_attribute *attr, 457 const char *buf, size_t count) 458 { 459 struct idxd_group *group = confdev_to_group(dev); 460 int val, rc; 461 462 rc = kstrtoint(buf, 10, &val); 463 if (rc < 0) 464 return -EINVAL; 465 466 if (val & ~GENMASK(1, 0)) 467 return -EINVAL; 468 469 group->desc_progress_limit = val; 470 return count; 471 } 472 473 static struct device_attribute dev_attr_group_desc_progress_limit = 474 __ATTR(desc_progress_limit, 0644, group_desc_progress_limit_show, 475 group_desc_progress_limit_store); 476 477 static ssize_t group_batch_progress_limit_show(struct device *dev, 478 struct device_attribute *attr, 479 char *buf) 480 { 481 struct idxd_group *group = confdev_to_group(dev); 482 483 return sysfs_emit(buf, "%d\n", group->batch_progress_limit); 484 } 485 486 static ssize_t group_batch_progress_limit_store(struct device *dev, 487 struct device_attribute *attr, 488 const char *buf, size_t count) 489 { 490 struct idxd_group *group = confdev_to_group(dev); 491 int val, rc; 492 493 rc = kstrtoint(buf, 10, &val); 494 if (rc < 0) 495 return -EINVAL; 496 497 if (val & ~GENMASK(1, 0)) 498 return -EINVAL; 499 500 group->batch_progress_limit = val; 501 return count; 502 } 503 504 static struct device_attribute dev_attr_group_batch_progress_limit = 505 __ATTR(batch_progress_limit, 0644, group_batch_progress_limit_show, 506 group_batch_progress_limit_store); 507 static struct attribute *idxd_group_attributes[] = { 508 &dev_attr_group_work_queues.attr, 509 &dev_attr_group_engines.attr, 510 &dev_attr_group_use_token_limit.attr, 511 &dev_attr_group_use_read_buffer_limit.attr, 512 &dev_attr_group_tokens_allowed.attr, 513 &dev_attr_group_read_buffers_allowed.attr, 514 &dev_attr_group_tokens_reserved.attr, 515 &dev_attr_group_read_buffers_reserved.attr, 516 &dev_attr_group_traffic_class_a.attr, 517 &dev_attr_group_traffic_class_b.attr, 518 &dev_attr_group_desc_progress_limit.attr, 519 &dev_attr_group_batch_progress_limit.attr, 520 NULL, 521 }; 522 523 static bool idxd_group_attr_progress_limit_invisible(struct attribute *attr, 524 struct idxd_device *idxd) 525 { 526 return (attr == &dev_attr_group_desc_progress_limit.attr || 527 attr == &dev_attr_group_batch_progress_limit.attr) && 528 !idxd->hw.group_cap.progress_limit; 529 } 530 531 static umode_t idxd_group_attr_visible(struct kobject *kobj, 532 struct attribute *attr, int n) 533 { 534 struct device *dev = container_of(kobj, struct device, kobj); 535 struct idxd_group *group = confdev_to_group(dev); 536 struct idxd_device *idxd = group->idxd; 537 538 if (idxd_group_attr_progress_limit_invisible(attr, idxd)) 539 return 0; 540 541 return attr->mode; 542 } 543 544 static const struct attribute_group idxd_group_attribute_group = { 545 .attrs = idxd_group_attributes, 546 .is_visible = idxd_group_attr_visible, 547 }; 548 549 static const struct attribute_group *idxd_group_attribute_groups[] = { 550 &idxd_group_attribute_group, 551 NULL, 552 }; 553 554 static void idxd_conf_group_release(struct device *dev) 555 { 556 struct idxd_group *group = confdev_to_group(dev); 557 558 kfree(group); 559 } 560 561 struct device_type idxd_group_device_type = { 562 .name = "group", 563 .release = idxd_conf_group_release, 564 .groups = idxd_group_attribute_groups, 565 }; 566 567 /* IDXD work queue attribs */ 568 static ssize_t wq_clients_show(struct device *dev, 569 struct device_attribute *attr, char *buf) 570 { 571 struct idxd_wq *wq = confdev_to_wq(dev); 572 573 return sysfs_emit(buf, "%d\n", wq->client_count); 574 } 575 576 static struct device_attribute dev_attr_wq_clients = 577 __ATTR(clients, 0444, wq_clients_show, NULL); 578 579 static ssize_t wq_state_show(struct device *dev, 580 struct device_attribute *attr, char *buf) 581 { 582 struct idxd_wq *wq = confdev_to_wq(dev); 583 584 switch (wq->state) { 585 case IDXD_WQ_DISABLED: 586 return sysfs_emit(buf, "disabled\n"); 587 case IDXD_WQ_ENABLED: 588 return sysfs_emit(buf, "enabled\n"); 589 } 590 591 return sysfs_emit(buf, "unknown\n"); 592 } 593 594 static struct device_attribute dev_attr_wq_state = 595 __ATTR(state, 0444, wq_state_show, NULL); 596 597 static ssize_t wq_group_id_show(struct device *dev, 598 struct device_attribute *attr, char *buf) 599 { 600 struct idxd_wq *wq = confdev_to_wq(dev); 601 602 if (wq->group) 603 return sysfs_emit(buf, "%u\n", wq->group->id); 604 else 605 return sysfs_emit(buf, "-1\n"); 606 } 607 608 static ssize_t wq_group_id_store(struct device *dev, 609 struct device_attribute *attr, 610 const char *buf, size_t count) 611 { 612 struct idxd_wq *wq = confdev_to_wq(dev); 613 struct idxd_device *idxd = wq->idxd; 614 long id; 615 int rc; 616 struct idxd_group *prevg, *group; 617 618 rc = kstrtol(buf, 10, &id); 619 if (rc < 0) 620 return -EINVAL; 621 622 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 623 return -EPERM; 624 625 if (wq->state != IDXD_WQ_DISABLED) 626 return -EPERM; 627 628 if (id > idxd->max_groups - 1 || id < -1) 629 return -EINVAL; 630 631 if (id == -1) { 632 if (wq->group) { 633 wq->group->num_wqs--; 634 wq->group = NULL; 635 } 636 return count; 637 } 638 639 group = idxd->groups[id]; 640 prevg = wq->group; 641 642 if (prevg) 643 prevg->num_wqs--; 644 wq->group = group; 645 group->num_wqs++; 646 return count; 647 } 648 649 static struct device_attribute dev_attr_wq_group_id = 650 __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store); 651 652 static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, 653 char *buf) 654 { 655 struct idxd_wq *wq = confdev_to_wq(dev); 656 657 return sysfs_emit(buf, "%s\n", wq_dedicated(wq) ? "dedicated" : "shared"); 658 } 659 660 static ssize_t wq_mode_store(struct device *dev, 661 struct device_attribute *attr, const char *buf, 662 size_t count) 663 { 664 struct idxd_wq *wq = confdev_to_wq(dev); 665 struct idxd_device *idxd = wq->idxd; 666 667 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 668 return -EPERM; 669 670 if (wq->state != IDXD_WQ_DISABLED) 671 return -EPERM; 672 673 if (sysfs_streq(buf, "dedicated")) { 674 set_bit(WQ_FLAG_DEDICATED, &wq->flags); 675 wq->threshold = 0; 676 } else if (sysfs_streq(buf, "shared")) { 677 clear_bit(WQ_FLAG_DEDICATED, &wq->flags); 678 } else { 679 return -EINVAL; 680 } 681 682 return count; 683 } 684 685 static struct device_attribute dev_attr_wq_mode = 686 __ATTR(mode, 0644, wq_mode_show, wq_mode_store); 687 688 static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, 689 char *buf) 690 { 691 struct idxd_wq *wq = confdev_to_wq(dev); 692 693 return sysfs_emit(buf, "%u\n", wq->size); 694 } 695 696 static int total_claimed_wq_size(struct idxd_device *idxd) 697 { 698 int i; 699 int wq_size = 0; 700 701 for (i = 0; i < idxd->max_wqs; i++) { 702 struct idxd_wq *wq = idxd->wqs[i]; 703 704 wq_size += wq->size; 705 } 706 707 return wq_size; 708 } 709 710 static ssize_t wq_size_store(struct device *dev, 711 struct device_attribute *attr, const char *buf, 712 size_t count) 713 { 714 struct idxd_wq *wq = confdev_to_wq(dev); 715 unsigned long size; 716 struct idxd_device *idxd = wq->idxd; 717 int rc; 718 719 rc = kstrtoul(buf, 10, &size); 720 if (rc < 0) 721 return -EINVAL; 722 723 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 724 return -EPERM; 725 726 if (idxd->state == IDXD_DEV_ENABLED) 727 return -EPERM; 728 729 if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size) 730 return -EINVAL; 731 732 wq->size = size; 733 return count; 734 } 735 736 static struct device_attribute dev_attr_wq_size = 737 __ATTR(size, 0644, wq_size_show, wq_size_store); 738 739 static ssize_t wq_priority_show(struct device *dev, 740 struct device_attribute *attr, char *buf) 741 { 742 struct idxd_wq *wq = confdev_to_wq(dev); 743 744 return sysfs_emit(buf, "%u\n", wq->priority); 745 } 746 747 static ssize_t wq_priority_store(struct device *dev, 748 struct device_attribute *attr, 749 const char *buf, size_t count) 750 { 751 struct idxd_wq *wq = confdev_to_wq(dev); 752 unsigned long prio; 753 struct idxd_device *idxd = wq->idxd; 754 int rc; 755 756 rc = kstrtoul(buf, 10, &prio); 757 if (rc < 0) 758 return -EINVAL; 759 760 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 761 return -EPERM; 762 763 if (wq->state != IDXD_WQ_DISABLED) 764 return -EPERM; 765 766 if (prio > IDXD_MAX_PRIORITY) 767 return -EINVAL; 768 769 wq->priority = prio; 770 return count; 771 } 772 773 static struct device_attribute dev_attr_wq_priority = 774 __ATTR(priority, 0644, wq_priority_show, wq_priority_store); 775 776 static ssize_t wq_block_on_fault_show(struct device *dev, 777 struct device_attribute *attr, char *buf) 778 { 779 struct idxd_wq *wq = confdev_to_wq(dev); 780 781 return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags)); 782 } 783 784 static ssize_t wq_block_on_fault_store(struct device *dev, 785 struct device_attribute *attr, 786 const char *buf, size_t count) 787 { 788 struct idxd_wq *wq = confdev_to_wq(dev); 789 struct idxd_device *idxd = wq->idxd; 790 bool bof; 791 int rc; 792 793 if (!idxd->hw.gen_cap.block_on_fault) 794 return -EOPNOTSUPP; 795 796 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 797 return -EPERM; 798 799 if (wq->state != IDXD_WQ_DISABLED) 800 return -ENXIO; 801 802 rc = kstrtobool(buf, &bof); 803 if (rc < 0) 804 return rc; 805 806 if (bof) 807 set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); 808 else 809 clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags); 810 811 return count; 812 } 813 814 static struct device_attribute dev_attr_wq_block_on_fault = 815 __ATTR(block_on_fault, 0644, wq_block_on_fault_show, 816 wq_block_on_fault_store); 817 818 static ssize_t wq_threshold_show(struct device *dev, 819 struct device_attribute *attr, char *buf) 820 { 821 struct idxd_wq *wq = confdev_to_wq(dev); 822 823 return sysfs_emit(buf, "%u\n", wq->threshold); 824 } 825 826 static ssize_t wq_threshold_store(struct device *dev, 827 struct device_attribute *attr, 828 const char *buf, size_t count) 829 { 830 struct idxd_wq *wq = confdev_to_wq(dev); 831 struct idxd_device *idxd = wq->idxd; 832 unsigned int val; 833 int rc; 834 835 rc = kstrtouint(buf, 0, &val); 836 if (rc < 0) 837 return -EINVAL; 838 839 if (val > wq->size || val <= 0) 840 return -EINVAL; 841 842 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 843 return -EPERM; 844 845 if (wq->state != IDXD_WQ_DISABLED) 846 return -ENXIO; 847 848 if (test_bit(WQ_FLAG_DEDICATED, &wq->flags)) 849 return -EINVAL; 850 851 wq->threshold = val; 852 853 return count; 854 } 855 856 static struct device_attribute dev_attr_wq_threshold = 857 __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store); 858 859 static ssize_t wq_type_show(struct device *dev, 860 struct device_attribute *attr, char *buf) 861 { 862 struct idxd_wq *wq = confdev_to_wq(dev); 863 864 switch (wq->type) { 865 case IDXD_WQT_KERNEL: 866 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_KERNEL]); 867 case IDXD_WQT_USER: 868 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_USER]); 869 case IDXD_WQT_NONE: 870 default: 871 return sysfs_emit(buf, "%s\n", idxd_wq_type_names[IDXD_WQT_NONE]); 872 } 873 874 return -EINVAL; 875 } 876 877 static ssize_t wq_type_store(struct device *dev, 878 struct device_attribute *attr, const char *buf, 879 size_t count) 880 { 881 struct idxd_wq *wq = confdev_to_wq(dev); 882 enum idxd_wq_type old_type; 883 884 if (wq->state != IDXD_WQ_DISABLED) 885 return -EPERM; 886 887 old_type = wq->type; 888 if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE])) 889 wq->type = IDXD_WQT_NONE; 890 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) 891 wq->type = IDXD_WQT_KERNEL; 892 else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) 893 wq->type = IDXD_WQT_USER; 894 else 895 return -EINVAL; 896 897 /* If we are changing queue type, clear the name */ 898 if (wq->type != old_type) 899 memset(wq->name, 0, WQ_NAME_SIZE + 1); 900 901 return count; 902 } 903 904 static struct device_attribute dev_attr_wq_type = 905 __ATTR(type, 0644, wq_type_show, wq_type_store); 906 907 static ssize_t wq_name_show(struct device *dev, 908 struct device_attribute *attr, char *buf) 909 { 910 struct idxd_wq *wq = confdev_to_wq(dev); 911 912 return sysfs_emit(buf, "%s\n", wq->name); 913 } 914 915 static ssize_t wq_name_store(struct device *dev, 916 struct device_attribute *attr, const char *buf, 917 size_t count) 918 { 919 struct idxd_wq *wq = confdev_to_wq(dev); 920 char *input, *pos; 921 922 if (wq->state != IDXD_WQ_DISABLED) 923 return -EPERM; 924 925 if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) 926 return -EINVAL; 927 928 /* 929 * This is temporarily placed here until we have SVM support for 930 * dmaengine. 931 */ 932 if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) 933 return -EOPNOTSUPP; 934 935 input = kstrndup(buf, count, GFP_KERNEL); 936 if (!input) 937 return -ENOMEM; 938 939 pos = strim(input); 940 memset(wq->name, 0, WQ_NAME_SIZE + 1); 941 sprintf(wq->name, "%s", pos); 942 kfree(input); 943 return count; 944 } 945 946 static struct device_attribute dev_attr_wq_name = 947 __ATTR(name, 0644, wq_name_show, wq_name_store); 948 949 static ssize_t wq_cdev_minor_show(struct device *dev, 950 struct device_attribute *attr, char *buf) 951 { 952 struct idxd_wq *wq = confdev_to_wq(dev); 953 int minor = -1; 954 955 mutex_lock(&wq->wq_lock); 956 if (wq->idxd_cdev) 957 minor = wq->idxd_cdev->minor; 958 mutex_unlock(&wq->wq_lock); 959 960 if (minor == -1) 961 return -ENXIO; 962 return sysfs_emit(buf, "%d\n", minor); 963 } 964 965 static struct device_attribute dev_attr_wq_cdev_minor = 966 __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL); 967 968 static int __get_sysfs_u64(const char *buf, u64 *val) 969 { 970 int rc; 971 972 rc = kstrtou64(buf, 0, val); 973 if (rc < 0) 974 return -EINVAL; 975 976 if (*val == 0) 977 return -EINVAL; 978 979 *val = roundup_pow_of_two(*val); 980 return 0; 981 } 982 983 static ssize_t wq_max_transfer_size_show(struct device *dev, struct device_attribute *attr, 984 char *buf) 985 { 986 struct idxd_wq *wq = confdev_to_wq(dev); 987 988 return sysfs_emit(buf, "%llu\n", wq->max_xfer_bytes); 989 } 990 991 static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attribute *attr, 992 const char *buf, size_t count) 993 { 994 struct idxd_wq *wq = confdev_to_wq(dev); 995 struct idxd_device *idxd = wq->idxd; 996 u64 xfer_size; 997 int rc; 998 999 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1000 return -EPERM; 1001 1002 if (wq->state != IDXD_WQ_DISABLED) 1003 return -EPERM; 1004 1005 rc = __get_sysfs_u64(buf, &xfer_size); 1006 if (rc < 0) 1007 return rc; 1008 1009 if (xfer_size > idxd->max_xfer_bytes) 1010 return -EINVAL; 1011 1012 wq->max_xfer_bytes = xfer_size; 1013 1014 return count; 1015 } 1016 1017 static struct device_attribute dev_attr_wq_max_transfer_size = 1018 __ATTR(max_transfer_size, 0644, 1019 wq_max_transfer_size_show, wq_max_transfer_size_store); 1020 1021 static ssize_t wq_max_batch_size_show(struct device *dev, struct device_attribute *attr, char *buf) 1022 { 1023 struct idxd_wq *wq = confdev_to_wq(dev); 1024 1025 return sysfs_emit(buf, "%u\n", wq->max_batch_size); 1026 } 1027 1028 static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribute *attr, 1029 const char *buf, size_t count) 1030 { 1031 struct idxd_wq *wq = confdev_to_wq(dev); 1032 struct idxd_device *idxd = wq->idxd; 1033 u64 batch_size; 1034 int rc; 1035 1036 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1037 return -EPERM; 1038 1039 if (wq->state != IDXD_WQ_DISABLED) 1040 return -EPERM; 1041 1042 rc = __get_sysfs_u64(buf, &batch_size); 1043 if (rc < 0) 1044 return rc; 1045 1046 if (batch_size > idxd->max_batch_size) 1047 return -EINVAL; 1048 1049 wq->max_batch_size = (u32)batch_size; 1050 1051 return count; 1052 } 1053 1054 static struct device_attribute dev_attr_wq_max_batch_size = 1055 __ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store); 1056 1057 static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf) 1058 { 1059 struct idxd_wq *wq = confdev_to_wq(dev); 1060 1061 return sysfs_emit(buf, "%u\n", test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags)); 1062 } 1063 1064 static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr, 1065 const char *buf, size_t count) 1066 { 1067 struct idxd_wq *wq = confdev_to_wq(dev); 1068 struct idxd_device *idxd = wq->idxd; 1069 bool ats_dis; 1070 int rc; 1071 1072 if (wq->state != IDXD_WQ_DISABLED) 1073 return -EPERM; 1074 1075 if (!idxd->hw.wq_cap.wq_ats_support) 1076 return -EOPNOTSUPP; 1077 1078 rc = kstrtobool(buf, &ats_dis); 1079 if (rc < 0) 1080 return rc; 1081 1082 if (ats_dis) 1083 set_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); 1084 else 1085 clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); 1086 1087 return count; 1088 } 1089 1090 static struct device_attribute dev_attr_wq_ats_disable = 1091 __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store); 1092 1093 static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *attr, char *buf) 1094 { 1095 struct idxd_wq *wq = confdev_to_wq(dev); 1096 struct idxd_device *idxd = wq->idxd; 1097 u32 occup, offset; 1098 1099 if (!idxd->hw.wq_cap.occupancy) 1100 return -EOPNOTSUPP; 1101 1102 offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_OCCUP_IDX); 1103 occup = ioread32(idxd->reg_base + offset) & WQCFG_OCCUP_MASK; 1104 1105 return sysfs_emit(buf, "%u\n", occup); 1106 } 1107 1108 static struct device_attribute dev_attr_wq_occupancy = 1109 __ATTR(occupancy, 0444, wq_occupancy_show, NULL); 1110 1111 static ssize_t wq_enqcmds_retries_show(struct device *dev, 1112 struct device_attribute *attr, char *buf) 1113 { 1114 struct idxd_wq *wq = confdev_to_wq(dev); 1115 1116 if (wq_dedicated(wq)) 1117 return -EOPNOTSUPP; 1118 1119 return sysfs_emit(buf, "%u\n", wq->enqcmds_retries); 1120 } 1121 1122 static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr, 1123 const char *buf, size_t count) 1124 { 1125 struct idxd_wq *wq = confdev_to_wq(dev); 1126 int rc; 1127 unsigned int retries; 1128 1129 if (wq_dedicated(wq)) 1130 return -EOPNOTSUPP; 1131 1132 rc = kstrtouint(buf, 10, &retries); 1133 if (rc < 0) 1134 return rc; 1135 1136 if (retries > IDXD_ENQCMDS_MAX_RETRIES) 1137 retries = IDXD_ENQCMDS_MAX_RETRIES; 1138 1139 wq->enqcmds_retries = retries; 1140 return count; 1141 } 1142 1143 static struct device_attribute dev_attr_wq_enqcmds_retries = 1144 __ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store); 1145 1146 static ssize_t wq_op_config_show(struct device *dev, 1147 struct device_attribute *attr, char *buf) 1148 { 1149 struct idxd_wq *wq = confdev_to_wq(dev); 1150 1151 return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap); 1152 } 1153 1154 static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask) 1155 { 1156 int bit; 1157 1158 /* 1159 * The OPCAP is defined as 256 bits that represents each operation the device 1160 * supports per bit. Iterate through all the bits and check if the input mask 1161 * is set for bits that are not set in the OPCAP for the device. If no OPCAP 1162 * bit is set and input mask has the bit set, then return error. 1163 */ 1164 for_each_set_bit(bit, opmask, IDXD_MAX_OPCAP_BITS) { 1165 if (!test_bit(bit, idxd->opcap_bmap)) 1166 return -EINVAL; 1167 } 1168 1169 return 0; 1170 } 1171 1172 static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *attr, 1173 const char *buf, size_t count) 1174 { 1175 struct idxd_wq *wq = confdev_to_wq(dev); 1176 struct idxd_device *idxd = wq->idxd; 1177 unsigned long *opmask; 1178 int rc; 1179 1180 if (wq->state != IDXD_WQ_DISABLED) 1181 return -EPERM; 1182 1183 opmask = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); 1184 if (!opmask) 1185 return -ENOMEM; 1186 1187 rc = bitmap_parse(buf, count, opmask, IDXD_MAX_OPCAP_BITS); 1188 if (rc < 0) 1189 goto err; 1190 1191 rc = idxd_verify_supported_opcap(idxd, opmask); 1192 if (rc < 0) 1193 goto err; 1194 1195 bitmap_copy(wq->opcap_bmap, opmask, IDXD_MAX_OPCAP_BITS); 1196 1197 bitmap_free(opmask); 1198 return count; 1199 1200 err: 1201 bitmap_free(opmask); 1202 return rc; 1203 } 1204 1205 static struct device_attribute dev_attr_wq_op_config = 1206 __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store); 1207 1208 static struct attribute *idxd_wq_attributes[] = { 1209 &dev_attr_wq_clients.attr, 1210 &dev_attr_wq_state.attr, 1211 &dev_attr_wq_group_id.attr, 1212 &dev_attr_wq_mode.attr, 1213 &dev_attr_wq_size.attr, 1214 &dev_attr_wq_priority.attr, 1215 &dev_attr_wq_block_on_fault.attr, 1216 &dev_attr_wq_threshold.attr, 1217 &dev_attr_wq_type.attr, 1218 &dev_attr_wq_name.attr, 1219 &dev_attr_wq_cdev_minor.attr, 1220 &dev_attr_wq_max_transfer_size.attr, 1221 &dev_attr_wq_max_batch_size.attr, 1222 &dev_attr_wq_ats_disable.attr, 1223 &dev_attr_wq_occupancy.attr, 1224 &dev_attr_wq_enqcmds_retries.attr, 1225 &dev_attr_wq_op_config.attr, 1226 NULL, 1227 }; 1228 1229 static bool idxd_wq_attr_op_config_invisible(struct attribute *attr, 1230 struct idxd_device *idxd) 1231 { 1232 return attr == &dev_attr_wq_op_config.attr && 1233 !idxd->hw.wq_cap.op_config; 1234 } 1235 1236 static umode_t idxd_wq_attr_visible(struct kobject *kobj, 1237 struct attribute *attr, int n) 1238 { 1239 struct device *dev = container_of(kobj, struct device, kobj); 1240 struct idxd_wq *wq = confdev_to_wq(dev); 1241 struct idxd_device *idxd = wq->idxd; 1242 1243 if (idxd_wq_attr_op_config_invisible(attr, idxd)) 1244 return 0; 1245 1246 return attr->mode; 1247 } 1248 1249 static const struct attribute_group idxd_wq_attribute_group = { 1250 .attrs = idxd_wq_attributes, 1251 .is_visible = idxd_wq_attr_visible, 1252 }; 1253 1254 static const struct attribute_group *idxd_wq_attribute_groups[] = { 1255 &idxd_wq_attribute_group, 1256 NULL, 1257 }; 1258 1259 static void idxd_conf_wq_release(struct device *dev) 1260 { 1261 struct idxd_wq *wq = confdev_to_wq(dev); 1262 1263 bitmap_free(wq->opcap_bmap); 1264 kfree(wq->wqcfg); 1265 kfree(wq); 1266 } 1267 1268 struct device_type idxd_wq_device_type = { 1269 .name = "wq", 1270 .release = idxd_conf_wq_release, 1271 .groups = idxd_wq_attribute_groups, 1272 }; 1273 1274 /* IDXD device attribs */ 1275 static ssize_t version_show(struct device *dev, struct device_attribute *attr, 1276 char *buf) 1277 { 1278 struct idxd_device *idxd = confdev_to_idxd(dev); 1279 1280 return sysfs_emit(buf, "%#x\n", idxd->hw.version); 1281 } 1282 static DEVICE_ATTR_RO(version); 1283 1284 static ssize_t max_work_queues_size_show(struct device *dev, 1285 struct device_attribute *attr, 1286 char *buf) 1287 { 1288 struct idxd_device *idxd = confdev_to_idxd(dev); 1289 1290 return sysfs_emit(buf, "%u\n", idxd->max_wq_size); 1291 } 1292 static DEVICE_ATTR_RO(max_work_queues_size); 1293 1294 static ssize_t max_groups_show(struct device *dev, 1295 struct device_attribute *attr, char *buf) 1296 { 1297 struct idxd_device *idxd = confdev_to_idxd(dev); 1298 1299 return sysfs_emit(buf, "%u\n", idxd->max_groups); 1300 } 1301 static DEVICE_ATTR_RO(max_groups); 1302 1303 static ssize_t max_work_queues_show(struct device *dev, 1304 struct device_attribute *attr, char *buf) 1305 { 1306 struct idxd_device *idxd = confdev_to_idxd(dev); 1307 1308 return sysfs_emit(buf, "%u\n", idxd->max_wqs); 1309 } 1310 static DEVICE_ATTR_RO(max_work_queues); 1311 1312 static ssize_t max_engines_show(struct device *dev, 1313 struct device_attribute *attr, char *buf) 1314 { 1315 struct idxd_device *idxd = confdev_to_idxd(dev); 1316 1317 return sysfs_emit(buf, "%u\n", idxd->max_engines); 1318 } 1319 static DEVICE_ATTR_RO(max_engines); 1320 1321 static ssize_t numa_node_show(struct device *dev, 1322 struct device_attribute *attr, char *buf) 1323 { 1324 struct idxd_device *idxd = confdev_to_idxd(dev); 1325 1326 return sysfs_emit(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); 1327 } 1328 static DEVICE_ATTR_RO(numa_node); 1329 1330 static ssize_t max_batch_size_show(struct device *dev, 1331 struct device_attribute *attr, char *buf) 1332 { 1333 struct idxd_device *idxd = confdev_to_idxd(dev); 1334 1335 return sysfs_emit(buf, "%u\n", idxd->max_batch_size); 1336 } 1337 static DEVICE_ATTR_RO(max_batch_size); 1338 1339 static ssize_t max_transfer_size_show(struct device *dev, 1340 struct device_attribute *attr, 1341 char *buf) 1342 { 1343 struct idxd_device *idxd = confdev_to_idxd(dev); 1344 1345 return sysfs_emit(buf, "%llu\n", idxd->max_xfer_bytes); 1346 } 1347 static DEVICE_ATTR_RO(max_transfer_size); 1348 1349 static ssize_t op_cap_show(struct device *dev, 1350 struct device_attribute *attr, char *buf) 1351 { 1352 struct idxd_device *idxd = confdev_to_idxd(dev); 1353 1354 return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap); 1355 } 1356 static DEVICE_ATTR_RO(op_cap); 1357 1358 static ssize_t gen_cap_show(struct device *dev, 1359 struct device_attribute *attr, char *buf) 1360 { 1361 struct idxd_device *idxd = confdev_to_idxd(dev); 1362 1363 return sysfs_emit(buf, "%#llx\n", idxd->hw.gen_cap.bits); 1364 } 1365 static DEVICE_ATTR_RO(gen_cap); 1366 1367 static ssize_t configurable_show(struct device *dev, 1368 struct device_attribute *attr, char *buf) 1369 { 1370 struct idxd_device *idxd = confdev_to_idxd(dev); 1371 1372 return sysfs_emit(buf, "%u\n", test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); 1373 } 1374 static DEVICE_ATTR_RO(configurable); 1375 1376 static ssize_t clients_show(struct device *dev, 1377 struct device_attribute *attr, char *buf) 1378 { 1379 struct idxd_device *idxd = confdev_to_idxd(dev); 1380 int count = 0, i; 1381 1382 spin_lock(&idxd->dev_lock); 1383 for (i = 0; i < idxd->max_wqs; i++) { 1384 struct idxd_wq *wq = idxd->wqs[i]; 1385 1386 count += wq->client_count; 1387 } 1388 spin_unlock(&idxd->dev_lock); 1389 1390 return sysfs_emit(buf, "%d\n", count); 1391 } 1392 static DEVICE_ATTR_RO(clients); 1393 1394 static ssize_t pasid_enabled_show(struct device *dev, 1395 struct device_attribute *attr, char *buf) 1396 { 1397 struct idxd_device *idxd = confdev_to_idxd(dev); 1398 1399 return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd)); 1400 } 1401 static DEVICE_ATTR_RO(pasid_enabled); 1402 1403 static ssize_t state_show(struct device *dev, 1404 struct device_attribute *attr, char *buf) 1405 { 1406 struct idxd_device *idxd = confdev_to_idxd(dev); 1407 1408 switch (idxd->state) { 1409 case IDXD_DEV_DISABLED: 1410 return sysfs_emit(buf, "disabled\n"); 1411 case IDXD_DEV_ENABLED: 1412 return sysfs_emit(buf, "enabled\n"); 1413 case IDXD_DEV_HALTED: 1414 return sysfs_emit(buf, "halted\n"); 1415 } 1416 1417 return sysfs_emit(buf, "unknown\n"); 1418 } 1419 static DEVICE_ATTR_RO(state); 1420 1421 static ssize_t errors_show(struct device *dev, 1422 struct device_attribute *attr, char *buf) 1423 { 1424 struct idxd_device *idxd = confdev_to_idxd(dev); 1425 int i, out = 0; 1426 1427 spin_lock(&idxd->dev_lock); 1428 for (i = 0; i < 4; i++) 1429 out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]); 1430 spin_unlock(&idxd->dev_lock); 1431 out--; 1432 out += sysfs_emit_at(buf, out, "\n"); 1433 return out; 1434 } 1435 static DEVICE_ATTR_RO(errors); 1436 1437 static ssize_t max_read_buffers_show(struct device *dev, 1438 struct device_attribute *attr, char *buf) 1439 { 1440 struct idxd_device *idxd = confdev_to_idxd(dev); 1441 1442 return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); 1443 } 1444 1445 static ssize_t max_tokens_show(struct device *dev, 1446 struct device_attribute *attr, char *buf) 1447 { 1448 dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n"); 1449 return max_read_buffers_show(dev, attr, buf); 1450 } 1451 1452 static DEVICE_ATTR_RO(max_tokens); /* deprecated */ 1453 static DEVICE_ATTR_RO(max_read_buffers); 1454 1455 static ssize_t read_buffer_limit_show(struct device *dev, 1456 struct device_attribute *attr, char *buf) 1457 { 1458 struct idxd_device *idxd = confdev_to_idxd(dev); 1459 1460 return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); 1461 } 1462 1463 static ssize_t token_limit_show(struct device *dev, 1464 struct device_attribute *attr, char *buf) 1465 { 1466 dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n"); 1467 return read_buffer_limit_show(dev, attr, buf); 1468 } 1469 1470 static ssize_t read_buffer_limit_store(struct device *dev, 1471 struct device_attribute *attr, 1472 const char *buf, size_t count) 1473 { 1474 struct idxd_device *idxd = confdev_to_idxd(dev); 1475 unsigned long val; 1476 int rc; 1477 1478 rc = kstrtoul(buf, 10, &val); 1479 if (rc < 0) 1480 return -EINVAL; 1481 1482 if (idxd->state == IDXD_DEV_ENABLED) 1483 return -EPERM; 1484 1485 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1486 return -EPERM; 1487 1488 if (!idxd->hw.group_cap.rdbuf_limit) 1489 return -EPERM; 1490 1491 if (val > idxd->hw.group_cap.total_rdbufs) 1492 return -EINVAL; 1493 1494 idxd->rdbuf_limit = val; 1495 return count; 1496 } 1497 1498 static ssize_t token_limit_store(struct device *dev, 1499 struct device_attribute *attr, 1500 const char *buf, size_t count) 1501 { 1502 dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n"); 1503 return read_buffer_limit_store(dev, attr, buf, count); 1504 } 1505 1506 static DEVICE_ATTR_RW(token_limit); /* deprecated */ 1507 static DEVICE_ATTR_RW(read_buffer_limit); 1508 1509 static ssize_t cdev_major_show(struct device *dev, 1510 struct device_attribute *attr, char *buf) 1511 { 1512 struct idxd_device *idxd = confdev_to_idxd(dev); 1513 1514 return sysfs_emit(buf, "%u\n", idxd->major); 1515 } 1516 static DEVICE_ATTR_RO(cdev_major); 1517 1518 static ssize_t cmd_status_show(struct device *dev, 1519 struct device_attribute *attr, char *buf) 1520 { 1521 struct idxd_device *idxd = confdev_to_idxd(dev); 1522 1523 return sysfs_emit(buf, "%#x\n", idxd->cmd_status); 1524 } 1525 1526 static ssize_t cmd_status_store(struct device *dev, struct device_attribute *attr, 1527 const char *buf, size_t count) 1528 { 1529 struct idxd_device *idxd = confdev_to_idxd(dev); 1530 1531 idxd->cmd_status = 0; 1532 return count; 1533 } 1534 static DEVICE_ATTR_RW(cmd_status); 1535 1536 static struct attribute *idxd_device_attributes[] = { 1537 &dev_attr_version.attr, 1538 &dev_attr_max_groups.attr, 1539 &dev_attr_max_work_queues.attr, 1540 &dev_attr_max_work_queues_size.attr, 1541 &dev_attr_max_engines.attr, 1542 &dev_attr_numa_node.attr, 1543 &dev_attr_max_batch_size.attr, 1544 &dev_attr_max_transfer_size.attr, 1545 &dev_attr_op_cap.attr, 1546 &dev_attr_gen_cap.attr, 1547 &dev_attr_configurable.attr, 1548 &dev_attr_clients.attr, 1549 &dev_attr_pasid_enabled.attr, 1550 &dev_attr_state.attr, 1551 &dev_attr_errors.attr, 1552 &dev_attr_max_tokens.attr, 1553 &dev_attr_max_read_buffers.attr, 1554 &dev_attr_token_limit.attr, 1555 &dev_attr_read_buffer_limit.attr, 1556 &dev_attr_cdev_major.attr, 1557 &dev_attr_cmd_status.attr, 1558 NULL, 1559 }; 1560 1561 static const struct attribute_group idxd_device_attribute_group = { 1562 .attrs = idxd_device_attributes, 1563 }; 1564 1565 static const struct attribute_group *idxd_attribute_groups[] = { 1566 &idxd_device_attribute_group, 1567 NULL, 1568 }; 1569 1570 static void idxd_conf_device_release(struct device *dev) 1571 { 1572 struct idxd_device *idxd = confdev_to_idxd(dev); 1573 1574 kfree(idxd->groups); 1575 bitmap_free(idxd->wq_enable_map); 1576 kfree(idxd->wqs); 1577 kfree(idxd->engines); 1578 ida_free(&idxd_ida, idxd->id); 1579 bitmap_free(idxd->opcap_bmap); 1580 kfree(idxd); 1581 } 1582 1583 struct device_type dsa_device_type = { 1584 .name = "dsa", 1585 .release = idxd_conf_device_release, 1586 .groups = idxd_attribute_groups, 1587 }; 1588 1589 struct device_type iax_device_type = { 1590 .name = "iax", 1591 .release = idxd_conf_device_release, 1592 .groups = idxd_attribute_groups, 1593 }; 1594 1595 static int idxd_register_engine_devices(struct idxd_device *idxd) 1596 { 1597 struct idxd_engine *engine; 1598 int i, j, rc; 1599 1600 for (i = 0; i < idxd->max_engines; i++) { 1601 engine = idxd->engines[i]; 1602 rc = device_add(engine_confdev(engine)); 1603 if (rc < 0) 1604 goto cleanup; 1605 } 1606 1607 return 0; 1608 1609 cleanup: 1610 j = i - 1; 1611 for (; i < idxd->max_engines; i++) { 1612 engine = idxd->engines[i]; 1613 put_device(engine_confdev(engine)); 1614 } 1615 1616 while (j--) { 1617 engine = idxd->engines[j]; 1618 device_unregister(engine_confdev(engine)); 1619 } 1620 return rc; 1621 } 1622 1623 static int idxd_register_group_devices(struct idxd_device *idxd) 1624 { 1625 struct idxd_group *group; 1626 int i, j, rc; 1627 1628 for (i = 0; i < idxd->max_groups; i++) { 1629 group = idxd->groups[i]; 1630 rc = device_add(group_confdev(group)); 1631 if (rc < 0) 1632 goto cleanup; 1633 } 1634 1635 return 0; 1636 1637 cleanup: 1638 j = i - 1; 1639 for (; i < idxd->max_groups; i++) { 1640 group = idxd->groups[i]; 1641 put_device(group_confdev(group)); 1642 } 1643 1644 while (j--) { 1645 group = idxd->groups[j]; 1646 device_unregister(group_confdev(group)); 1647 } 1648 return rc; 1649 } 1650 1651 static int idxd_register_wq_devices(struct idxd_device *idxd) 1652 { 1653 struct idxd_wq *wq; 1654 int i, rc, j; 1655 1656 for (i = 0; i < idxd->max_wqs; i++) { 1657 wq = idxd->wqs[i]; 1658 rc = device_add(wq_confdev(wq)); 1659 if (rc < 0) 1660 goto cleanup; 1661 } 1662 1663 return 0; 1664 1665 cleanup: 1666 j = i - 1; 1667 for (; i < idxd->max_wqs; i++) { 1668 wq = idxd->wqs[i]; 1669 put_device(wq_confdev(wq)); 1670 } 1671 1672 while (j--) { 1673 wq = idxd->wqs[j]; 1674 device_unregister(wq_confdev(wq)); 1675 } 1676 return rc; 1677 } 1678 1679 int idxd_register_devices(struct idxd_device *idxd) 1680 { 1681 struct device *dev = &idxd->pdev->dev; 1682 int rc, i; 1683 1684 rc = device_add(idxd_confdev(idxd)); 1685 if (rc < 0) 1686 return rc; 1687 1688 rc = idxd_register_wq_devices(idxd); 1689 if (rc < 0) { 1690 dev_dbg(dev, "WQ devices registering failed: %d\n", rc); 1691 goto err_wq; 1692 } 1693 1694 rc = idxd_register_engine_devices(idxd); 1695 if (rc < 0) { 1696 dev_dbg(dev, "Engine devices registering failed: %d\n", rc); 1697 goto err_engine; 1698 } 1699 1700 rc = idxd_register_group_devices(idxd); 1701 if (rc < 0) { 1702 dev_dbg(dev, "Group device registering failed: %d\n", rc); 1703 goto err_group; 1704 } 1705 1706 return 0; 1707 1708 err_group: 1709 for (i = 0; i < idxd->max_engines; i++) 1710 device_unregister(engine_confdev(idxd->engines[i])); 1711 err_engine: 1712 for (i = 0; i < idxd->max_wqs; i++) 1713 device_unregister(wq_confdev(idxd->wqs[i])); 1714 err_wq: 1715 device_del(idxd_confdev(idxd)); 1716 return rc; 1717 } 1718 1719 void idxd_unregister_devices(struct idxd_device *idxd) 1720 { 1721 int i; 1722 1723 for (i = 0; i < idxd->max_wqs; i++) { 1724 struct idxd_wq *wq = idxd->wqs[i]; 1725 1726 device_unregister(wq_confdev(wq)); 1727 } 1728 1729 for (i = 0; i < idxd->max_engines; i++) { 1730 struct idxd_engine *engine = idxd->engines[i]; 1731 1732 device_unregister(engine_confdev(engine)); 1733 } 1734 1735 for (i = 0; i < idxd->max_groups; i++) { 1736 struct idxd_group *group = idxd->groups[i]; 1737 1738 device_unregister(group_confdev(group)); 1739 } 1740 } 1741 1742 int idxd_register_bus_type(void) 1743 { 1744 return bus_register(&dsa_bus_type); 1745 } 1746 1747 void idxd_unregister_bus_type(void) 1748 { 1749 bus_unregister(&dsa_bus_type); 1750 } 1751