// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Device Feature List (DFL) Support
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Zhang Yi <yi.z.zhang@intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#include <linux/dfl.h>
#include <linux/fpga-dfl.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/uaccess.h>

#include "dfl.h"

/* protects allocation/removal of ids in the per-type idrs in dfl_devs[] */
static DEFINE_MUTEX(dfl_id_mutex);

/*
 * when adding a new feature dev support in DFL framework, it's required to
 * add a new item in enum dfl_id_type and provide related information in below
 * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for
 * platform device creation (define name strings in dfl.h, as they could be
 * reused by platform device drivers).
 *
 * if the new feature dev needs chardev support, then it's required to add
 * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as
 * index to dfl_chardevs table. If no chardev support just set devt_type
 * as one invalid index (DFL_FPGA_DEVT_MAX).
 */
enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,
};

/* one lockdep class per feature dev type; applied in build_info_commit_dev() */
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};

/**
 * struct dfl_dev_info - dfl feature device information.
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
 * @id: idr id of the feature dev.
 * @devt_type: index to dfl_chrdevs[].
 */
struct dfl_dev_info {
	const char *name;
	u16 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};

/* it is indexed by dfl_id_type */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};

/**
 * struct dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
 * @devt: devt of the char device.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* indexed by enum dfl_fpga_devt_type */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};

/* initialize the per-type feature dev instance id allocators */
static void dfl_ids_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_init(&dfl_devs[i].id);
}

/* destroy the per-type feature dev instance id allocators */
static void dfl_ids_destroy(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		idr_destroy(&dfl_devs[i].id);
}

/* allocate a unique instance id for the given feature dev type */
static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
{
	int id;

	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&dfl_id_mutex);

	return id;
}

/* release an instance id previously allocated by dfl_id_alloc() */
static void dfl_id_free(enum dfl_id_type type, int id)
{
	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	idr_remove(&dfl_devs[type].id, id);
	mutex_unlock(&dfl_id_mutex);
}

/* map a feature platform device to its dfl_id_type via the device name */
static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if (!strcmp(dfl_devs[i].name, pdev->name))
			return i;

	return DFL_ID_MAX;
}

/* map a DFH id value (read from hardware) to its dfl_id_type */
static enum dfl_id_type dfh_id_to_type(u16 id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
		if
		    (dfl_devs[i].dfh_id == id)
			return i;

	return DFL_ID_MAX;
}

/*
 * introduce a global port_ops list, it allows port drivers to register ops
 * in such list, then other feature devices (e.g. FME), could use the port
 * functions even related port platform device is hidden. Below is one example,
 * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is
 * enabled, port (and its AFU) is turned into VF and port platform device
 * is hidden from system but it's still required to access port to finish FPGA
 * reconfiguration function in FME.
 */

static DEFINE_MUTEX(dfl_port_ops_mutex);
static LIST_HEAD(dfl_port_ops_list);

/**
 * dfl_fpga_port_ops_get - get matched port ops from the global list
 * @pdev: platform device to match with associated port ops.
 * Return: matched port ops on success, NULL otherwise.
 *
 * Please note that the caller must call dfl_fpga_port_ops_put() after it is
 * done using the returned port_ops.
 */
struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
{
	struct dfl_fpga_port_ops *ops = NULL;

	mutex_lock(&dfl_port_ops_mutex);
	if (list_empty(&dfl_port_ops_list))
		goto done;

	list_for_each_entry(ops, &dfl_port_ops_list, node) {
		/* match port_ops using the name of platform device */
		if (!strcmp(pdev->name, ops->name)) {
			/* pin the owning module so the ops stay valid */
			if (!try_module_get(ops->owner))
				ops = NULL;
			goto done;
		}
	}

	ops = NULL;
done:
	mutex_unlock(&dfl_port_ops_mutex);
	return ops;
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);

/**
 * dfl_fpga_port_ops_put - put port ops
 * @ops: port ops.
 */
void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
{
	if (ops && ops->owner)
		module_put(ops->owner);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);

/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);

/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to del.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);

/**
 * dfl_fpga_check_port_id - check the port id
 * @pdev: port platform device.
 * @pport_id: port id to compare.
 *
 * Return: 1 if port device matches with given port id, otherwise 0.
 */
int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_port_ops *port_ops;

	/* use the cached id when it has already been queried once */
	if (pdata->id != FEATURE_DEV_ID_UNUSED)
		return pdata->id == *(int *)pport_id;

	/*
	 * NOTE(review): if port_ops is found but ->get_id is NULL, the module
	 * reference taken by dfl_fpga_port_ops_get() is not released on this
	 * early return — confirm whether any port driver omits get_id.
	 */
	port_ops = dfl_fpga_port_ops_get(pdev);
	if (!port_ops || !port_ops->get_id)
		return 0;

	pdata->id = port_ops->get_id(pdev);
	dfl_fpga_port_ops_put(port_ops);

	return pdata->id == *(int *)pport_id;
}
EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);

static DEFINE_IDA(dfl_device_ida);

/* return @id if it matches @ddev's type and feature id, NULL otherwise */
static const struct dfl_device_id *
dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
{
	if (id->type == ddev->type && id->feature_id == ddev->feature_id)
		return id;

	return NULL;
}

/* dfl bus match: walk the driver's id_table for a type/feature_id match */
static int dfl_bus_match(struct device *dev, struct device_driver *drv)
{
	struct dfl_device *ddev = to_dfl_dev(dev);
	struct dfl_driver *ddrv = to_dfl_drv(drv);
	const struct dfl_device_id *id_entry;

	id_entry = ddrv->id_table;
	if (id_entry) {
		while (id_entry->feature_id) {
			if
			    (dfl_match_one_device(id_entry, ddev)) {
				ddev->id_entry = id_entry;
				return 1;
			}
			id_entry++;
		}
	}

	return 0;
}

static int dfl_bus_probe(struct device *dev)
{
	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
	struct dfl_device *ddev = to_dfl_dev(dev);

	return ddrv->probe(ddev);
}

static void dfl_bus_remove(struct device *dev)
{
	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
	struct dfl_device *ddev = to_dfl_dev(dev);

	if (ddrv->remove)
		ddrv->remove(ddev);
}

/* emit a MODALIAS uevent so userspace can autoload the matching dfl driver */
static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct dfl_device *ddev = to_dfl_dev(dev);

	return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
			      ddev->type, ddev->feature_id);
}

static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	return sprintf(buf, "0x%x\n", ddev->type);
}
static DEVICE_ATTR_RO(type);

static ssize_t
feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	return sprintf(buf, "0x%x\n", ddev->feature_id);
}
static DEVICE_ATTR_RO(feature_id);

static struct attribute *dfl_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_feature_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dfl_dev);

static struct bus_type dfl_bus_type = {
	.name = "dfl",
	.match = dfl_bus_match,
	.probe = dfl_bus_probe,
	.remove = dfl_bus_remove,
	.uevent = dfl_bus_uevent,
	.dev_groups = dfl_dev_groups,
};

/* device release callback: frees everything allocated in dfl_dev_add() */
static void release_dfl_dev(struct device *dev)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	if (ddev->mmio_res.parent)
		release_resource(&ddev->mmio_res);

	kfree(ddev->params);

	ida_free(&dfl_device_ida, ddev->id);
	kfree(ddev->irqs);
	kfree(ddev);
}

/* create and register a dfl_device on the dfl bus for one sub feature */
static struct dfl_device *
dfl_dev_add(struct dfl_feature_platform_data *pdata,
	    struct dfl_feature *feature)
{
	struct platform_device *pdev = pdata->dev;
	struct resource *parent_res;
	struct dfl_device *ddev;
	int id, i, ret;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	id = ida_alloc(&dfl_device_ida, GFP_KERNEL);
	if (id < 0) {
		dev_err(&pdev->dev, "unable to get id\n");
		kfree(ddev);
		return ERR_PTR(id);
	}

	/* freeing resources by put_device() after device_initialize() */
	device_initialize(&ddev->dev);
	ddev->dev.parent = &pdev->dev;
	ddev->dev.bus = &dfl_bus_type;
	ddev->dev.release = release_dfl_dev;
	ddev->id = id;
	ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
	if (ret)
		goto put_dev;

	ddev->type = feature_dev_id_type(pdev);
	ddev->feature_id = feature->id;
	ddev->revision = feature->revision;
	ddev->dfh_version = feature->dfh_version;
	ddev->cdev = pdata->dfl_cdev;
	if (feature->param_size) {
		ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
		if (!ddev->params) {
			ret = -ENOMEM;
			goto put_dev;
		}
		ddev->param_size = feature->param_size;
	}

	/* add mmio resource */
	parent_res = &pdev->resource[feature->resource_index];
	ddev->mmio_res.flags = IORESOURCE_MEM;
	ddev->mmio_res.start = parent_res->start;
	ddev->mmio_res.end = parent_res->end;
	ddev->mmio_res.name = dev_name(&ddev->dev);
	ret = insert_resource(parent_res, &ddev->mmio_res);
	if (ret) {
		dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
			dev_name(&ddev->dev), &ddev->mmio_res);
		goto put_dev;
	}

	/* then add irq resource */
	if (feature->nr_irqs) {
		ddev->irqs = kcalloc(feature->nr_irqs,
				     sizeof(*ddev->irqs), GFP_KERNEL);
		if (!ddev->irqs) {
			ret = -ENOMEM;
			goto put_dev;
		}

		for (i = 0; i <
		     feature->nr_irqs; i++)
			ddev->irqs[i] = feature->irq_ctx[i].irq;

		ddev->num_irqs = feature->nr_irqs;
	}

	ret = device_add(&ddev->dev);
	if (ret)
		goto put_dev;

	dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
	return ddev;

put_dev:
	/* calls release_dfl_dev() which does the clean up */
	put_device(&ddev->dev);
	return ERR_PTR(ret);
}

/* unregister all dfl devices created for @pdata's sub features */
static void dfl_devs_remove(struct dfl_feature_platform_data *pdata)
{
	struct dfl_feature *feature;

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ddev) {
			device_unregister(&feature->ddev->dev);
			feature->ddev = NULL;
		}
	}
}

/* create a dfl device for each sub feature that is not already mapped */
static int dfl_devs_add(struct dfl_feature_platform_data *pdata)
{
	struct dfl_feature *feature;
	struct dfl_device *ddev;
	int ret;

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		/* skip features whose mmio region is already mapped locally */
		if (feature->ioaddr)
			continue;

		if (feature->ddev) {
			ret = -EEXIST;
			goto err;
		}

		ddev = dfl_dev_add(pdata, feature);
		if (IS_ERR(ddev)) {
			ret = PTR_ERR(ddev);
			goto err;
		}

		feature->ddev = ddev;
	}

	return 0;

err:
	dfl_devs_remove(pdata);
	return ret;
}

int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
{
	if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
		return -EINVAL;

	dfl_drv->drv.owner = owner;
	dfl_drv->drv.bus = &dfl_bus_type;

	return driver_register(&dfl_drv->drv);
}
EXPORT_SYMBOL(__dfl_driver_register);

void dfl_driver_unregister(struct dfl_driver *dfl_drv)
{
	driver_unregister(&dfl_drv->drv);
}
EXPORT_SYMBOL(dfl_driver_unregister);

#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)

/**
 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
 * @pdev: feature device.
 */
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature *feature;

	dfl_devs_remove(pdata);

	dfl_fpga_dev_for_each_feature(pdata, feature) {
		if (feature->ops) {
			if (feature->ops->uinit)
				feature->ops->uinit(pdev, feature);
			feature->ops = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);

/* map the feature's mmio (non-header features only) and run the driver's init */
static int dfl_feature_instance_init(struct platform_device *pdev,
				     struct dfl_feature_platform_data *pdata,
				     struct dfl_feature *feature,
				     struct dfl_feature_driver *drv)
{
	void __iomem *base;
	int ret = 0;

	if (!is_header_feature(feature)) {
		base = devm_platform_ioremap_resource(pdev,
						      feature->resource_index);
		if (IS_ERR(base)) {
			dev_err(&pdev->dev,
				"ioremap failed for feature 0x%x!\n",
				feature->id);
			return PTR_ERR(base);
		}

		feature->ioaddr = base;
	}

	if (drv->ops->init) {
		ret = drv->ops->init(pdev, feature);
		if (ret)
			return ret;
	}

	feature->ops = drv->ops;

	return ret;
}

/* return true if @driver's id_table contains @feature's id */
static bool dfl_feature_drv_match(struct dfl_feature *feature,
				  struct dfl_feature_driver *driver)
{
	const struct dfl_feature_id *ids = driver->id_table;

	if (ids) {
		while (ids->id) {
			if (ids->id == feature->id)
				return true;
			ids++;
		}
	}
	return false;
}

/**
 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
 * @pdev: feature device.
 * @feature_drvs: drvs for sub features.
 *
 * This function will match sub features with given feature drvs list and
 * use matched drv to init related sub feature.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
			      struct dfl_feature_driver *feature_drvs)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_feature_driver *drv = feature_drvs;
	struct dfl_feature *feature;
	int ret;

	while (drv->ops) {
		dfl_fpga_dev_for_each_feature(pdata, feature) {
			if (dfl_feature_drv_match(feature, drv)) {
				ret = dfl_feature_instance_init(pdev, pdata,
								feature, drv);
				if (ret)
					goto exit;
			}
		}
		drv++;
	}

	ret = dfl_devs_add(pdata);
	if (ret)
		goto exit;

	return 0;
exit:
	dfl_fpga_dev_feature_uinit(pdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);

/* release the chrdev regions allocated by dfl_chardev_init() */
static void dfl_chardev_uinit(void)
{
	int i;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
		if (MAJOR(dfl_chrdevs[i].devt)) {
			unregister_chrdev_region(dfl_chrdevs[i].devt,
						 MINORMASK + 1);
			dfl_chrdevs[i].devt = MKDEV(0, 0);
		}
}

/* allocate one chrdev region per dfl chardev type */
static int dfl_chardev_init(void)
{
	int i, ret;

	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
		ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
					  MINORMASK + 1, dfl_chrdevs[i].name);
		if (ret)
			goto exit;
	}

	return 0;

exit:
	dfl_chardev_uinit();
	return ret;
}

/* build a devt from the chardev type's major and the feature dev instance id */
static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
{
	if (type >= DFL_FPGA_DEVT_MAX)
		return 0;

	return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
}

/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);

/**
 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
 * @pdev: feature dev.
 */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_del(&pdata->cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);

/**
 * struct build_feature_devs_info - info collected during feature dev build.
 *
 * @dev: device to enumerate.
 * @cdev: the container device for all feature devices.
 * @nr_irqs: number of irqs for all feature devices.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 * @feature_dev: current feature device.
 * @ioaddr: header register region address of current FIU in enumeration.
 * @start: register resource start of current FIU.
 * @len: max register resource length of current FIU.
 * @sub_features: a sub features linked list for feature device in enumeration.
 * @feature_num: number of sub features for feature device in enumeration.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	unsigned int nr_irqs;
	int *irq_table;

	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	resource_size_t start;
	resource_size_t len;
	struct list_head sub_features;
	int feature_num;
};

/**
 * struct dfl_feature_info - sub feature info collected during feature dev build
 *
 * @fid: id of this sub feature.
 * @revision: revision of this sub feature
 * @dfh_version: version of Device Feature Header (DFH)
 * @mmio_res: mmio resource of this sub feature.
 * @ioaddr: mapped base address of mmio resource.
 * @node: node in sub_features linked list.
 * @irq_base: start of irq index in this sub feature.
 * @nr_irqs: number of irqs of this sub feature.
 * @param_size: size of DFH parameter data in bytes.
 * @params: DFH parameter data.
 */
struct dfl_feature_info {
	u16 fid;
	u8 revision;
	u8 dfh_version;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
	unsigned int irq_base;
	unsigned int nr_irqs;
	unsigned int param_size;
	u64 params[];
};

/* add a new port device to the container device's port list, holding a ref */
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}

/*
 * register current feature device, it is called when we need to switch to
 * another feature parsing or we have parsed all features on given device
 * feature list.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	enum dfl_id_type type;
	int ret, index = 0, res_idx = 0;

	type = feature_dev_id_type(fdev);
	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
		return -EINVAL;

	/*
	 * we do not need to care for the memory which is associated with
	 * the platform device. After calling platform_device_unregister(),
	 * it will be automatically freed by device's release() callback,
	 * platform_device_release().
	 */
	pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	pdata->id = FEATURE_DEV_ID_UNUSED;
	mutex_init(&pdata->lock);
	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
				   dfl_pdata_key_strings[type]);

	/*
	 * the count should be initialized to 0 to make sure
	 * __fpga_port_enable() following __fpga_port_disable()
	 * works properly for port device.
	 * and it should always be 0 for fme device.
	 */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature has one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index++];
		struct dfl_feature_irq_ctx *ctx;
		unsigned int i;

		/* save resource information for each feature */
		feature->dev = fdev;
		feature->id = finfo->fid;
		feature->revision = finfo->revision;
		feature->dfh_version = finfo->dfh_version;

		if (finfo->param_size) {
			feature->params = devm_kmemdup(binfo->dev,
						       finfo->params, finfo->param_size,
						       GFP_KERNEL);
			if (!feature->params)
				return -ENOMEM;

			feature->param_size = finfo->param_size;
		}
		/*
		 * the FIU header feature has some fundamental functions (sriov
		 * set, port enable/disable) needed for the dfl bus device and
		 * other sub features. So its mmio resource should be mapped by
		 * DFL bus device. And we should not assign it to feature
		 * devices (dfl-fme/afu) again.
		 */
		if (is_header_feature(feature)) {
			feature->resource_index = -1;
			feature->ioaddr =
				devm_ioremap_resource(binfo->dev,
						      &finfo->mmio_res);
			if (IS_ERR(feature->ioaddr))
				return PTR_ERR(feature->ioaddr);
		} else {
			feature->resource_index = res_idx;
			fdev->resource[res_idx++] = finfo->mmio_res;
		}

		if (finfo->nr_irqs) {
			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
					   sizeof(*ctx), GFP_KERNEL);
			if (!ctx)
				return -ENOMEM;

			for (i = 0; i < finfo->nr_irqs; i++)
				ctx[i].irq =
					binfo->irq_table[finfo->irq_base + i];

			feature->irq_ctx = ctx;
			feature->nr_irqs = finfo->nr_irqs;
		}

		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		if (type == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
				get_device(&binfo->feature_dev->dev);
		/*
		 * reset it to avoid build_info_free() freeing their resource.
		 *
		 * The resource of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}

/* allocate the platform device for a newly discovered FIU of @type */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type)
{
	struct platform_device *fdev;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id need to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;

	INIT_LIST_HEAD(&binfo->sub_features);

	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}

/* release everything still owned by @binfo after enumeration stops */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}

/* size of a feature's register region, taken from the DFH next-offset field */
static inline u32 feature_size(u64 value)
{
	u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, value);

	/* workaround for private features with invalid size, use 4K instead */
	return ofst ?
	       ofst : 4096;
}

/* map DFH type/id bits to the feature id used by the DFL framework */
static u16 feature_id(u64 value)
{
	u16 id = FIELD_GET(DFH_ID, value);
	u8 type = FIELD_GET(DFH_TYPE, value);

	if (type == DFH_TYPE_FIU)
		return FEATURE_ID_FIU_HEADER;
	else if (type == DFH_TYPE_PRIVATE)
		return id;
	else if (type == DFH_TYPE_AFU)
		return FEATURE_ID_AFU;

	WARN_ON(1);
	return 0;
}

/* walk a DFHv1 parameter block chain looking for @param_id; NULL if absent */
static u64 *find_param(u64 *params, resource_size_t max, int param_id)
{
	u64 *end = params + max / sizeof(u64);
	u64 v, next;

	while (params < end) {
		v = *params;
		if (param_id == FIELD_GET(DFHv1_PARAM_HDR_ID, v))
			return params;

		/* stop at the end-of-parameters marker */
		if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
			break;

		next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
		params += next;
	}

	return NULL;
}

/**
 * dfh_find_param() - find parameter block for the given parameter id
 * @dfl_dev: dfl device
 * @param_id: id of dfl parameter
 * @psize: destination to store size of parameter data in bytes
 *
 * Return: pointer to start of parameter data, PTR_ERR otherwise.
 */
void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize)
{
	u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id);

	if (!phdr)
		return ERR_PTR(-ENOENT);

	if (psize)
		*psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64);

	return phdr + 1;
}
EXPORT_SYMBOL_GPL(dfh_find_param);

/* discover and validate the irq range used by the sub feature in @finfo */
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
			      resource_size_t ofst, struct dfl_feature_info *finfo)
{
	void __iomem *base = binfo->ioaddr + ofst;
	unsigned int i, ibase, inr = 0;
	void *params = finfo->params;
	enum dfl_id_type type;
	u16 fid = finfo->fid;
	int virq;
	u64 *p;
	u64 v;

	switch (finfo->dfh_version) {
	case 0:
		/*
		 * DFHv0 only provides MMIO resource information for each feature
		 * in the DFL header. There is no generic interrupt information.
		 * Instead, features with interrupt functionality provide
		 * the information in feature specific registers.
		 */
		type = feature_dev_id_type(binfo->feature_dev);
		if (type == PORT_ID) {
			switch (fid) {
			case PORT_FEATURE_ID_UINT:
				v = readq(base + PORT_UINT_CAP);
				ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
				inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
				break;
			case PORT_FEATURE_ID_ERROR:
				v = readq(base + PORT_ERROR_CAP);
				ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
				inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
				break;
			}
		} else if (type == FME_ID) {
			switch (fid) {
			case FME_FEATURE_ID_GLOBAL_ERR:
				v = readq(base + FME_ERROR_CAP);
				ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
				inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
				break;
			}
		}
		break;

	case 1:
		/*
		 * DFHv1 provides interrupt resource information in DFHv1
		 * parameter blocks.
		 */
		p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X);
		if (!p)
			break;

		p++;
		ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p);
		inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p);
		break;

	default:
		dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version);
		break;
	}

	if (!inr) {
		finfo->irq_base = 0;
		finfo->nr_irqs = 0;
		return 0;
	}

	dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
		fid, ibase, inr);

	/* the range must fit within the irqs of the enumerated device */
	if (ibase + inr > binfo->nr_irqs) {
		dev_err(binfo->dev,
			"Invalid interrupt number in feature 0x%x\n", fid);
		return -EINVAL;
	}

	for (i = 0; i < inr; i++) {
		virq = binfo->irq_table[ibase + i];
		if (virq < 0 || virq > NR_IRQS) {
			dev_err(binfo->dev,
				"Invalid irq table entry for feature 0x%x\n",
				fid);
			return -EINVAL;
		}
	}

	finfo->irq_base = ibase;
	finfo->nr_irqs = inr;

	return 0;
}

/* return total size in bytes of a DFHv1 parameter block list, or -errno */
static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max)
{
	int size = 0;
	u64 v, next;

	if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS,
		       readq(dfh_base + DFHv1_CSR_SIZE_GRP)))
		return 0;

	while (size + DFHv1_PARAM_HDR < max) {
		v = readq(dfh_base + DFHv1_PARAM_HDR + size);

		next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
		if (!next)
			return -EINVAL;

		size += next * sizeof(u64);

		if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
			return size;
	}

	return -ENOENT;
}

/*
 * when create sub feature instances, for private features, it doesn't need
 * to provide resource size and feature id as they could be read from DFH
 * register.
 * For afu sub feature, its register region only contains user
 * defined registers, so never trust any information from it, just use the
 * resource size information provided by its parent FIU.
 */
static int
create_feature_instance(struct build_feature_devs_info *binfo,
			resource_size_t ofst, resource_size_t size, u16 fid)
{
	struct dfl_feature_info *finfo;
	resource_size_t start, end;
	int dfh_psize = 0;
	u8 revision = 0;
	u64 v, addr_off;
	u8 dfh_ver = 0;
	int ret;

	if (fid != FEATURE_ID_AFU) {
		v = readq(binfo->ioaddr + ofst);
		revision = FIELD_GET(DFH_REVISION, v);
		dfh_ver = FIELD_GET(DFH_VERSION, v);
		/* read feature size and id if inputs are invalid */
		size = size ? size : feature_size(v);
		fid = fid ? fid : feature_id(v);
		if (dfh_ver == 1) {
			dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size);
			if (dfh_psize < 0) {
				dev_err(binfo->dev,
					"failed to read size of DFHv1 parameters %d\n",
					dfh_psize);
				return dfh_psize;
			}
			dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize);
		}
	}

	/* the feature must fit within the remaining FIU register region */
	if (binfo->len - ofst < size)
		return -EINVAL;

	finfo = kzalloc(struct_size(finfo, params, dfh_psize / sizeof(u64)), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;

	memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize);
	finfo->param_size = dfh_psize;

	finfo->fid = fid;
	finfo->revision = revision;
	finfo->dfh_version = dfh_ver;
	if (dfh_ver == 1) {
		/* DFHv1 registers may live at an address given by CSR_ADDR */
		v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR);
		addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v);
		if (FIELD_GET(DFHv1_CSR_ADDR_REL, v))
			start = addr_off << 1;
		else
			start = binfo->start + ofst + addr_off;

		v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP);
		end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1;
	} else {
		start = binfo->start + ofst;
		end = start + size - 1;
	}
	finfo->mmio_res.flags = IORESOURCE_MEM;
	finfo->mmio_res.start = start;
	finfo->mmio_res.end = end;

	ret = parse_feature_irqs(binfo, ofst, finfo);
	if (ret) {
		kfree(finfo);
		return ret;
	}

	list_add_tail(&finfo->node, &binfo->sub_features);
	binfo->feature_num++;

	return 0;
}

/* AFU size comes from the parent port's capability register, see the comment above */
static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
				  resource_size_t ofst)
{
	u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
	u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;

	WARN_ON(!size);

	return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU);
}

#define is_feature_dev_detected(binfo) (!!(binfo)->feature_dev)

static int parse_feature_afu(struct build_feature_devs_info *binfo,
			     resource_size_t ofst)
{
	if (!is_feature_dev_detected(binfo)) {
		dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
		return -EINVAL;
	}

	switch (feature_dev_id_type(binfo->feature_dev)) {
	case PORT_ID:
		return parse_feature_port_afu(binfo, ofst);
	default:
		dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
			 binfo->feature_dev->name);
	}

	return 0;
}

/* claim and map the register region of the FIU about to be enumerated */
static int build_info_prepare(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	struct device *dev = binfo->dev;
	void __iomem *ioaddr;

	if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
		dev_err(dev, "request region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -EBUSY;
	}

	ioaddr = devm_ioremap(dev, start, len);
	if (!ioaddr) {
		dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -ENOMEM;
	}

	binfo->start = start;
	binfo->len = len;
	binfo->ioaddr = ioaddr;

	return 0;
}
/* undo build_info_prepare(): unmap and release the current MMIO window */
static void build_info_complete(struct build_feature_devs_info *binfo)
{
	devm_iounmap(binfo->dev, binfo->ioaddr);
	devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
}

static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     resource_size_t ofst)
{
	int ret = 0;
	u32 offset;
	u16 id;
	u64 v;

	/*
	 * A new FIU terminates the previous one: release the old window,
	 * commit the previous feature dev, then re-window the mapping so
	 * it starts at this FIU's header.
	 */
	if (is_feature_dev_detected(binfo)) {
		build_info_complete(binfo);

		ret = build_info_commit_dev(binfo);
		if (ret)
			return ret;

		ret = build_info_prepare(binfo, binfo->start + ofst,
					 binfo->len - ofst);
		if (ret)
			return ret;
	}

	/* the FIU's DFH now sits at the start of the current window */
	v = readq(binfo->ioaddr + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for dfl feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id));
	if (ret)
		return ret;

	/* the FIU header itself is feature 0 of the new dev */
	ret = create_feature_instance(binfo, 0, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(binfo->ioaddr + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}

static int parse_feature_private(struct build_feature_devs_info *binfo,
				 resource_size_t ofst)
{
	/* private features only make sense inside an already-detected FIU */
	if (!is_feature_dev_detected(binfo)) {
		dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
			feature_id(readq(binfo->ioaddr + ofst)));
		return -EINVAL;
	}

	/* size 0 lets create_feature_instance derive it from the DFH */
	return create_feature_instance(binfo, ofst, 0, 0);
}

/**
 * parse_feature - parse a feature on given device feature list
 *
 * @binfo: build feature devices information.
 * @ofst: offset to current FIU header
 */
static int parse_feature(struct build_feature_devs_info *binfo,
			 resource_size_t ofst)
{
	u64 v;
	u32 type;

	/* each DFL entry starts with a Device Feature Header (DFH) */
	v = readq(binfo->ioaddr + ofst + DFH);
	type = FIELD_GET(DFH_TYPE, v);

	/* dispatch on the header type; unknown types are skipped, not fatal */
	switch (type) {
	case DFH_TYPE_AFU:
		return parse_feature_afu(binfo, ofst);
	case DFH_TYPE_PRIVATE:
		return parse_feature_private(binfo, ofst);
	case DFH_TYPE_FIU:
		return parse_feature_fiu(binfo, ofst);
	default:
		dev_info(binfo->dev,
			 "Feature Type %x is not supported.\n", type);
	}

	return 0;
}

/*
 * Walk one Device Feature List at absolute address [start, start + len),
 * creating feature devices for every FIU/AFU/private feature found.
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	resource_size_t end = start + len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	ret = build_info_prepare(binfo, start, len);
	if (ret)
		return ret;

	/* walk through the device feature list via DFH's next DFH pointer.
	 */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		/*
		 * start is absolute; subtract binfo->start because the window
		 * may have been rebased by parse_feature_fiu().
		 */
		ret = parse_feature(binfo, start - binfo->start);
		if (ret)
			return ret;

		v = readq(binfo->ioaddr + start - binfo->start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	build_info_complete(binfo);

	if (is_feature_dev_detected(binfo))
		ret = build_info_commit_dev(binfo);

	return ret;
}

/*
 * Allocate an enumeration info structure tied to @dev. Holds a device
 * reference until dfl_fpga_enum_info_free() is called.
 */
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		/* drop the reference taken above on allocation failure */
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);

/* free an enum info and everything hanging off it; NULL is a no-op */
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}

	/* remove irq table */
	if (info->irq_table)
		devm_kfree(dev, info->irq_table);

	devm_kfree(dev, info);
	/* balances the get_device() in dfl_fpga_enum_info_alloc() */
	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);

/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
 * @len: mmio resource length of the device feature list.
 *
 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
 * function to add information of each DFL to common data structure for next
 * step enumeration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
			       resource_size_t start, resource_size_t len)
{
	struct dfl_fpga_enum_dfl *dfl;

	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
	if (!dfl)
		return -ENOMEM;

	dfl->start = start;
	dfl->len = len;

	/* appended in call order; enumeration walks the list in this order */
	list_add_tail(&dfl->node, &info->dfls);

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);

/**
 * dfl_fpga_enum_info_add_irq - add irq table to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
 *	       this device.
 *
 * One FPGA device may have several interrupts. This function adds irq
 * information of the DFL fpga device to enum info for next step enumeration.
 * This function should be called before dfl_fpga_feature_devs_enumerate().
 * As we only support one irq domain for all DFLs in the same enum info, adding
 * irq table a second time for the same enum info will return error.
 *
 * If we need to enumerate DFLs which belong to different irq domains, we
 * should fill more enum info and enumerate them one by one.
 *
 * Return: 0 on success, negative error code otherwise.
1495 */ 1496 int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info, 1497 unsigned int nr_irqs, int *irq_table) 1498 { 1499 if (!nr_irqs || !irq_table) 1500 return -EINVAL; 1501 1502 if (info->irq_table) 1503 return -EEXIST; 1504 1505 info->irq_table = devm_kmemdup(info->dev, irq_table, 1506 sizeof(int) * nr_irqs, GFP_KERNEL); 1507 if (!info->irq_table) 1508 return -ENOMEM; 1509 1510 info->nr_irqs = nr_irqs; 1511 1512 return 0; 1513 } 1514 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq); 1515 1516 static int remove_feature_dev(struct device *dev, void *data) 1517 { 1518 struct platform_device *pdev = to_platform_device(dev); 1519 enum dfl_id_type type = feature_dev_id_type(pdev); 1520 int id = pdev->id; 1521 1522 platform_device_unregister(pdev); 1523 1524 dfl_id_free(type, id); 1525 1526 return 0; 1527 } 1528 1529 static void remove_feature_devs(struct dfl_fpga_cdev *cdev) 1530 { 1531 device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev); 1532 } 1533 1534 /** 1535 * dfl_fpga_feature_devs_enumerate - enumerate feature devices 1536 * @info: information for enumeration. 1537 * 1538 * This function creates a container device (base FPGA region), enumerates 1539 * feature devices based on the enumeration info and creates platform devices 1540 * under the container device. 
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	/* the base region acts as the container for all feature devices */
	cdev->region = fpga_region_register(info->dev, NULL, NULL);
	if (IS_ERR(cdev->region)) {
		ret = PTR_ERR(cdev->region);
		goto free_cdev_exit;
	}

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	binfo->nr_irqs = info->nr_irqs;
	if (info->nr_irqs)
		binfo->irq_table = info->irq_table;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl->start, dfl->len);
		if (ret) {
			/* undo the devices created by earlier lists too */
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);

/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature devices under given container
 * devices.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata, *ptmp;

	mutex_lock(&cdev->lock);
	/* drop the FME reference if one was taken during enumeration */
	if (cdev->fme_dev)
		put_device(cdev->fme_dev);

	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
		struct platform_device *port_dev = pdata->dev;

		/* remove released ports */
		if (!device_is_registered(&port_dev->dev)) {
			/*
			 * a released port was device_del()ed (see
			 * dfl_fpga_cdev_release_port) so remove_feature_devs
			 * below won't see it; free its id and extra ref here
			 */
			dfl_id_free(feature_dev_id_type(port_dev),
				    port_dev->id);
			platform_device_put(port_dev);
		}

		list_del(&pdata->node);
		/* drops the reference held by the port_dev_list entry */
		put_device(&port_dev->dev);
	}
	mutex_unlock(&cdev->lock);

	remove_feature_devs(cdev);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);

/**
 * __dfl_fpga_cdev_find_port - find a port under given container device
 *
 * @cdev: container device
 * @data: data passed to match function
 * @match: match function used to find specific port from the port device list
 *
 * Find a port device under container device. This function needs to be
 * invoked with lock held.
 *
 * Return: pointer to port's platform device if successful, NULL otherwise.
 *
 * NOTE: you will need to drop the device reference with put_device() after use.
 */
struct platform_device *
__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
			  int (*match)(struct platform_device *, void *))
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_dev;

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		port_dev = pdata->dev;

		/* take a reference for the caller; they must put_device() it */
		if (match(port_dev, data) && get_device(&port_dev->dev))
			return port_dev;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);

static int __init dfl_fpga_init(void)
{
	int ret;

	ret = bus_register(&dfl_bus_type);
	if (ret)
		return ret;

	dfl_ids_init();

	/* unwind ids and bus registration if chardev setup fails */
	ret = dfl_chardev_init();
	if (ret) {
		dfl_ids_destroy();
		bus_unregister(&dfl_bus_type);
	}

	return ret;
}

/**
 * dfl_fpga_cdev_release_port - release a port platform device
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to release a port platform device. This is a
 * mandatory step before turn a port from PF into VF for SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	/* already released ports cannot be released again */
	if (!device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	pdata = dev_get_platdata(&port_pdev->dev);

	/*
	 * claim exclusive use (nested under cdev->lock) so the port can't
	 * be opened while it is being taken away
	 */
	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, true);
	mutex_unlock(&pdata->lock);
	if (ret)
		goto put_dev_exit;

	/* del (not unregister): the device object survives for assign_port */
	platform_device_del(port_pdev);
	cdev->released_port_num++;
put_dev_exit:
	/* balances the reference taken by __dfl_fpga_cdev_find_port() */
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);

/**
 * dfl_fpga_cdev_assign_port - assign a port platform device back
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to assign a port platform device back. This is
 * a mandatory step after disable SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_platform_data *pdata;
	struct platform_device *port_pdev;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
					      dfl_fpga_check_port_id);
	if (!port_pdev)
		goto unlock_exit;

	/* only a previously released (device_del'ed) port can be assigned back */
	if (device_is_registered(&port_pdev->dev)) {
		ret = -EBUSY;
		goto put_dev_exit;
	}

	ret = platform_device_add(port_pdev);
	if (ret)
		goto put_dev_exit;

	pdata = dev_get_platdata(&port_pdev->dev);

	/* release the exclusive-use claim taken in release_port */
	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);
	mutex_unlock(&pdata->lock);

	cdev->released_port_num--;
put_dev_exit:
	/* balances the reference taken by __dfl_fpga_cdev_find_port() */
	put_device(&port_pdev->dev);
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);

/*
 * Flip a port's access-control field in the FME header between PF and VF
 * via a read-modify-write of the per-port offset register.
 */
static void config_port_access_mode(struct device *fme_dev, int port_id,
				    bool is_vf)
{
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);

	v = readq(base + FME_HDR_PORT_OFST(port_id));

	v &= ~FME_PORT_OFST_ACC_CTRL;
	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);

	writeq(v, base + FME_HDR_PORT_OFST(port_id));
}

#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)

/**
 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
 *
 * @cdev: parent container device.
 *
 * This function is needed in sriov configuration routine. It could be used to
 * configure the all released ports from VF access mode to PF.
 */
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata;

	mutex_lock(&cdev->lock);
	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		/* only released (unregistered) ports change access mode */
		if (device_is_registered(&pdata->dev->dev))
			continue;

		config_port_pf_mode(cdev->fme_dev, pdata->id);
	}
	mutex_unlock(&cdev->lock);
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);

/**
 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
 *
 * @cdev: parent container device.
 * @num_vfs: VF device number.
 *
 * This function is needed in sriov configuration routine. It could be used to
 * configure the released ports from PF access mode to VF.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
	struct dfl_feature_platform_data *pdata;
	int ret = 0;

	mutex_lock(&cdev->lock);
	/*
	 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
	 * device, so if released port number doesn't match VF device number,
	 * then reject the request with -EINVAL error code.
	 */
	if (cdev->released_port_num != num_vfs) {
		ret = -EINVAL;
		goto done;
	}

	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
		/* only released (unregistered) ports are switched to VF mode */
		if (device_is_registered(&pdata->dev->dev))
			continue;

		config_port_vf_mode(cdev->fme_dev, pdata->id);
	}
done:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);

/* hard irq handler: just signal the eventfd bound to this interrupt */
static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger);
	return IRQ_HANDLED;
}

/*
 * Bind (fd >= 0) or unbind (fd < 0) one feature interrupt to an eventfd.
 * Any existing binding for irq_ctx[idx] is torn down first.
 */
static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
			      int fd)
{
	struct platform_device *pdev = feature->dev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	irq = feature->irq_ctx[idx].irq;

	/* tear down the previous binding, if any */
	if (feature->irq_ctx[idx].trigger) {
		free_irq(irq, feature->irq_ctx[idx].trigger);
		kfree(feature->irq_ctx[idx].name);
		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
		feature->irq_ctx[idx].trigger = NULL;
	}

	/* negative fd means unbind only */
	if (fd < 0)
		return 0;

	feature->irq_ctx[idx].name =
		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
			  dev_name(&pdev->dev), feature->id);
	if (!feature->irq_ctx[idx].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto free_name;
	}

	ret = request_irq(irq, dfl_irq_handler, 0,
			  feature->irq_ctx[idx].name, trigger);
	if (!ret) {
		feature->irq_ctx[idx].trigger = trigger;
		return ret;
	}

	/* request_irq failed: release the eventfd and the name */
	eventfd_ctx_put(trigger);
free_name:
	kfree(feature->irq_ctx[idx].name);

	return ret;
}

/**
 * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
 *
 * @feature: dfl sub feature.
 * @start: start of irq index in this dfl sub feature.
 * @count: number of irqs.
 * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative.
 *	 unbind "count" specified number of irqs if fds ptr is NULL.
 *
 * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if
 * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is
 * NULL.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
			      unsigned int count, int32_t *fds)
{
	unsigned int i;
	int ret = 0;

	/* overflow */
	if (unlikely(start + count < start))
		return -EINVAL;

	/* exceeds nr_irqs */
	if (start + count > feature->nr_irqs)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		int fd = fds ? fds[i] : -1;

		ret = do_set_irq_trigger(feature, start + i, fd);
		if (ret) {
			/* roll back: unbind everything bound so far */
			while (i--)
				do_set_irq_trigger(feature, start + i, -1);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);

/**
 * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
				    struct dfl_feature *feature,
				    unsigned long arg)
{
	/* copy the irq count to the userspace pointer in arg */
	return put_user(feature->nr_irqs, (__u32 __user *)arg);
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);

/**
 * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
			       struct dfl_feature *feature,
			       unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fpga_irq_set hdr;
	s32 *fds;
	long ret;

	if (!feature->nr_irqs)
		return -ENOENT;

	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	/* validate the user-supplied range, including start + count overflow */
	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
	    (hdr.start + hdr.count < hdr.start))
		return -EINVAL;

	/* the fd array follows the header in the ioctl payload */
	fds = memdup_array_user((void __user *)(arg + sizeof(hdr)),
				hdr.count, sizeof(s32));
	if (IS_ERR(fds))
		return PTR_ERR(fds);

	mutex_lock(&pdata->lock);
	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
	mutex_unlock(&pdata->lock);

	kfree(fds);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);

static void __exit dfl_fpga_exit(void)
{
	/* mirror of dfl_fpga_init(), in reverse order */
	dfl_chardev_uinit();
	dfl_ids_destroy();
	bus_unregister(&dfl_bus_type);
}

module_init(dfl_fpga_init);
module_exit(dfl_fpga_exit);

MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");