1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Driver for FPGA Device Feature List (DFL) Support 4 * 5 * Copyright (C) 2017-2018 Intel Corporation, Inc. 6 * 7 * Authors: 8 * Kang Luwei <luwei.kang@intel.com> 9 * Zhang Yi <yi.z.zhang@intel.com> 10 * Wu Hao <hao.wu@intel.com> 11 * Xiao Guangrong <guangrong.xiao@linux.intel.com> 12 */ 13 #include <linux/dfl.h> 14 #include <linux/fpga-dfl.h> 15 #include <linux/module.h> 16 #include <linux/overflow.h> 17 #include <linux/uaccess.h> 18 19 #include "dfl.h" 20 21 static DEFINE_MUTEX(dfl_id_mutex); 22 23 /* 24 * when adding a new feature dev support in DFL framework, it's required to 25 * add a new item in enum dfl_id_type and provide related information in below 26 * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for 27 * platform device creation (define name strings in dfl.h, as they could be 28 * reused by platform device drivers). 29 * 30 * if the new feature dev needs chardev support, then it's required to add 31 * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as 32 * index to dfl_chardevs table. If no chardev support just set devt_type 33 * as one invalid index (DFL_FPGA_DEVT_MAX). 34 */ 35 enum dfl_fpga_devt_type { 36 DFL_FPGA_DEVT_FME, 37 DFL_FPGA_DEVT_PORT, 38 DFL_FPGA_DEVT_MAX, 39 }; 40 41 static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX]; 42 43 static const char *dfl_pdata_key_strings[DFL_ID_MAX] = { 44 "dfl-fme-pdata", 45 "dfl-port-pdata", 46 }; 47 48 /** 49 * struct dfl_dev_info - dfl feature device information. 50 * @name: name string of the feature platform device. 51 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec. 52 * @id: idr id of the feature dev. 53 * @devt_type: index to dfl_chrdevs[]. 
54 */ 55 struct dfl_dev_info { 56 const char *name; 57 u16 dfh_id; 58 struct idr id; 59 enum dfl_fpga_devt_type devt_type; 60 }; 61 62 /* it is indexed by dfl_id_type */ 63 static struct dfl_dev_info dfl_devs[] = { 64 {.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME, 65 .devt_type = DFL_FPGA_DEVT_FME}, 66 {.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT, 67 .devt_type = DFL_FPGA_DEVT_PORT}, 68 }; 69 70 /** 71 * struct dfl_chardev_info - chardev information of dfl feature device 72 * @name: nmae string of the char device. 73 * @devt: devt of the char device. 74 */ 75 struct dfl_chardev_info { 76 const char *name; 77 dev_t devt; 78 }; 79 80 /* indexed by enum dfl_fpga_devt_type */ 81 static struct dfl_chardev_info dfl_chrdevs[] = { 82 {.name = DFL_FPGA_FEATURE_DEV_FME}, 83 {.name = DFL_FPGA_FEATURE_DEV_PORT}, 84 }; 85 86 static void dfl_ids_init(void) 87 { 88 int i; 89 90 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) 91 idr_init(&dfl_devs[i].id); 92 } 93 94 static void dfl_ids_destroy(void) 95 { 96 int i; 97 98 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) 99 idr_destroy(&dfl_devs[i].id); 100 } 101 102 static int dfl_id_alloc(enum dfl_id_type type, struct device *dev) 103 { 104 int id; 105 106 WARN_ON(type >= DFL_ID_MAX); 107 mutex_lock(&dfl_id_mutex); 108 id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL); 109 mutex_unlock(&dfl_id_mutex); 110 111 return id; 112 } 113 114 static void dfl_id_free(enum dfl_id_type type, int id) 115 { 116 WARN_ON(type >= DFL_ID_MAX); 117 mutex_lock(&dfl_id_mutex); 118 idr_remove(&dfl_devs[type].id, id); 119 mutex_unlock(&dfl_id_mutex); 120 } 121 122 static enum dfl_id_type dfh_id_to_type(u16 id) 123 { 124 int i; 125 126 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++) 127 if (dfl_devs[i].dfh_id == id) 128 return i; 129 130 return DFL_ID_MAX; 131 } 132 133 /* 134 * introduce a global port_ops list, it allows port drivers to register ops 135 * in such list, then other feature devices (e.g. 
FME), could use the port 136 * functions even related port platform device is hidden. Below is one example, 137 * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is 138 * enabled, port (and it's AFU) is turned into VF and port platform device 139 * is hidden from system but it's still required to access port to finish FPGA 140 * reconfiguration function in FME. 141 */ 142 143 static DEFINE_MUTEX(dfl_port_ops_mutex); 144 static LIST_HEAD(dfl_port_ops_list); 145 146 /** 147 * dfl_fpga_port_ops_get - get matched port ops from the global list 148 * @fdata: feature dev data to match with associated port ops. 149 * Return: matched port ops on success, NULL otherwise. 150 * 151 * Please note that must dfl_fpga_port_ops_put after use the port_ops. 152 */ 153 struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata) 154 { 155 struct dfl_fpga_port_ops *ops = NULL; 156 157 mutex_lock(&dfl_port_ops_mutex); 158 if (list_empty(&dfl_port_ops_list)) 159 goto done; 160 161 list_for_each_entry(ops, &dfl_port_ops_list, node) { 162 /* match port_ops using the name of platform device */ 163 if (!strcmp(fdata->pdev_name, ops->name)) { 164 if (!try_module_get(ops->owner)) 165 ops = NULL; 166 goto done; 167 } 168 } 169 170 ops = NULL; 171 done: 172 mutex_unlock(&dfl_port_ops_mutex); 173 return ops; 174 } 175 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get); 176 177 /** 178 * dfl_fpga_port_ops_put - put port ops 179 * @ops: port ops. 180 */ 181 void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops) 182 { 183 if (ops && ops->owner) 184 module_put(ops->owner); 185 } 186 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put); 187 188 /** 189 * dfl_fpga_port_ops_add - add port_ops to global list 190 * @ops: port ops to add. 
191 */ 192 void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops) 193 { 194 mutex_lock(&dfl_port_ops_mutex); 195 list_add_tail(&ops->node, &dfl_port_ops_list); 196 mutex_unlock(&dfl_port_ops_mutex); 197 } 198 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add); 199 200 /** 201 * dfl_fpga_port_ops_del - remove port_ops from global list 202 * @ops: port ops to del. 203 */ 204 void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops) 205 { 206 mutex_lock(&dfl_port_ops_mutex); 207 list_del(&ops->node); 208 mutex_unlock(&dfl_port_ops_mutex); 209 } 210 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del); 211 212 /** 213 * dfl_fpga_check_port_id - check the port id 214 * @fdata: port feature dev data. 215 * @pport_id: port id to compare. 216 * 217 * Return: 1 if port device matches with given port id, otherwise 0. 218 */ 219 int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id) 220 { 221 struct dfl_fpga_port_ops *port_ops; 222 223 if (fdata->id != FEATURE_DEV_ID_UNUSED) 224 return fdata->id == *(int *)pport_id; 225 226 port_ops = dfl_fpga_port_ops_get(fdata); 227 if (!port_ops || !port_ops->get_id) 228 return 0; 229 230 fdata->id = port_ops->get_id(fdata); 231 dfl_fpga_port_ops_put(port_ops); 232 233 return fdata->id == *(int *)pport_id; 234 } 235 EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); 236 237 static DEFINE_IDA(dfl_device_ida); 238 239 static const struct dfl_device_id * 240 dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev) 241 { 242 if (id->type == ddev->type && id->feature_id == ddev->feature_id) 243 return id; 244 245 return NULL; 246 } 247 248 static int dfl_bus_match(struct device *dev, const struct device_driver *drv) 249 { 250 struct dfl_device *ddev = to_dfl_dev(dev); 251 const struct dfl_driver *ddrv = to_dfl_drv(drv); 252 const struct dfl_device_id *id_entry; 253 254 id_entry = ddrv->id_table; 255 if (id_entry) { 256 while (id_entry->feature_id) { 257 if (dfl_match_one_device(id_entry, ddev)) { 258 ddev->id_entry = id_entry; 
259 return 1; 260 } 261 id_entry++; 262 } 263 } 264 265 return 0; 266 } 267 268 static int dfl_bus_probe(struct device *dev) 269 { 270 struct dfl_driver *ddrv = to_dfl_drv(dev->driver); 271 struct dfl_device *ddev = to_dfl_dev(dev); 272 273 return ddrv->probe(ddev); 274 } 275 276 static void dfl_bus_remove(struct device *dev) 277 { 278 struct dfl_driver *ddrv = to_dfl_drv(dev->driver); 279 struct dfl_device *ddev = to_dfl_dev(dev); 280 281 if (ddrv->remove) 282 ddrv->remove(ddev); 283 } 284 285 static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) 286 { 287 const struct dfl_device *ddev = to_dfl_dev(dev); 288 289 return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X", 290 ddev->type, ddev->feature_id); 291 } 292 293 static ssize_t 294 type_show(struct device *dev, struct device_attribute *attr, char *buf) 295 { 296 struct dfl_device *ddev = to_dfl_dev(dev); 297 298 return sprintf(buf, "0x%x\n", ddev->type); 299 } 300 static DEVICE_ATTR_RO(type); 301 302 static ssize_t 303 feature_id_show(struct device *dev, struct device_attribute *attr, char *buf) 304 { 305 struct dfl_device *ddev = to_dfl_dev(dev); 306 307 return sprintf(buf, "0x%x\n", ddev->feature_id); 308 } 309 static DEVICE_ATTR_RO(feature_id); 310 311 static struct attribute *dfl_dev_attrs[] = { 312 &dev_attr_type.attr, 313 &dev_attr_feature_id.attr, 314 NULL, 315 }; 316 ATTRIBUTE_GROUPS(dfl_dev); 317 318 static const struct bus_type dfl_bus_type = { 319 .name = "dfl", 320 .match = dfl_bus_match, 321 .probe = dfl_bus_probe, 322 .remove = dfl_bus_remove, 323 .uevent = dfl_bus_uevent, 324 .dev_groups = dfl_dev_groups, 325 }; 326 327 static void release_dfl_dev(struct device *dev) 328 { 329 struct dfl_device *ddev = to_dfl_dev(dev); 330 331 if (ddev->mmio_res.parent) 332 release_resource(&ddev->mmio_res); 333 334 kfree(ddev->params); 335 336 ida_free(&dfl_device_ida, ddev->id); 337 kfree(ddev->irqs); 338 kfree(ddev); 339 } 340 341 static struct dfl_device * 342 dfl_dev_add(struct 
dfl_feature_dev_data *fdata, 343 struct dfl_feature *feature) 344 { 345 struct platform_device *pdev = fdata->dev; 346 struct resource *parent_res; 347 struct dfl_device *ddev; 348 int id, i, ret; 349 350 ddev = kzalloc_obj(*ddev, GFP_KERNEL); 351 if (!ddev) 352 return ERR_PTR(-ENOMEM); 353 354 id = ida_alloc(&dfl_device_ida, GFP_KERNEL); 355 if (id < 0) { 356 dev_err(&pdev->dev, "unable to get id\n"); 357 kfree(ddev); 358 return ERR_PTR(id); 359 } 360 361 /* freeing resources by put_device() after device_initialize() */ 362 device_initialize(&ddev->dev); 363 ddev->dev.parent = &pdev->dev; 364 ddev->dev.bus = &dfl_bus_type; 365 ddev->dev.release = release_dfl_dev; 366 ddev->id = id; 367 ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id); 368 if (ret) 369 goto put_dev; 370 371 ddev->type = fdata->type; 372 ddev->feature_id = feature->id; 373 ddev->revision = feature->revision; 374 ddev->dfh_version = feature->dfh_version; 375 ddev->cdev = fdata->dfl_cdev; 376 if (feature->param_size) { 377 ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL); 378 if (!ddev->params) { 379 ret = -ENOMEM; 380 goto put_dev; 381 } 382 ddev->param_size = feature->param_size; 383 } 384 385 /* add mmio resource */ 386 parent_res = &pdev->resource[feature->resource_index]; 387 ddev->mmio_res.flags = IORESOURCE_MEM; 388 ddev->mmio_res.start = parent_res->start; 389 ddev->mmio_res.end = parent_res->end; 390 ddev->mmio_res.name = dev_name(&ddev->dev); 391 ret = insert_resource(parent_res, &ddev->mmio_res); 392 if (ret) { 393 dev_err(&pdev->dev, "%s failed to claim resource: %pR\n", 394 dev_name(&ddev->dev), &ddev->mmio_res); 395 goto put_dev; 396 } 397 398 /* then add irq resource */ 399 if (feature->nr_irqs) { 400 ddev->irqs = kzalloc_objs(*ddev->irqs, feature->nr_irqs, 401 GFP_KERNEL); 402 if (!ddev->irqs) { 403 ret = -ENOMEM; 404 goto put_dev; 405 } 406 407 for (i = 0; i < feature->nr_irqs; i++) 408 ddev->irqs[i] = feature->irq_ctx[i].irq; 409 410 ddev->num_irqs = 
feature->nr_irqs; 411 } 412 413 ret = device_add(&ddev->dev); 414 if (ret) 415 goto put_dev; 416 417 dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev)); 418 return ddev; 419 420 put_dev: 421 /* calls release_dfl_dev() which does the clean up */ 422 put_device(&ddev->dev); 423 return ERR_PTR(ret); 424 } 425 426 static void dfl_devs_remove(struct dfl_feature_dev_data *fdata) 427 { 428 struct dfl_feature *feature; 429 430 dfl_fpga_dev_for_each_feature(fdata, feature) { 431 if (feature->ddev) { 432 device_unregister(&feature->ddev->dev); 433 feature->ddev = NULL; 434 } 435 } 436 } 437 438 static int dfl_devs_add(struct dfl_feature_dev_data *fdata) 439 { 440 struct dfl_feature *feature; 441 struct dfl_device *ddev; 442 int ret; 443 444 dfl_fpga_dev_for_each_feature(fdata, feature) { 445 if (feature->ioaddr) 446 continue; 447 448 if (feature->ddev) { 449 ret = -EEXIST; 450 goto err; 451 } 452 453 ddev = dfl_dev_add(fdata, feature); 454 if (IS_ERR(ddev)) { 455 ret = PTR_ERR(ddev); 456 goto err; 457 } 458 459 feature->ddev = ddev; 460 } 461 462 return 0; 463 464 err: 465 dfl_devs_remove(fdata); 466 return ret; 467 } 468 469 int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner) 470 { 471 if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table) 472 return -EINVAL; 473 474 dfl_drv->drv.owner = owner; 475 dfl_drv->drv.bus = &dfl_bus_type; 476 477 return driver_register(&dfl_drv->drv); 478 } 479 EXPORT_SYMBOL(__dfl_driver_register); 480 481 void dfl_driver_unregister(struct dfl_driver *dfl_drv) 482 { 483 driver_unregister(&dfl_drv->drv); 484 } 485 EXPORT_SYMBOL(dfl_driver_unregister); 486 487 #define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER) 488 489 /** 490 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device 491 * @pdev: feature device. 
492 */ 493 void dfl_fpga_dev_feature_uinit(struct platform_device *pdev) 494 { 495 struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); 496 struct dfl_feature *feature; 497 498 dfl_devs_remove(fdata); 499 500 dfl_fpga_dev_for_each_feature(fdata, feature) { 501 if (feature->ops) { 502 if (feature->ops->uinit) 503 feature->ops->uinit(pdev, feature); 504 feature->ops = NULL; 505 } 506 } 507 } 508 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit); 509 510 static int dfl_feature_instance_init(struct platform_device *pdev, 511 struct dfl_feature *feature, 512 struct dfl_feature_driver *drv) 513 { 514 void __iomem *base; 515 int ret = 0; 516 517 if (!is_header_feature(feature)) { 518 base = devm_platform_ioremap_resource(pdev, 519 feature->resource_index); 520 if (IS_ERR(base)) { 521 dev_err(&pdev->dev, 522 "ioremap failed for feature 0x%x!\n", 523 feature->id); 524 return PTR_ERR(base); 525 } 526 527 feature->ioaddr = base; 528 } 529 530 if (drv->ops->init) { 531 ret = drv->ops->init(pdev, feature); 532 if (ret) 533 return ret; 534 } 535 536 feature->ops = drv->ops; 537 538 return ret; 539 } 540 541 static bool dfl_feature_drv_match(struct dfl_feature *feature, 542 struct dfl_feature_driver *driver) 543 { 544 const struct dfl_feature_id *ids = driver->id_table; 545 546 if (ids) { 547 while (ids->id) { 548 if (ids->id == feature->id) 549 return true; 550 ids++; 551 } 552 } 553 return false; 554 } 555 556 /** 557 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device 558 * @pdev: feature device. 559 * @feature_drvs: drvs for sub features. 560 * 561 * This function will match sub features with given feature drvs list and 562 * use matched drv to init related sub feature. 563 * 564 * Return: 0 on success, negative error code otherwise. 
565 */ 566 int dfl_fpga_dev_feature_init(struct platform_device *pdev, 567 struct dfl_feature_driver *feature_drvs) 568 { 569 struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev); 570 struct dfl_feature_driver *drv = feature_drvs; 571 struct dfl_feature *feature; 572 int ret; 573 574 while (drv->ops) { 575 dfl_fpga_dev_for_each_feature(fdata, feature) { 576 if (dfl_feature_drv_match(feature, drv)) { 577 ret = dfl_feature_instance_init(pdev, feature, drv); 578 if (ret) 579 goto exit; 580 } 581 } 582 drv++; 583 } 584 585 ret = dfl_devs_add(fdata); 586 if (ret) 587 goto exit; 588 589 return 0; 590 exit: 591 dfl_fpga_dev_feature_uinit(pdev); 592 return ret; 593 } 594 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init); 595 596 static void dfl_chardev_uinit(void) 597 { 598 int i; 599 600 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) 601 if (MAJOR(dfl_chrdevs[i].devt)) { 602 unregister_chrdev_region(dfl_chrdevs[i].devt, 603 MINORMASK + 1); 604 dfl_chrdevs[i].devt = MKDEV(0, 0); 605 } 606 } 607 608 static int dfl_chardev_init(void) 609 { 610 int i, ret; 611 612 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) { 613 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, 614 MINORMASK + 1, dfl_chrdevs[i].name); 615 if (ret) 616 goto exit; 617 } 618 619 return 0; 620 621 exit: 622 dfl_chardev_uinit(); 623 return ret; 624 } 625 626 static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id) 627 { 628 if (type >= DFL_FPGA_DEVT_MAX) 629 return 0; 630 631 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id); 632 } 633 634 /** 635 * dfl_fpga_dev_ops_register - register cdev ops for feature dev 636 * 637 * @pdev: feature dev. 638 * @fops: file operations for feature dev's cdev. 639 * @owner: owning module/driver. 640 * 641 * Return: 0 on success, negative error code otherwise. 
642 */ 643 int dfl_fpga_dev_ops_register(struct platform_device *pdev, 644 const struct file_operations *fops, 645 struct module *owner) 646 { 647 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 648 649 cdev_init(&pdata->cdev, fops); 650 pdata->cdev.owner = owner; 651 652 /* 653 * set parent to the feature device so that its refcount is 654 * decreased after the last refcount of cdev is gone, that 655 * makes sure the feature device is valid during device 656 * file's life-cycle. 657 */ 658 pdata->cdev.kobj.parent = &pdev->dev.kobj; 659 660 return cdev_add(&pdata->cdev, pdev->dev.devt, 1); 661 } 662 EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register); 663 664 /** 665 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev 666 * @pdev: feature dev. 667 */ 668 void dfl_fpga_dev_ops_unregister(struct platform_device *pdev) 669 { 670 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 671 672 cdev_del(&pdata->cdev); 673 } 674 EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister); 675 676 /** 677 * struct build_feature_devs_info - info collected during feature dev build. 678 * 679 * @dev: device to enumerate. 680 * @cdev: the container device for all feature devices. 681 * @nr_irqs: number of irqs for all feature devices. 682 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of 683 * this device. 684 * @type: the current FIU type. 685 * @ioaddr: header register region address of current FIU in enumeration. 686 * @start: register resource start of current FIU. 687 * @len: max register resource length of current FIU. 688 * @sub_features: a sub features linked list for feature device in enumeration. 689 * @feature_num: number of sub features for feature device in enumeration. 
690 */ 691 struct build_feature_devs_info { 692 struct device *dev; 693 struct dfl_fpga_cdev *cdev; 694 unsigned int nr_irqs; 695 int *irq_table; 696 697 enum dfl_id_type type; 698 void __iomem *ioaddr; 699 resource_size_t start; 700 resource_size_t len; 701 struct list_head sub_features; 702 int feature_num; 703 }; 704 705 /** 706 * struct dfl_feature_info - sub feature info collected during feature dev build 707 * 708 * @fid: id of this sub feature. 709 * @revision: revision of this sub feature 710 * @dfh_version: version of Device Feature Header (DFH) 711 * @mmio_res: mmio resource of this sub feature. 712 * @ioaddr: mapped base address of mmio resource. 713 * @node: node in sub_features linked list. 714 * @irq_base: start of irq index in this sub feature. 715 * @nr_irqs: number of irqs of this sub feature. 716 * @param_size: size DFH parameters. 717 * @params: DFH parameter data. 718 */ 719 struct dfl_feature_info { 720 u16 fid; 721 u8 revision; 722 u8 dfh_version; 723 struct resource mmio_res; 724 void __iomem *ioaddr; 725 struct list_head node; 726 unsigned int irq_base; 727 unsigned int nr_irqs; 728 unsigned int param_size; 729 u64 params[]; 730 }; 731 732 static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev, 733 struct dfl_feature_dev_data *fdata) 734 { 735 mutex_lock(&cdev->lock); 736 list_add(&fdata->node, &cdev->port_dev_list); 737 mutex_unlock(&cdev->lock); 738 } 739 740 static void dfl_id_free_action(void *arg) 741 { 742 struct dfl_feature_dev_data *fdata = arg; 743 744 dfl_id_free(fdata->type, fdata->pdev_id); 745 } 746 747 static struct dfl_feature_dev_data * 748 binfo_create_feature_dev_data(struct build_feature_devs_info *binfo) 749 { 750 enum dfl_id_type type = binfo->type; 751 struct dfl_feature_info *finfo, *p; 752 struct dfl_feature_dev_data *fdata; 753 int ret, index = 0, res_idx = 0; 754 755 if (WARN_ON_ONCE(type >= DFL_ID_MAX)) 756 return ERR_PTR(-EINVAL); 757 758 fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL); 
759 if (!fdata) 760 return ERR_PTR(-ENOMEM); 761 762 fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num, 763 sizeof(*fdata->features), GFP_KERNEL); 764 if (!fdata->features) 765 return ERR_PTR(-ENOMEM); 766 767 fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num, 768 sizeof(*fdata->resources), GFP_KERNEL); 769 if (!fdata->resources) 770 return ERR_PTR(-ENOMEM); 771 772 fdata->type = type; 773 774 fdata->pdev_id = dfl_id_alloc(type, binfo->dev); 775 if (fdata->pdev_id < 0) 776 return ERR_PTR(fdata->pdev_id); 777 778 ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata); 779 if (ret) 780 return ERR_PTR(ret); 781 782 fdata->pdev_name = dfl_devs[type].name; 783 fdata->num = binfo->feature_num; 784 fdata->dfl_cdev = binfo->cdev; 785 fdata->id = FEATURE_DEV_ID_UNUSED; 786 mutex_init(&fdata->lock); 787 lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type], 788 dfl_pdata_key_strings[type]); 789 790 /* 791 * the count should be initialized to 0 to make sure 792 *__fpga_port_enable() following __fpga_port_disable() 793 * works properly for port device. 794 * and it should always be 0 for fme device. 
795 */ 796 WARN_ON(fdata->disable_count); 797 798 /* fill features and resource information for feature dev */ 799 list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) { 800 struct dfl_feature *feature = &fdata->features[index++]; 801 struct dfl_feature_irq_ctx *ctx; 802 unsigned int i; 803 804 /* save resource information for each feature */ 805 feature->id = finfo->fid; 806 feature->revision = finfo->revision; 807 feature->dfh_version = finfo->dfh_version; 808 809 if (finfo->param_size) { 810 feature->params = devm_kmemdup(binfo->dev, 811 finfo->params, finfo->param_size, 812 GFP_KERNEL); 813 if (!feature->params) 814 return ERR_PTR(-ENOMEM); 815 816 feature->param_size = finfo->param_size; 817 } 818 /* 819 * the FIU header feature has some fundamental functions (sriov 820 * set, port enable/disable) needed for the dfl bus device and 821 * other sub features. So its mmio resource should be mapped by 822 * DFL bus device. And we should not assign it to feature 823 * devices (dfl-fme/afu) again. 
824 */ 825 if (is_header_feature(feature)) { 826 feature->resource_index = -1; 827 feature->ioaddr = 828 devm_ioremap_resource(binfo->dev, 829 &finfo->mmio_res); 830 if (IS_ERR(feature->ioaddr)) 831 return ERR_CAST(feature->ioaddr); 832 } else { 833 feature->resource_index = res_idx; 834 fdata->resources[res_idx++] = finfo->mmio_res; 835 } 836 837 if (finfo->nr_irqs) { 838 ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs, 839 sizeof(*ctx), GFP_KERNEL); 840 if (!ctx) 841 return ERR_PTR(-ENOMEM); 842 843 for (i = 0; i < finfo->nr_irqs; i++) 844 ctx[i].irq = 845 binfo->irq_table[finfo->irq_base + i]; 846 847 feature->irq_ctx = ctx; 848 feature->nr_irqs = finfo->nr_irqs; 849 } 850 851 list_del(&finfo->node); 852 kfree(finfo); 853 } 854 855 fdata->resource_num = res_idx; 856 857 return fdata; 858 } 859 860 /* 861 * register current feature device, it is called when we need to switch to 862 * another feature parsing or we have parsed all features on given device 863 * feature list. 864 */ 865 static int feature_dev_register(struct dfl_feature_dev_data *fdata) 866 { 867 struct dfl_feature_platform_data pdata = {}; 868 struct platform_device *fdev; 869 struct dfl_feature *feature; 870 int ret; 871 872 fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id); 873 if (!fdev) 874 return -ENOMEM; 875 876 fdata->dev = fdev; 877 878 fdev->dev.parent = &fdata->dfl_cdev->region->dev; 879 fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id); 880 881 dfl_fpga_dev_for_each_feature(fdata, feature) 882 feature->dev = fdev; 883 884 ret = platform_device_add_resources(fdev, fdata->resources, 885 fdata->resource_num); 886 if (ret) 887 goto err_put_dev; 888 889 pdata.fdata = fdata; 890 ret = platform_device_add_data(fdev, &pdata, sizeof(pdata)); 891 if (ret) 892 goto err_put_dev; 893 894 ret = platform_device_add(fdev); 895 if (ret) 896 goto err_put_dev; 897 898 return 0; 899 900 err_put_dev: 901 platform_device_put(fdev); 902 903 fdata->dev = NULL; 904 905 
dfl_fpga_dev_for_each_feature(fdata, feature) 906 feature->dev = NULL; 907 908 return ret; 909 } 910 911 static void feature_dev_unregister(struct dfl_feature_dev_data *fdata) 912 { 913 struct dfl_feature *feature; 914 915 platform_device_unregister(fdata->dev); 916 917 fdata->dev = NULL; 918 919 dfl_fpga_dev_for_each_feature(fdata, feature) 920 feature->dev = NULL; 921 } 922 923 static int build_info_commit_dev(struct build_feature_devs_info *binfo) 924 { 925 struct dfl_feature_dev_data *fdata; 926 int ret; 927 928 fdata = binfo_create_feature_dev_data(binfo); 929 if (IS_ERR(fdata)) 930 return PTR_ERR(fdata); 931 932 ret = feature_dev_register(fdata); 933 if (ret) 934 return ret; 935 936 if (binfo->type == PORT_ID) 937 dfl_fpga_cdev_add_port_data(binfo->cdev, fdata); 938 else 939 binfo->cdev->fme_dev = get_device(&fdata->dev->dev); 940 941 /* reset the binfo for next FIU */ 942 binfo->type = DFL_ID_MAX; 943 944 return 0; 945 } 946 947 static void build_info_free(struct build_feature_devs_info *binfo) 948 { 949 struct dfl_feature_info *finfo, *p; 950 951 list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) { 952 list_del(&finfo->node); 953 kfree(finfo); 954 } 955 956 devm_kfree(binfo->dev, binfo); 957 } 958 959 static inline u32 feature_size(u64 value) 960 { 961 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, value); 962 /* workaround for private features with invalid size, use 4K instead */ 963 return ofst ? 
ofst : 4096; 964 } 965 966 static u16 feature_id(u64 value) 967 { 968 u16 id = FIELD_GET(DFH_ID, value); 969 u8 type = FIELD_GET(DFH_TYPE, value); 970 971 if (type == DFH_TYPE_FIU) 972 return FEATURE_ID_FIU_HEADER; 973 else if (type == DFH_TYPE_PRIVATE) 974 return id; 975 else if (type == DFH_TYPE_AFU) 976 return FEATURE_ID_AFU; 977 978 WARN_ON(1); 979 return 0; 980 } 981 982 static u64 *find_param(u64 *params, resource_size_t max, int param_id) 983 { 984 u64 *end = params + max / sizeof(u64); 985 u64 v, next; 986 987 while (params < end) { 988 v = *params; 989 if (param_id == FIELD_GET(DFHv1_PARAM_HDR_ID, v)) 990 return params; 991 992 if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v)) 993 break; 994 995 next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v); 996 params += next; 997 } 998 999 return NULL; 1000 } 1001 1002 /** 1003 * dfh_find_param() - find parameter block for the given parameter id 1004 * @dfl_dev: dfl device 1005 * @param_id: id of dfl parameter 1006 * @psize: destination to store size of parameter data in bytes 1007 * 1008 * Return: pointer to start of parameter data, PTR_ERR otherwise. 
1009 */ 1010 void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize) 1011 { 1012 u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id); 1013 1014 if (!phdr) 1015 return ERR_PTR(-ENOENT); 1016 1017 if (psize) 1018 *psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64); 1019 1020 return phdr + 1; 1021 } 1022 EXPORT_SYMBOL_GPL(dfh_find_param); 1023 1024 static int parse_feature_irqs(struct build_feature_devs_info *binfo, 1025 resource_size_t ofst, struct dfl_feature_info *finfo) 1026 { 1027 void __iomem *base = binfo->ioaddr + ofst; 1028 unsigned int i, ibase, inr = 0; 1029 void *params = finfo->params; 1030 enum dfl_id_type type; 1031 u16 fid = finfo->fid; 1032 int virq; 1033 u64 *p; 1034 u64 v; 1035 1036 switch (finfo->dfh_version) { 1037 case 0: 1038 /* 1039 * DFHv0 only provides MMIO resource information for each feature 1040 * in the DFL header. There is no generic interrupt information. 1041 * Instead, features with interrupt functionality provide 1042 * the information in feature specific registers. 1043 */ 1044 type = binfo->type; 1045 if (type == PORT_ID) { 1046 switch (fid) { 1047 case PORT_FEATURE_ID_UINT: 1048 v = readq(base + PORT_UINT_CAP); 1049 ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v); 1050 inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v); 1051 break; 1052 case PORT_FEATURE_ID_ERROR: 1053 v = readq(base + PORT_ERROR_CAP); 1054 ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v); 1055 inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v); 1056 break; 1057 } 1058 } else if (type == FME_ID) { 1059 switch (fid) { 1060 case FME_FEATURE_ID_GLOBAL_ERR: 1061 v = readq(base + FME_ERROR_CAP); 1062 ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v); 1063 inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v); 1064 break; 1065 } 1066 } 1067 break; 1068 1069 case 1: 1070 /* 1071 * DFHv1 provides interrupt resource information in DFHv1 1072 * parameter blocks. 
1073 */ 1074 p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X); 1075 if (!p) 1076 break; 1077 1078 p++; 1079 ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p); 1080 inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p); 1081 break; 1082 1083 default: 1084 dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version); 1085 break; 1086 } 1087 1088 if (!inr) { 1089 finfo->irq_base = 0; 1090 finfo->nr_irqs = 0; 1091 return 0; 1092 } 1093 1094 dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n", 1095 fid, ibase, inr); 1096 1097 if (ibase + inr > binfo->nr_irqs) { 1098 dev_err(binfo->dev, 1099 "Invalid interrupt number in feature 0x%x\n", fid); 1100 return -EINVAL; 1101 } 1102 1103 for (i = 0; i < inr; i++) { 1104 virq = binfo->irq_table[ibase + i]; 1105 if (virq < 0 || virq > NR_IRQS) { 1106 dev_err(binfo->dev, 1107 "Invalid irq table entry for feature 0x%x\n", 1108 fid); 1109 return -EINVAL; 1110 } 1111 } 1112 1113 finfo->irq_base = ibase; 1114 finfo->nr_irqs = inr; 1115 1116 return 0; 1117 } 1118 1119 static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max) 1120 { 1121 int size = 0; 1122 u64 v, next; 1123 1124 if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS, 1125 readq(dfh_base + DFHv1_CSR_SIZE_GRP))) 1126 return 0; 1127 1128 while (size + DFHv1_PARAM_HDR < max) { 1129 v = readq(dfh_base + DFHv1_PARAM_HDR + size); 1130 1131 next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v); 1132 if (!next) 1133 return -EINVAL; 1134 1135 size += next * sizeof(u64); 1136 1137 if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v)) 1138 return size; 1139 } 1140 1141 return -ENOENT; 1142 } 1143 1144 /* 1145 * when create sub feature instances, for private features, it doesn't need 1146 * to provide resource size and feature id as they could be read from DFH 1147 * register. 
For afu sub feature, its register region only contains user 1148 * defined registers, so never trust any information from it, just use the 1149 * resource size information provided by its parent FIU. 1150 */ 1151 static int 1152 create_feature_instance(struct build_feature_devs_info *binfo, 1153 resource_size_t ofst, resource_size_t size, u16 fid) 1154 { 1155 struct dfl_feature_info *finfo; 1156 resource_size_t start, end; 1157 int dfh_psize = 0; 1158 u8 revision = 0; 1159 u64 v, addr_off; 1160 u8 dfh_ver = 0; 1161 int ret; 1162 1163 if (fid != FEATURE_ID_AFU) { 1164 v = readq(binfo->ioaddr + ofst); 1165 revision = FIELD_GET(DFH_REVISION, v); 1166 dfh_ver = FIELD_GET(DFH_VERSION, v); 1167 /* read feature size and id if inputs are invalid */ 1168 size = size ? size : feature_size(v); 1169 fid = fid ? fid : feature_id(v); 1170 if (dfh_ver == 1) { 1171 dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size); 1172 if (dfh_psize < 0) { 1173 dev_err(binfo->dev, 1174 "failed to read size of DFHv1 parameters %d\n", 1175 dfh_psize); 1176 return dfh_psize; 1177 } 1178 dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize); 1179 } 1180 } 1181 1182 if (binfo->len - ofst < size) 1183 return -EINVAL; 1184 1185 finfo = kzalloc_flex(*finfo, params, dfh_psize / sizeof(u64), 1186 GFP_KERNEL); 1187 if (!finfo) 1188 return -ENOMEM; 1189 1190 memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize); 1191 finfo->param_size = dfh_psize; 1192 1193 finfo->fid = fid; 1194 finfo->revision = revision; 1195 finfo->dfh_version = dfh_ver; 1196 if (dfh_ver == 1) { 1197 v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR); 1198 addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v); 1199 if (FIELD_GET(DFHv1_CSR_ADDR_REL, v)) 1200 start = addr_off << 1; 1201 else 1202 start = binfo->start + ofst + addr_off; 1203 1204 v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP); 1205 end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1; 1206 } else { 1207 start = binfo->start + ofst; 1208 end = 
start + size - 1; 1209 } 1210 finfo->mmio_res.flags = IORESOURCE_MEM; 1211 finfo->mmio_res.start = start; 1212 finfo->mmio_res.end = end; 1213 1214 ret = parse_feature_irqs(binfo, ofst, finfo); 1215 if (ret) { 1216 kfree(finfo); 1217 return ret; 1218 } 1219 1220 list_add_tail(&finfo->node, &binfo->sub_features); 1221 binfo->feature_num++; 1222 1223 return 0; 1224 } 1225 1226 static int parse_feature_port_afu(struct build_feature_devs_info *binfo, 1227 resource_size_t ofst) 1228 { 1229 u64 v = readq(binfo->ioaddr + PORT_HDR_CAP); 1230 u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10; 1231 1232 WARN_ON(!size); 1233 1234 return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU); 1235 } 1236 1237 #define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX) 1238 1239 static int parse_feature_afu(struct build_feature_devs_info *binfo, 1240 resource_size_t ofst) 1241 { 1242 if (!is_feature_dev_detected(binfo)) { 1243 dev_err(binfo->dev, "this AFU does not belong to any FIU.\n"); 1244 return -EINVAL; 1245 } 1246 1247 switch (binfo->type) { 1248 case PORT_ID: 1249 return parse_feature_port_afu(binfo, ofst); 1250 default: 1251 dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n"); 1252 } 1253 1254 return 0; 1255 } 1256 1257 static int build_info_prepare(struct build_feature_devs_info *binfo, 1258 resource_size_t start, resource_size_t len) 1259 { 1260 struct device *dev = binfo->dev; 1261 void __iomem *ioaddr; 1262 1263 if (!devm_request_mem_region(dev, start, len, dev_name(dev))) { 1264 dev_err(dev, "request region fail, start:%pa, len:%pa\n", 1265 &start, &len); 1266 return -EBUSY; 1267 } 1268 1269 ioaddr = devm_ioremap(dev, start, len); 1270 if (!ioaddr) { 1271 dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n", 1272 &start, &len); 1273 return -ENOMEM; 1274 } 1275 1276 binfo->start = start; 1277 binfo->len = len; 1278 binfo->ioaddr = ioaddr; 1279 1280 return 0; 1281 } 1282 1283 static void build_info_complete(struct 
				build_feature_devs_info *binfo)
{
	/* undo build_info_prepare(): drop the mapping and the region claim */
	devm_iounmap(binfo->dev, binfo->ioaddr);
	devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
}

/*
 * parse_feature_fiu - handle a FIU (FME or Port) header found on the list.
 *
 * A new FIU marks the boundary of feature devices: the one being built (if
 * any) is closed out and registered, then binfo is re-prepared over the
 * remainder of the window starting at this FIU. The FIU header itself
 * becomes the first sub feature of the new device, and its NEXT_AFU
 * pointer (Port only, per spec) is chased for a child AFU.
 */
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     resource_size_t ofst)
{
	enum dfl_id_type type;
	int ret = 0;
	u32 offset;
	u16 id;
	u64 v;

	if (is_feature_dev_detected(binfo)) {
		/* finish the previous feature dev before starting this one */
		build_info_complete(binfo);

		ret = build_info_commit_dev(binfo);
		if (ret)
			return ret;

		/* re-map the rest of the window, rebased at this FIU */
		ret = build_info_prepare(binfo, binfo->start + ofst,
					 binfo->len - ofst);
		if (ret)
			return ret;
	}

	v = readq(binfo->ioaddr + DFH);
	id = FIELD_GET(DFH_ID, v);

	type = dfh_id_to_type(id);
	if (type >= DFL_ID_MAX)
		return -EINVAL;

	binfo->type = type;
	binfo->feature_num = 0;
	INIT_LIST_HEAD(&binfo->sub_features);

	/* the FIU header region itself is sub feature 0 of the new device */
	ret = create_feature_instance(binfo, 0, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(binfo->ioaddr + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}

/*
 * parse_feature_private - add a private feature to the feature dev being
 * built. Size and id are passed as 0 so create_feature_instance() reads
 * them from the feature's own DFH register.
 */
static int parse_feature_private(struct build_feature_devs_info *binfo,
				 resource_size_t ofst)
{
	if (!is_feature_dev_detected(binfo)) {
		dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
			feature_id(readq(binfo->ioaddr + ofst)));
		return -EINVAL;
	}

	return create_feature_instance(binfo, ofst, 0, 0);
}

/**
 * parse_feature - parse a feature on given device feature list
 *
 * @binfo: build feature devices information.
 * @ofst: offset to current FIU header
 */
static int parse_feature(struct build_feature_devs_info *binfo,
			 resource_size_t ofst)
{
	u64 v;
	u32 type;

	/* dispatch on the DFH type field: AFU, private feature, or FIU */
	v = readq(binfo->ioaddr + ofst + DFH);
	type = FIELD_GET(DFH_TYPE, v);

	switch (type) {
	case DFH_TYPE_AFU:
		return parse_feature_afu(binfo, ofst);
	case DFH_TYPE_PRIVATE:
		return parse_feature_private(binfo, ofst);
	case DFH_TYPE_FIU:
		return parse_feature_fiu(binfo, ofst);
	default:
		dev_info(binfo->dev,
			 "Feature Type %x is not supported.\n", type);
	}

	return 0;
}

/*
 * parse_feature_list - walk one Device Feature List and build feature devs.
 * @binfo: build feature devices information.
 * @start: physical start of the DFL window.
 * @len: length of the DFL window in bytes.
 *
 * Maps the window, follows each DFH's next-header offset until EOL or a
 * zero offset, and commits the last feature device found (earlier ones are
 * committed by parse_feature_fiu() when the next FIU is reached).
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	resource_size_t end = start + len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	ret = build_info_prepare(binfo, start, len);
	if (ret)
		return ret;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		/* offsets into the mapping are relative to binfo->start,
		 * which parse_feature_fiu() may have rebased mid-walk */
		ret = parse_feature(binfo, start - binfo->start);
		if (ret)
			return ret;

		v = readq(binfo->ioaddr + start - binfo->start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	build_info_complete(binfo);

	if (is_feature_dev_detected(binfo))
		ret = build_info_commit_dev(binfo);

	return ret;
}

/*
 * dfl_fpga_enum_info_alloc - allocate enumeration info bound to @dev.
 * Takes a reference on @dev which dfl_fpga_enum_info_free() drops.
 * Return: new info on success, NULL on allocation failure.
 */
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);

/*
 * dfl_fpga_enum_info_free - free enumeration info and everything it owns:
 * all queued DFL entries, the copied irq table, the info itself, and the
 * device reference taken in dfl_fpga_enum_info_alloc().
 */
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}

	/* remove irq table */
	if (info->irq_table)
		devm_kfree(dev, info->irq_table);

	devm_kfree(dev, info);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);

/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
1471 * @len: mmio resource length of the device feature list. 1472 * 1473 * One FPGA device may have one or more Device Feature Lists (DFLs), use this 1474 * function to add information of each DFL to common data structure for next 1475 * step enumeration. 1476 * 1477 * Return: 0 on success, negative error code otherwise. 1478 */ 1479 int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info, 1480 resource_size_t start, resource_size_t len) 1481 { 1482 struct dfl_fpga_enum_dfl *dfl; 1483 1484 dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL); 1485 if (!dfl) 1486 return -ENOMEM; 1487 1488 dfl->start = start; 1489 dfl->len = len; 1490 1491 list_add_tail(&dfl->node, &info->dfls); 1492 1493 return 0; 1494 } 1495 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl); 1496 1497 /** 1498 * dfl_fpga_enum_info_add_irq - add irq table to enum info 1499 * 1500 * @info: ptr to dfl_fpga_enum_info 1501 * @nr_irqs: number of irqs of the DFL fpga device to be enumerated. 1502 * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of 1503 * this device. 1504 * 1505 * One FPGA device may have several interrupts. This function adds irq 1506 * information of the DFL fpga device to enum info for next step enumeration. 1507 * This function should be called before dfl_fpga_feature_devs_enumerate(). 1508 * As we only support one irq domain for all DFLs in the same enum info, adding 1509 * irq table a second time for the same enum info will return error. 1510 * 1511 * If we need to enumerate DFLs which belong to different irq domains, we 1512 * should fill more enum info and enumerate them one by one. 1513 * 1514 * Return: 0 on success, negative error code otherwise. 
1515 */ 1516 int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info, 1517 unsigned int nr_irqs, int *irq_table) 1518 { 1519 if (!nr_irqs || !irq_table) 1520 return -EINVAL; 1521 1522 if (info->irq_table) 1523 return -EEXIST; 1524 1525 info->irq_table = devm_kmemdup(info->dev, irq_table, 1526 sizeof(int) * nr_irqs, GFP_KERNEL); 1527 if (!info->irq_table) 1528 return -ENOMEM; 1529 1530 info->nr_irqs = nr_irqs; 1531 1532 return 0; 1533 } 1534 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq); 1535 1536 static int remove_feature_dev(struct device *dev, void *data) 1537 { 1538 struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev); 1539 1540 feature_dev_unregister(fdata); 1541 1542 return 0; 1543 } 1544 1545 static void remove_feature_devs(struct dfl_fpga_cdev *cdev) 1546 { 1547 device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev); 1548 } 1549 1550 /** 1551 * dfl_fpga_feature_devs_enumerate - enumerate feature devices 1552 * @info: information for enumeration. 1553 * 1554 * This function creates a container device (base FPGA region), enumerates 1555 * feature devices based on the enumeration info and creates platform devices 1556 * under the container device. 
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	/* the base FPGA region is the container all feature devs live under */
	cdev->region = fpga_region_register(info->dev, NULL, NULL);
	if (IS_ERR(cdev->region)) {
		ret = PTR_ERR(cdev->region);
		goto free_cdev_exit;
	}

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	/* DFL_ID_MAX means "no feature dev detected yet" */
	binfo->type = DFL_ID_MAX;
	binfo->dev = info->dev;
	binfo->cdev = cdev;

	binfo->nr_irqs = info->nr_irqs;
	if (info->nr_irqs)
		binfo->irq_table = info->irq_table;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl->start, dfl->len);
		if (ret) {
			/* tear down any feature devs already registered */
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);

/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature devices under given container
 * devices.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	mutex_lock(&cdev->lock);
	/* drop the FME reference taken during enumeration, if any */
	if (cdev->fme_dev)
		put_device(cdev->fme_dev);

	mutex_unlock(&cdev->lock);

	remove_feature_devs(cdev);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);

/**
 * __dfl_fpga_cdev_find_port_data - find a port under given container device
 *
 * @cdev: container device
 * @data: data passed to match function
 * @match: match function used to find specific port from the port device list
 *
 * Find a port device under container device. This function needs to be
 * invoked with lock held.
 *
 * Return: pointer to port's platform device if successful, NULL otherwise.
 *
 * NOTE: you will need to drop the device reference with put_device() after use.
 */
struct dfl_feature_dev_data *
__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
			       int (*match)(struct dfl_feature_dev_data *, void *))
{
	struct dfl_feature_dev_data *fdata;

	/* NOTE(review): returns fdata without taking a reference; the
	 * put_device() note above looks stale for this signature — confirm */
	list_for_each_entry(fdata, &cdev->port_dev_list, node) {
		if (match(fdata, data))
			return fdata;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);

/* framework init: bus first, then idrs, then chardev region; unwind on fail */
static int __init dfl_fpga_init(void)
{
	int ret;

	ret = bus_register(&dfl_bus_type);
	if (ret)
		return ret;

	dfl_ids_init();

	ret = dfl_chardev_init();
	if (ret) {
		dfl_ids_destroy();
		bus_unregister(&dfl_bus_type);
	}

	return ret;
}

/**
 * dfl_fpga_cdev_release_port - release a port platform device
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to release a port platform device. This is a
 * mandatory step before turn a port from PF into VF for SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_dev_data *fdata;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
					       dfl_fpga_check_port_id);
	if (!fdata)
		goto unlock_exit;

	/* no bound device means the port was already released */
	if (!fdata->dev) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* mark the port exclusively in use so no one else can open it */
	mutex_lock(&fdata->lock);
	ret = dfl_feature_dev_use_begin(fdata, true);
	mutex_unlock(&fdata->lock);
	if (ret)
		goto unlock_exit;

	feature_dev_unregister(fdata);
	cdev->released_port_num++;
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);

/**
 * dfl_fpga_cdev_assign_port - assign a port platform device back
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to assign a port platform device back. This is
 * a mandatory step after disable SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_dev_data *fdata;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
					       dfl_fpga_check_port_id);
	if (!fdata)
		goto unlock_exit;

	/* a bound device means the port was never released */
	if (fdata->dev) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	ret = feature_dev_register(fdata);
	if (ret)
		goto unlock_exit;

	/* drop the exclusive-use mark taken at release time */
	mutex_lock(&fdata->lock);
	dfl_feature_dev_use_end(fdata);
	mutex_unlock(&fdata->lock);

	cdev->released_port_num--;
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);

/*
 * config_port_access_mode - route one port's mmio access to PF or VF.
 * Read-modify-write of the ACC_CTRL field in the FME header's per-port
 * offset register.
 */
static void config_port_access_mode(struct device *fme_dev, int port_id,
				    bool is_vf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

	v = readq(base + FME_HDR_PORT_OFST(port_id));

	v &= ~FME_PORT_OFST_ACC_CTRL;
	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);

	writeq(v, base + FME_HDR_PORT_OFST(port_id));
}

#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)

/**
 * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
 *
 * @cdev: parent container device.
 *
 * This function is needed in sriov configuration routine. It could be used to
 * configure the all released ports from VF access mode to PF.
 */
void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_dev_data *fdata;

	mutex_lock(&cdev->lock);
	list_for_each_entry(fdata, &cdev->port_dev_list, node) {
		/* only released ports (no bound device) are reconfigured */
		if (fdata->dev)
			continue;

		config_port_pf_mode(cdev->fme_dev, fdata->id);
	}
	mutex_unlock(&cdev->lock);
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);

/**
 * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
 *
 * @cdev: parent container device.
 * @num_vfs: VF device number.
 *
 * This function is needed in sriov configuration routine. It could be used to
 * configure the released ports from PF access mode to VF.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
{
	struct dfl_feature_dev_data *fdata;
	int ret = 0;

	mutex_lock(&cdev->lock);
	/*
	 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
	 * device, so if released port number doesn't match VF device number,
	 * then reject the request with -EINVAL error code.
	 */
	if (cdev->released_port_num != num_vfs) {
		ret = -EINVAL;
		goto done;
	}

	list_for_each_entry(fdata, &cdev->port_dev_list, node) {
		if (fdata->dev)
			continue;

		config_port_vf_mode(cdev->fme_dev, fdata->id);
	}
done:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);

/* irq handler: forward the hardware interrupt to userspace via its eventfd */
static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
	struct eventfd_ctx *trigger = arg;

	eventfd_signal(trigger);
	return IRQ_HANDLED;
}

/*
 * do_set_irq_trigger - (re)bind one feature irq to an eventfd.
 * @feature: the dfl sub feature owning the irq.
 * @idx: local irq index within the feature.
 * @fd: eventfd to bind, or a negative value to only unbind.
 *
 * Any existing binding is always torn down first (irq freed, name freed,
 * eventfd reference dropped). With fd < 0 that is all; otherwise a new
 * eventfd context is looked up and the irq requested against it.
 */
static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
			      int fd)
{
	struct platform_device *pdev = feature->dev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	irq = feature->irq_ctx[idx].irq;

	/* tear down the previous binding, if any */
	if (feature->irq_ctx[idx].trigger) {
		free_irq(irq, feature->irq_ctx[idx].trigger);
		kfree(feature->irq_ctx[idx].name);
		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
		feature->irq_ctx[idx].trigger = NULL;
	}

	/* negative fd means "unbind only" */
	if (fd < 0)
		return 0;

	feature->irq_ctx[idx].name =
		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
			  dev_name(&pdev->dev), feature->id);
	if (!feature->irq_ctx[idx].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto free_name;
	}

	ret = request_irq(irq, dfl_irq_handler, 0,
			  feature->irq_ctx[idx].name, trigger);
	if (!ret) {
		feature->irq_ctx[idx].trigger = trigger;
		return ret;
	}

	/* request_irq failed: drop the eventfd ref and the name */
	eventfd_ctx_put(trigger);
free_name:
	kfree(feature->irq_ctx[idx].name);

	return ret;
}

/**
 * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
 *
 * @feature: dfl sub feature.
 * @start: start of irq index in this dfl sub feature.
 * @count: number of irqs.
 * @fds: eventfds to bind with irqs.
unbind related irq if fds[n] is negative. 1921 * unbind "count" specified number of irqs if fds ptr is NULL. 1922 * 1923 * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if 1924 * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is 1925 * NULL. 1926 * 1927 * Return: 0 on success, negative error code otherwise. 1928 */ 1929 int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start, 1930 unsigned int count, int32_t *fds) 1931 { 1932 unsigned int i; 1933 int ret = 0; 1934 1935 /* overflow */ 1936 if (unlikely(start + count < start)) 1937 return -EINVAL; 1938 1939 /* exceeds nr_irqs */ 1940 if (start + count > feature->nr_irqs) 1941 return -EINVAL; 1942 1943 for (i = 0; i < count; i++) { 1944 int fd = fds ? fds[i] : -1; 1945 1946 ret = do_set_irq_trigger(feature, start + i, fd); 1947 if (ret) { 1948 while (i--) 1949 do_set_irq_trigger(feature, start + i, -1); 1950 break; 1951 } 1952 } 1953 1954 return ret; 1955 } 1956 EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers); 1957 1958 /** 1959 * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface. 1960 * @pdev: the feature device which has the sub feature 1961 * @feature: the dfl sub feature 1962 * @arg: ioctl argument 1963 * 1964 * Return: 0 on success, negative error code otherwise. 1965 */ 1966 long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev, 1967 struct dfl_feature *feature, 1968 unsigned long arg) 1969 { 1970 return put_user(feature->nr_irqs, (__u32 __user *)arg); 1971 } 1972 EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs); 1973 1974 /** 1975 * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface. 1976 * @pdev: the feature device which has the sub feature 1977 * @feature: the dfl sub feature 1978 * @arg: ioctl argument 1979 * 1980 * Return: 0 on success, negative error code otherwise. 
 */
long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
			       struct dfl_feature *feature,
			       unsigned long arg)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct dfl_fpga_irq_set hdr;
	s32 *fds;
	long ret;

	if (!feature->nr_irqs)
		return -ENOENT;

	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	/* reject empty, out-of-range, or wrapped [start, start+count) */
	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
	    (hdr.start + hdr.count < hdr.start))
		return -EINVAL;

	/* copy the eventfd array that follows the header in userspace */
	fds = memdup_array_user((void __user *)(arg + sizeof(hdr)),
				hdr.count, sizeof(s32));
	if (IS_ERR(fds))
		return PTR_ERR(fds);

	mutex_lock(&fdata->lock);
	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
	mutex_unlock(&fdata->lock);

	kfree(fds);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);

/* framework exit: reverse order of dfl_fpga_init() */
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
	bus_unregister(&dfl_bus_type);
}

subsys_initcall(dfl_fpga_init);
module_exit(dfl_fpga_exit);

MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");