xref: /linux/drivers/fpga/dfl.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for FPGA Device Feature List (DFL) Support
4  *
5  * Copyright (C) 2017-2018 Intel Corporation, Inc.
6  *
7  * Authors:
8  *   Kang Luwei <luwei.kang@intel.com>
9  *   Zhang Yi <yi.z.zhang@intel.com>
10  *   Wu Hao <hao.wu@intel.com>
11  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
12  */
13 #include <linux/dfl.h>
14 #include <linux/fpga-dfl.h>
15 #include <linux/module.h>
16 #include <linux/overflow.h>
17 #include <linux/uaccess.h>
18 
19 #include "dfl.h"
20 
/* serializes id allocation/free in the per-type idrs of dfl_devs[] */
static DEFINE_MUTEX(dfl_id_mutex);
22 
23 /*
24  * when adding a new feature dev support in DFL framework, it's required to
25  * add a new item in enum dfl_id_type and provide related information in below
26  * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for
27  * platform device creation (define name strings in dfl.h, as they could be
28  * reused by platform device drivers).
29  *
30  * if the new feature dev needs chardev support, then it's required to add
31  * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as
32  * index to dfl_chardevs table. If no chardev support just set devt_type
33  * as one invalid index (DFL_FPGA_DEVT_MAX).
34  */
enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,	/* also serves as the "no chardev" sentinel */
};
40 
/* per-feature-dev-type lockdep classes for dfl_feature_dev_data->lock */
static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];

/* lockdep class names, parallel to dfl_pdata_keys[] (indexed by dfl_id_type) */
static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
	"dfl-fme-pdata",
	"dfl-port-pdata",
};
47 
48 /**
49  * struct dfl_dev_info - dfl feature device information.
50  * @name: name string of the feature platform device.
51  * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
52  * @id: idr id of the feature dev.
53  * @devt_type: index to dfl_chrdevs[].
54  */
55 struct dfl_dev_info {
56 	const char *name;
57 	u16 dfh_id;
58 	struct idr id;
59 	enum dfl_fpga_devt_type devt_type;
60 };
61 
/* table of supported feature devices, indexed by dfl_id_type (FME_ID/PORT_ID) */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};
69 
70 /**
71  * struct dfl_chardev_info - chardev information of dfl feature device
72  * @name: nmae string of the char device.
73  * @devt: devt of the char device.
74  */
75 struct dfl_chardev_info {
76 	const char *name;
77 	dev_t devt;
78 };
79 
/* indexed by enum dfl_fpga_devt_type; .devt is filled in by dfl_chardev_init() */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
85 
dfl_ids_init(void)86 static void dfl_ids_init(void)
87 {
88 	int i;
89 
90 	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
91 		idr_init(&dfl_devs[i].id);
92 }
93 
dfl_ids_destroy(void)94 static void dfl_ids_destroy(void)
95 {
96 	int i;
97 
98 	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
99 		idr_destroy(&dfl_devs[i].id);
100 }
101 
dfl_id_alloc(enum dfl_id_type type,struct device * dev)102 static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
103 {
104 	int id;
105 
106 	WARN_ON(type >= DFL_ID_MAX);
107 	mutex_lock(&dfl_id_mutex);
108 	id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
109 	mutex_unlock(&dfl_id_mutex);
110 
111 	return id;
112 }
113 
dfl_id_free(enum dfl_id_type type,int id)114 static void dfl_id_free(enum dfl_id_type type, int id)
115 {
116 	WARN_ON(type >= DFL_ID_MAX);
117 	mutex_lock(&dfl_id_mutex);
118 	idr_remove(&dfl_devs[type].id, id);
119 	mutex_unlock(&dfl_id_mutex);
120 }
121 
/* map a DFH FIU id to its dfl_id_type; DFL_ID_MAX if the id is unknown */
static enum dfl_id_type dfh_id_to_type(u16 id)
{
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(dfl_devs); idx++) {
		if (dfl_devs[idx].dfh_id == id)
			return idx;
	}

	return DFL_ID_MAX;
}
132 
133 /*
134  * introduce a global port_ops list, it allows port drivers to register ops
135  * in such list, then other feature devices (e.g. FME), could use the port
136  * functions even related port platform device is hidden. Below is one example,
137  * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is
138  * enabled, port (and it's AFU) is turned into VF and port platform device
139  * is hidden from system but it's still required to access port to finish FPGA
140  * reconfiguration function in FME.
141  */
142 
/* protects dfl_port_ops_list below */
static DEFINE_MUTEX(dfl_port_ops_mutex);
/* global list of registered dfl_fpga_port_ops (see comment above) */
static LIST_HEAD(dfl_port_ops_list);
145 
146 /**
147  * dfl_fpga_port_ops_get - get matched port ops from the global list
148  * @fdata: feature dev data to match with associated port ops.
149  * Return: matched port ops on success, NULL otherwise.
150  *
151  * Please note that must dfl_fpga_port_ops_put after use the port_ops.
152  */
dfl_fpga_port_ops_get(struct dfl_feature_dev_data * fdata)153 struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct dfl_feature_dev_data *fdata)
154 {
155 	struct dfl_fpga_port_ops *ops = NULL;
156 
157 	mutex_lock(&dfl_port_ops_mutex);
158 	if (list_empty(&dfl_port_ops_list))
159 		goto done;
160 
161 	list_for_each_entry(ops, &dfl_port_ops_list, node) {
162 		/* match port_ops using the name of platform device */
163 		if (!strcmp(fdata->pdev_name, ops->name)) {
164 			if (!try_module_get(ops->owner))
165 				ops = NULL;
166 			goto done;
167 		}
168 	}
169 
170 	ops = NULL;
171 done:
172 	mutex_unlock(&dfl_port_ops_mutex);
173 	return ops;
174 }
175 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
176 
177 /**
178  * dfl_fpga_port_ops_put - put port ops
179  * @ops: port ops.
180  */
dfl_fpga_port_ops_put(struct dfl_fpga_port_ops * ops)181 void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
182 {
183 	if (ops && ops->owner)
184 		module_put(ops->owner);
185 }
186 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
187 
188 /**
189  * dfl_fpga_port_ops_add - add port_ops to global list
190  * @ops: port ops to add.
191  */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	/* append under the lock; dfl_fpga_port_ops_get() walks this list */
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
199 
200 /**
201  * dfl_fpga_port_ops_del - remove port_ops from global list
202  * @ops: port ops to del.
203  */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	/* unlink under the same lock that guards lookups */
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
211 
212 /**
213  * dfl_fpga_check_port_id - check the port id
214  * @fdata: port feature dev data.
215  * @pport_id: port id to compare.
216  *
217  * Return: 1 if port device matches with given port id, otherwise 0.
218  */
dfl_fpga_check_port_id(struct dfl_feature_dev_data * fdata,void * pport_id)219 int dfl_fpga_check_port_id(struct dfl_feature_dev_data *fdata, void *pport_id)
220 {
221 	struct dfl_fpga_port_ops *port_ops;
222 
223 	if (fdata->id != FEATURE_DEV_ID_UNUSED)
224 		return fdata->id == *(int *)pport_id;
225 
226 	port_ops = dfl_fpga_port_ops_get(fdata);
227 	if (!port_ops || !port_ops->get_id)
228 		return 0;
229 
230 	fdata->id = port_ops->get_id(fdata);
231 	dfl_fpga_port_ops_put(port_ops);
232 
233 	return fdata->id == *(int *)pport_id;
234 }
235 EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
236 
/* allocator for globally unique dfl device instance ids ("dfl_dev.N") */
static DEFINE_IDA(dfl_device_ida);
238 
239 static const struct dfl_device_id *
dfl_match_one_device(const struct dfl_device_id * id,struct dfl_device * ddev)240 dfl_match_one_device(const struct dfl_device_id *id, struct dfl_device *ddev)
241 {
242 	if (id->type == ddev->type && id->feature_id == ddev->feature_id)
243 		return id;
244 
245 	return NULL;
246 }
247 
dfl_bus_match(struct device * dev,const struct device_driver * drv)248 static int dfl_bus_match(struct device *dev, const struct device_driver *drv)
249 {
250 	struct dfl_device *ddev = to_dfl_dev(dev);
251 	const struct dfl_driver *ddrv = to_dfl_drv(drv);
252 	const struct dfl_device_id *id_entry;
253 
254 	id_entry = ddrv->id_table;
255 	if (id_entry) {
256 		while (id_entry->feature_id) {
257 			if (dfl_match_one_device(id_entry, ddev)) {
258 				ddev->id_entry = id_entry;
259 				return 1;
260 			}
261 			id_entry++;
262 		}
263 	}
264 
265 	return 0;
266 }
267 
dfl_bus_probe(struct device * dev)268 static int dfl_bus_probe(struct device *dev)
269 {
270 	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
271 	struct dfl_device *ddev = to_dfl_dev(dev);
272 
273 	return ddrv->probe(ddev);
274 }
275 
dfl_bus_remove(struct device * dev)276 static void dfl_bus_remove(struct device *dev)
277 {
278 	struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
279 	struct dfl_device *ddev = to_dfl_dev(dev);
280 
281 	if (ddrv->remove)
282 		ddrv->remove(ddev);
283 }
284 
static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct dfl_device *ddev = to_dfl_dev(dev);

	/*
	 * NOTE(review): the "dfl:tXXXXfXXXX" modalias format is a userspace
	 * contract (module autoloading) — presumably mirrored by the module
	 * device table handling; verify before changing the format string.
	 */
	return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
			      ddev->type, ddev->feature_id);
}
292 
293 static ssize_t
type_show(struct device * dev,struct device_attribute * attr,char * buf)294 type_show(struct device *dev, struct device_attribute *attr, char *buf)
295 {
296 	struct dfl_device *ddev = to_dfl_dev(dev);
297 
298 	return sprintf(buf, "0x%x\n", ddev->type);
299 }
300 static DEVICE_ATTR_RO(type);
301 
302 static ssize_t
feature_id_show(struct device * dev,struct device_attribute * attr,char * buf)303 feature_id_show(struct device *dev, struct device_attribute *attr, char *buf)
304 {
305 	struct dfl_device *ddev = to_dfl_dev(dev);
306 
307 	return sprintf(buf, "0x%x\n", ddev->feature_id);
308 }
309 static DEVICE_ATTR_RO(feature_id);
310 
/* sysfs attributes exposed by every dfl device */
static struct attribute *dfl_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_feature_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dfl_dev);
317 
/* the DFL virtual bus; enumerated feature sub-devices live on this bus */
static const struct bus_type dfl_bus_type = {
	.name		= "dfl",
	.match		= dfl_bus_match,
	.probe		= dfl_bus_probe,
	.remove		= dfl_bus_remove,
	.uevent		= dfl_bus_uevent,
	.dev_groups	= dfl_dev_groups,
};
326 
/* device .release callback: undoes everything dfl_dev_add() set up */
static void release_dfl_dev(struct device *dev)
{
	struct dfl_device *ddev = to_dfl_dev(dev);

	/* mmio_res.parent is only set once insert_resource() has succeeded */
	if (ddev->mmio_res.parent)
		release_resource(&ddev->mmio_res);

	kfree(ddev->params);

	ida_free(&dfl_device_ida, ddev->id);
	kfree(ddev->irqs);
	kfree(ddev);
}
340 
/*
 * dfl_dev_add - create and register one dfl device for @feature.
 *
 * Builds a struct dfl_device on the dfl bus, parented to the feature
 * platform device, carrying the feature's id/revision info, a copy of its
 * DFH parameter data, a child mmio resource and its irq numbers.
 *
 * Return: the new dfl_device on success, ERR_PTR() otherwise.  On any
 * failure after device_initialize(), cleanup happens via put_device() ->
 * release_dfl_dev().
 */
static struct dfl_device *
dfl_dev_add(struct dfl_feature_dev_data *fdata,
	    struct dfl_feature *feature)
{
	struct platform_device *pdev = fdata->dev;
	struct resource *parent_res;
	struct dfl_device *ddev;
	int id, i, ret;

	ddev = kzalloc_obj(*ddev);
	if (!ddev)
		return ERR_PTR(-ENOMEM);

	id = ida_alloc(&dfl_device_ida, GFP_KERNEL);
	if (id < 0) {
		dev_err(&pdev->dev, "unable to get id\n");
		/* plain kfree: no device_initialize() has happened yet */
		kfree(ddev);
		return ERR_PTR(id);
	}

	/* freeing resources by put_device() after device_initialize() */
	device_initialize(&ddev->dev);
	ddev->dev.parent = &pdev->dev;
	ddev->dev.bus = &dfl_bus_type;
	ddev->dev.release = release_dfl_dev;
	ddev->id = id;
	ret = dev_set_name(&ddev->dev, "dfl_dev.%d", id);
	if (ret)
		goto put_dev;

	ddev->type = fdata->type;
	ddev->feature_id = feature->id;
	ddev->revision = feature->revision;
	ddev->dfh_version = feature->dfh_version;
	ddev->cdev = fdata->dfl_cdev;
	if (feature->param_size) {
		/* own copy so the dfl device outlives the feature's buffer */
		ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
		if (!ddev->params) {
			ret = -ENOMEM;
			goto put_dev;
		}
		ddev->param_size = feature->param_size;
	}

	/* add mmio resource (child of the feature platform dev's resource) */
	parent_res = &pdev->resource[feature->resource_index];
	ddev->mmio_res.flags = IORESOURCE_MEM;
	ddev->mmio_res.start = parent_res->start;
	ddev->mmio_res.end = parent_res->end;
	ddev->mmio_res.name = dev_name(&ddev->dev);
	ret = insert_resource(parent_res, &ddev->mmio_res);
	if (ret) {
		dev_err(&pdev->dev, "%s failed to claim resource: %pR\n",
			dev_name(&ddev->dev), &ddev->mmio_res);
		goto put_dev;
	}

	/* then add irq resource */
	if (feature->nr_irqs) {
		ddev->irqs = kzalloc_objs(*ddev->irqs, feature->nr_irqs);
		if (!ddev->irqs) {
			ret = -ENOMEM;
			goto put_dev;
		}

		for (i = 0; i < feature->nr_irqs; i++)
			ddev->irqs[i] = feature->irq_ctx[i].irq;

		ddev->num_irqs = feature->nr_irqs;
	}

	ret = device_add(&ddev->dev);
	if (ret)
		goto put_dev;

	dev_dbg(&pdev->dev, "add dfl_dev: %s\n", dev_name(&ddev->dev));
	return ddev;

put_dev:
	/* calls release_dfl_dev() which does the clean up  */
	put_device(&ddev->dev);
	return ERR_PTR(ret);
}
424 
dfl_devs_remove(struct dfl_feature_dev_data * fdata)425 static void dfl_devs_remove(struct dfl_feature_dev_data *fdata)
426 {
427 	struct dfl_feature *feature;
428 
429 	dfl_fpga_dev_for_each_feature(fdata, feature) {
430 		if (feature->ddev) {
431 			device_unregister(&feature->ddev->dev);
432 			feature->ddev = NULL;
433 		}
434 	}
435 }
436 
dfl_devs_add(struct dfl_feature_dev_data * fdata)437 static int dfl_devs_add(struct dfl_feature_dev_data *fdata)
438 {
439 	struct dfl_feature *feature;
440 	struct dfl_device *ddev;
441 	int ret;
442 
443 	dfl_fpga_dev_for_each_feature(fdata, feature) {
444 		if (feature->ioaddr)
445 			continue;
446 
447 		if (feature->ddev) {
448 			ret = -EEXIST;
449 			goto err;
450 		}
451 
452 		ddev = dfl_dev_add(fdata, feature);
453 		if (IS_ERR(ddev)) {
454 			ret = PTR_ERR(ddev);
455 			goto err;
456 		}
457 
458 		feature->ddev = ddev;
459 	}
460 
461 	return 0;
462 
463 err:
464 	dfl_devs_remove(fdata);
465 	return ret;
466 }
467 
__dfl_driver_register(struct dfl_driver * dfl_drv,struct module * owner)468 int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner)
469 {
470 	if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table)
471 		return -EINVAL;
472 
473 	dfl_drv->drv.owner = owner;
474 	dfl_drv->drv.bus = &dfl_bus_type;
475 
476 	return driver_register(&dfl_drv->drv);
477 }
478 EXPORT_SYMBOL(__dfl_driver_register);
479 
/* counterpart of dfl_driver_register(): remove the driver from the dfl bus */
void dfl_driver_unregister(struct dfl_driver *dfl_drv)
{
	driver_unregister(&dfl_drv->drv);
}
EXPORT_SYMBOL(dfl_driver_unregister);
485 
/* true if @feature is the FIU header feature (mapped by the bus, not AFU/FME) */
#define is_header_feature(feature) ((feature)->id == FEATURE_ID_FIU_HEADER)
487 
488 /**
489  * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
490  * @pdev: feature device.
491  */
void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct dfl_feature *feature;

	/* remove the dfl bus devices first, then undo each feature's init */
	dfl_devs_remove(fdata);

	dfl_fpga_dev_for_each_feature(fdata, feature) {
		if (feature->ops) {
			if (feature->ops->uinit)
				feature->ops->uinit(pdev, feature);
			/* clearing ops marks the feature as uninitialized */
			feature->ops = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
508 
dfl_feature_instance_init(struct platform_device * pdev,struct dfl_feature * feature,struct dfl_feature_driver * drv)509 static int dfl_feature_instance_init(struct platform_device *pdev,
510 				     struct dfl_feature *feature,
511 				     struct dfl_feature_driver *drv)
512 {
513 	void __iomem *base;
514 	int ret = 0;
515 
516 	if (!is_header_feature(feature)) {
517 		base = devm_platform_ioremap_resource(pdev,
518 						      feature->resource_index);
519 		if (IS_ERR(base)) {
520 			dev_err(&pdev->dev,
521 				"ioremap failed for feature 0x%x!\n",
522 				feature->id);
523 			return PTR_ERR(base);
524 		}
525 
526 		feature->ioaddr = base;
527 	}
528 
529 	if (drv->ops->init) {
530 		ret = drv->ops->init(pdev, feature);
531 		if (ret)
532 			return ret;
533 	}
534 
535 	feature->ops = drv->ops;
536 
537 	return ret;
538 }
539 
dfl_feature_drv_match(struct dfl_feature * feature,struct dfl_feature_driver * driver)540 static bool dfl_feature_drv_match(struct dfl_feature *feature,
541 				  struct dfl_feature_driver *driver)
542 {
543 	const struct dfl_feature_id *ids = driver->id_table;
544 
545 	if (ids) {
546 		while (ids->id) {
547 			if (ids->id == feature->id)
548 				return true;
549 			ids++;
550 		}
551 	}
552 	return false;
553 }
554 
555 /**
556  * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
557  * @pdev: feature device.
558  * @feature_drvs: drvs for sub features.
559  *
560  * This function will match sub features with given feature drvs list and
561  * use matched drv to init related sub feature.
562  *
563  * Return: 0 on success, negative error code otherwise.
564  */
int dfl_fpga_dev_feature_init(struct platform_device *pdev,
			      struct dfl_feature_driver *feature_drvs)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct dfl_feature_driver *drv = feature_drvs;
	struct dfl_feature *feature;
	int ret;

	/* the drv array is terminated by an entry with NULL ops */
	while (drv->ops) {
		dfl_fpga_dev_for_each_feature(fdata, feature) {
			if (dfl_feature_drv_match(feature, drv)) {
				ret = dfl_feature_instance_init(pdev, feature, drv);
				if (ret)
					goto exit;
			}
		}
		drv++;
	}

	/* all sub features initialized - expose them on the dfl bus */
	ret = dfl_devs_add(fdata);
	if (ret)
		goto exit;

	return 0;
exit:
	/* uinit tolerates partially initialized features (NULL ops skipped) */
	dfl_fpga_dev_feature_uinit(pdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
594 
dfl_chardev_uinit(void)595 static void dfl_chardev_uinit(void)
596 {
597 	int i;
598 
599 	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
600 		if (MAJOR(dfl_chrdevs[i].devt)) {
601 			unregister_chrdev_region(dfl_chrdevs[i].devt,
602 						 MINORMASK + 1);
603 			dfl_chrdevs[i].devt = MKDEV(0, 0);
604 		}
605 }
606 
dfl_chardev_init(void)607 static int dfl_chardev_init(void)
608 {
609 	int i, ret;
610 
611 	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
612 		ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
613 					  MINORMASK + 1, dfl_chrdevs[i].name);
614 		if (ret)
615 			goto exit;
616 	}
617 
618 	return 0;
619 
620 exit:
621 	dfl_chardev_uinit();
622 	return ret;
623 }
624 
dfl_get_devt(enum dfl_fpga_devt_type type,int id)625 static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
626 {
627 	if (type >= DFL_FPGA_DEVT_MAX)
628 		return 0;
629 
630 	return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
631 }
632 
633 /**
634  * dfl_fpga_dev_ops_register - register cdev ops for feature dev
635  *
636  * @pdev: feature dev.
637  * @fops: file operations for feature dev's cdev.
638  * @owner: owning module/driver.
639  *
640  * Return: 0 on success, negative error code otherwise.
641  */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	/* the cdev is embedded in platform data, so its storage lives as
	 * long as the platform device does */
	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
662 
663 /**
664  * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
665  * @pdev: feature dev.
666  */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	/* removes the cdev added by dfl_fpga_dev_ops_register() */
	cdev_del(&pdata->cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
674 
675 /**
676  * struct build_feature_devs_info - info collected during feature dev build.
677  *
678  * @dev: device to enumerate.
679  * @cdev: the container device for all feature devices.
680  * @nr_irqs: number of irqs for all feature devices.
681  * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
682  *	       this device.
683  * @type: the current FIU type.
684  * @ioaddr: header register region address of current FIU in enumeration.
685  * @start: register resource start of current FIU.
686  * @len: max register resource length of current FIU.
687  * @sub_features: a sub features linked list for feature device in enumeration.
688  * @feature_num: number of sub features for feature device in enumeration.
689  */
690 struct build_feature_devs_info {
691 	struct device *dev;
692 	struct dfl_fpga_cdev *cdev;
693 	unsigned int nr_irqs;
694 	int *irq_table;
695 
696 	enum dfl_id_type type;
697 	void __iomem *ioaddr;
698 	resource_size_t start;
699 	resource_size_t len;
700 	struct list_head sub_features;
701 	int feature_num;
702 };
703 
704 /**
705  * struct dfl_feature_info - sub feature info collected during feature dev build
706  *
707  * @fid: id of this sub feature.
708  * @revision: revision of this sub feature
709  * @dfh_version: version of Device Feature Header (DFH)
710  * @mmio_res: mmio resource of this sub feature.
711  * @ioaddr: mapped base address of mmio resource.
712  * @node: node in sub_features linked list.
713  * @irq_base: start of irq index in this sub feature.
714  * @nr_irqs: number of irqs of this sub feature.
715  * @param_size: size DFH parameters.
716  * @params: DFH parameter data.
717  */
718 struct dfl_feature_info {
719 	u16 fid;
720 	u8 revision;
721 	u8 dfh_version;
722 	struct resource mmio_res;
723 	void __iomem *ioaddr;
724 	struct list_head node;
725 	unsigned int irq_base;
726 	unsigned int nr_irqs;
727 	unsigned int param_size;
728 	u64 params[];
729 };
730 
/* track a new port's feature dev data on the container device's port list */
static void dfl_fpga_cdev_add_port_data(struct dfl_fpga_cdev *cdev,
					struct dfl_feature_dev_data *fdata)
{
	mutex_lock(&cdev->lock);
	list_add(&fdata->node, &cdev->port_dev_list);
	mutex_unlock(&cdev->lock);
}
738 
/* devm action: return the feature dev instance id to the per-type idr */
static void dfl_id_free_action(void *arg)
{
	struct dfl_feature_dev_data *fdata = arg;

	dfl_id_free(fdata->type, fdata->pdev_id);
}
745 
/*
 * binfo_create_feature_dev_data - build feature dev data from enumeration info.
 *
 * Allocates a dfl_feature_dev_data plus its feature and resource arrays and
 * fills them from the sub feature list collected in @binfo; each consumed
 * dfl_feature_info is unlinked and freed.  All allocations are devm-managed
 * against binfo->dev (with a devm action for the id), so error paths simply
 * return ERR_PTR() without explicit unwinding.
 *
 * Return: the new dfl_feature_dev_data on success, ERR_PTR() otherwise.
 */
static struct dfl_feature_dev_data *
binfo_create_feature_dev_data(struct build_feature_devs_info *binfo)
{
	enum dfl_id_type type = binfo->type;
	struct dfl_feature_info *finfo, *p;
	struct dfl_feature_dev_data *fdata;
	int ret, index = 0, res_idx = 0;

	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
		return ERR_PTR(-EINVAL);

	fdata = devm_kzalloc(binfo->dev, sizeof(*fdata), GFP_KERNEL);
	if (!fdata)
		return ERR_PTR(-ENOMEM);

	fdata->features = devm_kcalloc(binfo->dev, binfo->feature_num,
				       sizeof(*fdata->features), GFP_KERNEL);
	if (!fdata->features)
		return ERR_PTR(-ENOMEM);

	fdata->resources = devm_kcalloc(binfo->dev, binfo->feature_num,
					sizeof(*fdata->resources), GFP_KERNEL);
	if (!fdata->resources)
		return ERR_PTR(-ENOMEM);

	fdata->type = type;

	fdata->pdev_id = dfl_id_alloc(type, binfo->dev);
	if (fdata->pdev_id < 0)
		return ERR_PTR(fdata->pdev_id);

	/* id is freed automatically when binfo->dev's devres is released */
	ret = devm_add_action_or_reset(binfo->dev, dfl_id_free_action, fdata);
	if (ret)
		return ERR_PTR(ret);

	fdata->pdev_name = dfl_devs[type].name;
	fdata->num = binfo->feature_num;
	fdata->dfl_cdev = binfo->cdev;
	fdata->id = FEATURE_DEV_ID_UNUSED;
	mutex_init(&fdata->lock);
	lockdep_set_class_and_name(&fdata->lock, &dfl_pdata_keys[type],
				   dfl_pdata_key_strings[type]);

	/*
	 * the count should be initialized to 0 to make sure
	 *__fpga_port_enable() following __fpga_port_disable()
	 * works properly for port device.
	 * and it should always be 0 for fme device.
	 */
	WARN_ON(fdata->disable_count);

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &fdata->features[index++];
		struct dfl_feature_irq_ctx *ctx;
		unsigned int i;

		/* save resource information for each feature */
		feature->id = finfo->fid;
		feature->revision = finfo->revision;
		feature->dfh_version = finfo->dfh_version;

		if (finfo->param_size) {
			feature->params = devm_kmemdup(binfo->dev,
						       finfo->params, finfo->param_size,
						       GFP_KERNEL);
			if (!feature->params)
				return ERR_PTR(-ENOMEM);

			feature->param_size = finfo->param_size;
		}
		/*
		 * the FIU header feature has some fundamental functions (sriov
		 * set, port enable/disable) needed for the dfl bus device and
		 * other sub features. So its mmio resource should be mapped by
		 * DFL bus device. And we should not assign it to feature
		 * devices (dfl-fme/afu) again.
		 */
		if (is_header_feature(feature)) {
			feature->resource_index = -1;
			feature->ioaddr =
				devm_ioremap_resource(binfo->dev,
						      &finfo->mmio_res);
			if (IS_ERR(feature->ioaddr))
				return ERR_CAST(feature->ioaddr);
		} else {
			feature->resource_index = res_idx;
			fdata->resources[res_idx++] = finfo->mmio_res;
		}

		if (finfo->nr_irqs) {
			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
					   sizeof(*ctx), GFP_KERNEL);
			if (!ctx)
				return ERR_PTR(-ENOMEM);

			/* translate local irq indexes to Linux IRQ numbers */
			for (i = 0; i < finfo->nr_irqs; i++)
				ctx[i].irq =
					binfo->irq_table[finfo->irq_base + i];

			feature->irq_ctx = ctx;
			feature->nr_irqs = finfo->nr_irqs;
		}

		list_del(&finfo->node);
		kfree(finfo);
	}

	fdata->resource_num = res_idx;

	return fdata;
}
858 
859 /*
860  * register current feature device, it is called when we need to switch to
861  * another feature parsing or we have parsed all features on given device
862  * feature list.
863  */
/*
 * Create and register the platform device backing @fdata, wiring each
 * sub feature's back-pointer to it.  On failure the platform device is
 * dropped and all back-pointers are cleared again.
 */
static int feature_dev_register(struct dfl_feature_dev_data *fdata)
{
	struct dfl_feature_platform_data pdata = {};
	struct platform_device *fdev;
	struct dfl_feature *feature;
	int ret;

	fdev = platform_device_alloc(fdata->pdev_name, fdata->pdev_id);
	if (!fdev)
		return -ENOMEM;

	fdata->dev = fdev;

	fdev->dev.parent = &fdata->dfl_cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[fdata->type].devt_type, fdev->id);

	dfl_fpga_dev_for_each_feature(fdata, feature)
		feature->dev = fdev;

	ret = platform_device_add_resources(fdev, fdata->resources,
					    fdata->resource_num);
	if (ret)
		goto err_put_dev;

	/* pdata is copied by the platform core, so a stack copy suffices */
	pdata.fdata = fdata;
	ret = platform_device_add_data(fdev, &pdata, sizeof(pdata));
	if (ret)
		goto err_put_dev;

	ret = platform_device_add(fdev);
	if (ret)
		goto err_put_dev;

	return 0;

err_put_dev:
	platform_device_put(fdev);

	fdata->dev = NULL;

	dfl_fpga_dev_for_each_feature(fdata, feature)
		feature->dev = NULL;

	return ret;
}
909 
/* tear down the platform device and clear back-pointers set at register time */
static void feature_dev_unregister(struct dfl_feature_dev_data *fdata)
{
	struct dfl_feature *feature;

	platform_device_unregister(fdata->dev);

	fdata->dev = NULL;

	dfl_fpga_dev_for_each_feature(fdata, feature)
		feature->dev = NULL;
}
921 
/*
 * Turn the info collected in @binfo into a registered feature device and
 * attach it to the container device (port list for ports, fme_dev for FME).
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_dev_data *fdata;
	int ret;

	fdata = binfo_create_feature_dev_data(binfo);
	if (IS_ERR(fdata))
		return PTR_ERR(fdata);

	ret = feature_dev_register(fdata);
	if (ret)
		return ret;

	if (binfo->type == PORT_ID)
		dfl_fpga_cdev_add_port_data(binfo->cdev, fdata);
	else
		/* hold a reference on the FME device for the container */
		binfo->cdev->fme_dev = get_device(&fdata->dev->dev);

	/* reset the binfo for next FIU */
	binfo->type = DFL_ID_MAX;

	return 0;
}
945 
build_info_free(struct build_feature_devs_info * binfo)946 static void build_info_free(struct build_feature_devs_info *binfo)
947 {
948 	struct dfl_feature_info *finfo, *p;
949 
950 	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
951 		list_del(&finfo->node);
952 		kfree(finfo);
953 	}
954 
955 	devm_kfree(binfo->dev, binfo);
956 }
957 
feature_size(u64 value)958 static inline u32 feature_size(u64 value)
959 {
960 	u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, value);
961 	/* workaround for private features with invalid size, use 4K instead */
962 	return ofst ? ofst : 4096;
963 }
964 
feature_id(u64 value)965 static u16 feature_id(u64 value)
966 {
967 	u16 id = FIELD_GET(DFH_ID, value);
968 	u8 type = FIELD_GET(DFH_TYPE, value);
969 
970 	if (type == DFH_TYPE_FIU)
971 		return FEATURE_ID_FIU_HEADER;
972 	else if (type == DFH_TYPE_PRIVATE)
973 		return id;
974 	else if (type == DFH_TYPE_AFU)
975 		return FEATURE_ID_AFU;
976 
977 	WARN_ON(1);
978 	return 0;
979 }
980 
/*
 * Walk a DFHv1 parameter block list of at most @max bytes looking for
 * @param_id.  Returns a pointer to the matching parameter header, or NULL.
 */
static u64 *find_param(u64 *params, resource_size_t max, int param_id)
{
	u64 *end = params + max / sizeof(u64);

	while (params < end) {
		u64 hdr = *params;

		if (FIELD_GET(DFHv1_PARAM_HDR_ID, hdr) == param_id)
			return params;

		/* EOP flags the final parameter block in the list */
		if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, hdr))
			break;

		/* next offset is in u64 words, counted from this header */
		params += FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, hdr);
	}

	return NULL;
}
1000 
1001 /**
1002  * dfh_find_param() - find parameter block for the given parameter id
1003  * @dfl_dev: dfl device
1004  * @param_id: id of dfl parameter
1005  * @psize: destination to store size of parameter data in bytes
1006  *
1007  * Return: pointer to start of parameter data, PTR_ERR otherwise.
1008  */
void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize)
{
	u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id);

	if (!phdr)
		return ERR_PTR(-ENOENT);

	if (psize)
		/*
		 * NEXT_OFFSET counts u64 words including the header itself
		 * (find_param() advances by exactly this amount), so the
		 * payload is one word less than the offset.
		 */
		*psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64);

	/* parameter data starts right after the one-word header */
	return phdr + 1;
}
EXPORT_SYMBOL_GPL(dfh_find_param);
1022 
/*
 * parse_feature_irqs() - discover the interrupt resources of one feature.
 * @binfo: build feature devices information.
 * @ofst: offset of the feature's DFH within the mapped region.
 * @finfo: feature info; irq_base/nr_irqs are filled in on success.
 *
 * Return: 0 on success (including "feature has no interrupts"), -EINVAL
 * when the advertised vectors do not fit the device's irq table.
 */
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
			      resource_size_t ofst, struct dfl_feature_info *finfo)
{
	void __iomem *base = binfo->ioaddr + ofst;
	unsigned int i, ibase, inr = 0;
	void *params = finfo->params;
	enum dfl_id_type type;
	u16 fid = finfo->fid;
	int virq;
	u64 *p;
	u64 v;

	/* ibase is only consumed below when inr was set to a non-zero count */
	switch (finfo->dfh_version) {
	case 0:
		/*
		 * DFHv0 only provides MMIO resource information for each feature
		 * in the DFL header.  There is no generic interrupt information.
		 * Instead, features with interrupt functionality provide
		 * the information in feature specific registers.
		 */
		type = binfo->type;
		if (type == PORT_ID) {
			switch (fid) {
			case PORT_FEATURE_ID_UINT:
				v = readq(base + PORT_UINT_CAP);
				ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
				inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
				break;
			case PORT_FEATURE_ID_ERROR:
				v = readq(base + PORT_ERROR_CAP);
				ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
				inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
				break;
			}
		} else if (type == FME_ID) {
			switch (fid) {
			case FME_FEATURE_ID_GLOBAL_ERR:
				v = readq(base + FME_ERROR_CAP);
				ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
				inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
				break;
			}
		}
		break;

	case 1:
		/*
		 * DFHv1 provides interrupt resource information in DFHv1
		 * parameter blocks.
		 */
		p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X);
		if (!p)
			break;

		/* start vector and vector count live in the qword after the header */
		p++;
		ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p);
		inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p);
		break;

	default:
		dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version);
		break;
	}

	/* no interrupts advertised: record an empty range and succeed */
	if (!inr) {
		finfo->irq_base = 0;
		finfo->nr_irqs = 0;
		return 0;
	}

	dev_dbg(binfo->dev, "feature: 0x%x, irq_base: %u, nr_irqs: %u\n",
		fid, ibase, inr);

	/* the advertised vector range must fit within the enum irq table */
	if (ibase + inr > binfo->nr_irqs) {
		dev_err(binfo->dev,
			"Invalid interrupt number in feature 0x%x\n", fid);
		return -EINVAL;
	}

	/* every table entry in the range must be a plausible Linux irq number */
	for (i = 0; i < inr; i++) {
		virq = binfo->irq_table[ibase + i];
		if (virq < 0 || virq > NR_IRQS) {
			dev_err(binfo->dev,
				"Invalid irq table entry for feature 0x%x\n",
				fid);
			return -EINVAL;
		}
	}

	finfo->irq_base = ibase;
	finfo->nr_irqs = inr;

	return 0;
}
1117 
/*
 * dfh_get_param_size() - total size in bytes of a DFHv1 parameter area.
 * @dfh_base: mapped base of the feature's DFHv1 header.
 * @max: feature region size; walk must stay within it.
 *
 * Return: parameter area size in bytes, 0 when the feature has no
 * parameters, -EINVAL on a malformed (zero-length) block, -ENOENT when
 * no end-of-parameters mark is found within @max.
 */
static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max)
{
	int size = 0;
	u64 v, next;

	/* nothing to measure unless the header advertises parameter blocks */
	if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS,
		       readq(dfh_base + DFHv1_CSR_SIZE_GRP)))
		return 0;

	while (size + DFHv1_PARAM_HDR < max) {
		v = readq(dfh_base + DFHv1_PARAM_HDR + size);

		/* a zero next-offset would never advance: reject it */
		next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
		if (!next)
			return -EINVAL;

		size += next * sizeof(u64);

		/* EOP marks the last block: size now covers the whole area */
		if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
			return size;
	}

	return -ENOENT;
}
1142 
1143 /*
1144  * when create sub feature instances, for private features, it doesn't need
1145  * to provide resource size and feature id as they could be read from DFH
1146  * register. For afu sub feature, its register region only contains user
1147  * defined registers, so never trust any information from it, just use the
1148  * resource size information provided by its parent FIU.
1149  */
1150 static int
create_feature_instance(struct build_feature_devs_info * binfo,resource_size_t ofst,resource_size_t size,u16 fid)1151 create_feature_instance(struct build_feature_devs_info *binfo,
1152 			resource_size_t ofst, resource_size_t size, u16 fid)
1153 {
1154 	struct dfl_feature_info *finfo;
1155 	resource_size_t start, end;
1156 	int dfh_psize = 0;
1157 	u8 revision = 0;
1158 	u64 v, addr_off;
1159 	u8 dfh_ver = 0;
1160 	int ret;
1161 
1162 	if (fid != FEATURE_ID_AFU) {
1163 		v = readq(binfo->ioaddr + ofst);
1164 		revision = FIELD_GET(DFH_REVISION, v);
1165 		dfh_ver = FIELD_GET(DFH_VERSION, v);
1166 		/* read feature size and id if inputs are invalid */
1167 		size = size ? size : feature_size(v);
1168 		fid = fid ? fid : feature_id(v);
1169 		if (dfh_ver == 1) {
1170 			dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size);
1171 			if (dfh_psize < 0) {
1172 				dev_err(binfo->dev,
1173 					"failed to read size of DFHv1 parameters %d\n",
1174 					dfh_psize);
1175 				return dfh_psize;
1176 			}
1177 			dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize);
1178 		}
1179 	}
1180 
1181 	if (binfo->len - ofst < size)
1182 		return -EINVAL;
1183 
1184 	finfo = kzalloc_flex(*finfo, params, dfh_psize / sizeof(u64));
1185 	if (!finfo)
1186 		return -ENOMEM;
1187 
1188 	memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize);
1189 	finfo->param_size = dfh_psize;
1190 
1191 	finfo->fid = fid;
1192 	finfo->revision = revision;
1193 	finfo->dfh_version = dfh_ver;
1194 	if (dfh_ver == 1) {
1195 		v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR);
1196 		addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v);
1197 		if (FIELD_GET(DFHv1_CSR_ADDR_REL, v))
1198 			start = addr_off << 1;
1199 		else
1200 			start = binfo->start + ofst + addr_off;
1201 
1202 		v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP);
1203 		end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1;
1204 	} else {
1205 		start = binfo->start + ofst;
1206 		end = start + size - 1;
1207 	}
1208 	finfo->mmio_res.flags = IORESOURCE_MEM;
1209 	finfo->mmio_res.start = start;
1210 	finfo->mmio_res.end = end;
1211 
1212 	ret = parse_feature_irqs(binfo, ofst, finfo);
1213 	if (ret) {
1214 		kfree(finfo);
1215 		return ret;
1216 	}
1217 
1218 	list_add_tail(&finfo->node, &binfo->sub_features);
1219 	binfo->feature_num++;
1220 
1221 	return 0;
1222 }
1223 
/* Create the feature instance for a Port's AFU from the Port capability. */
static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
				  resource_size_t ofst)
{
	u64 cap = readq(binfo->ioaddr + PORT_HDR_CAP);
	u32 mmio_size;

	/* the capability register reports the AFU MMIO size in 1KB units */
	mmio_size = FIELD_GET(PORT_CAP_MMIO_SIZE, cap) << 10;
	WARN_ON(!mmio_size);

	return create_feature_instance(binfo, ofst, mmio_size, FEATURE_ID_AFU);
}
1234 
1235 #define is_feature_dev_detected(binfo) ((binfo)->type != DFL_ID_MAX)
1236 
parse_feature_afu(struct build_feature_devs_info * binfo,resource_size_t ofst)1237 static int parse_feature_afu(struct build_feature_devs_info *binfo,
1238 			     resource_size_t ofst)
1239 {
1240 	if (!is_feature_dev_detected(binfo)) {
1241 		dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
1242 		return -EINVAL;
1243 	}
1244 
1245 	switch (binfo->type) {
1246 	case PORT_ID:
1247 		return parse_feature_port_afu(binfo, ofst);
1248 	default:
1249 		dev_info(binfo->dev, "AFU belonging to FIU is not supported yet.\n");
1250 	}
1251 
1252 	return 0;
1253 }
1254 
/*
 * build_info_prepare() - claim and map the [start, start + len) window of
 * the DFL and record it in @binfo for subsequent parsing.
 *
 * Return: 0 on success, -EBUSY if the region is already claimed, -ENOMEM
 * if it cannot be ioremapped. Resources are device-managed.
 */
static int build_info_prepare(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	struct device *dev = binfo->dev;
	void __iomem *ioaddr;

	if (!devm_request_mem_region(dev, start, len, dev_name(dev))) {
		dev_err(dev, "request region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -EBUSY;
	}

	ioaddr = devm_ioremap(dev, start, len);
	if (!ioaddr) {
		dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n",
			&start, &len);
		return -ENOMEM;
	}

	/* record the active window; parsers derive offsets from these */
	binfo->start = start;
	binfo->len = len;
	binfo->ioaddr = ioaddr;

	return 0;
}
1280 
/* Undo build_info_prepare(): unmap and release the current binfo window. */
static void build_info_complete(struct build_feature_devs_info *binfo)
{
	devm_iounmap(binfo->dev, binfo->ioaddr);
	devm_release_mem_region(binfo->dev, binfo->start, binfo->len);
}
1286 
/*
 * parse_feature_fiu() - handle a FIU feature header found at @ofst.
 *
 * A new FIU terminates the previous feature device: if one was being
 * built, it is committed and the binfo window is re-based to start at
 * this FIU before parsing continues.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     resource_size_t ofst)
{
	enum dfl_id_type type;
	int ret = 0;
	u32 offset;
	u16 id;
	u64 v;

	if (is_feature_dev_detected(binfo)) {
		/* close out the previous FIU's window and register its device */
		build_info_complete(binfo);

		ret = build_info_commit_dev(binfo);
		if (ret)
			return ret;

		/* re-base the window so this FIU sits at offset 0 */
		ret = build_info_prepare(binfo, binfo->start + ofst,
					 binfo->len - ofst);
		if (ret)
			return ret;
	}

	v = readq(binfo->ioaddr + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform data for this feature dev */
	type = dfh_id_to_type(id);
	if (type >= DFL_ID_MAX)
		return -EINVAL;

	binfo->type = type;
	binfo->feature_num = 0;
	INIT_LIST_HEAD(&binfo->sub_features);

	/* offset/size/id of 0 mean: read them from the DFH at window start */
	ret = create_feature_instance(binfo, 0, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(binfo->ioaddr + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}
1337 
/* Create a feature instance for a private feature of the current FIU. */
static int parse_feature_private(struct build_feature_devs_info *binfo,
				 resource_size_t ofst)
{
	/* size 0 and fid 0 let create_feature_instance() read them from DFH */
	if (is_feature_dev_detected(binfo))
		return create_feature_instance(binfo, ofst, 0, 0);

	dev_err(binfo->dev, "the private feature 0x%x does not belong to any AFU.\n",
		feature_id(readq(binfo->ioaddr + ofst)));
	return -EINVAL;
}
1349 
1350 /**
1351  * parse_feature - parse a feature on given device feature list
1352  *
1353  * @binfo: build feature devices information.
1354  * @ofst: offset to current FIU header
1355  */
parse_feature(struct build_feature_devs_info * binfo,resource_size_t ofst)1356 static int parse_feature(struct build_feature_devs_info *binfo,
1357 			 resource_size_t ofst)
1358 {
1359 	u64 v;
1360 	u32 type;
1361 
1362 	v = readq(binfo->ioaddr + ofst + DFH);
1363 	type = FIELD_GET(DFH_TYPE, v);
1364 
1365 	switch (type) {
1366 	case DFH_TYPE_AFU:
1367 		return parse_feature_afu(binfo, ofst);
1368 	case DFH_TYPE_PRIVATE:
1369 		return parse_feature_private(binfo, ofst);
1370 	case DFH_TYPE_FIU:
1371 		return parse_feature_fiu(binfo, ofst);
1372 	default:
1373 		dev_info(binfo->dev,
1374 			 "Feature Type %x is not supported.\n", type);
1375 	}
1376 
1377 	return 0;
1378 }
1379 
/*
 * parse_feature_list() - walk one Device Feature List and build feature devs.
 * @binfo: build feature devices information.
 * @start: physical start address of the DFL.
 * @len: length of the DFL in bytes.
 *
 * Follows the DFH next-header chain until EOL or a zero offset, then
 * commits the last feature device being built.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      resource_size_t start, resource_size_t len)
{
	resource_size_t end = start + len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	ret = build_info_prepare(binfo, start, len);
	if (ret)
		return ret;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		/*
		 * NOTE: parse_feature() may re-base binfo->start (see
		 * parse_feature_fiu), so offsets are recomputed from it
		 * on every iteration rather than cached.
		 */
		ret = parse_feature(binfo, start - binfo->start);
		if (ret)
			return ret;

		v = readq(binfo->ioaddr + start - binfo->start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	build_info_complete(binfo);

	if (is_feature_dev_detected(binfo))
		ret = build_info_commit_dev(binfo);

	return ret;
}
1419 
/*
 * dfl_fpga_enum_info_alloc() - allocate an enumeration info structure.
 * @dev: parent device; a reference is taken and held until
 *       dfl_fpga_enum_info_free() releases it.
 *
 * Return: pointer to the new info on success, NULL on allocation failure.
 */
struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
{
	struct dfl_fpga_enum_info *info;

	get_device(dev);

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info) {
		/* drop the reference taken above on failure */
		put_device(dev);
		return NULL;
	}

	info->dev = dev;
	INIT_LIST_HEAD(&info->dfls);

	return info;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
1438 
dfl_fpga_enum_info_free(struct dfl_fpga_enum_info * info)1439 void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
1440 {
1441 	struct dfl_fpga_enum_dfl *tmp, *dfl;
1442 	struct device *dev;
1443 
1444 	if (!info)
1445 		return;
1446 
1447 	dev = info->dev;
1448 
1449 	/* remove all device feature lists in the list. */
1450 	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
1451 		list_del(&dfl->node);
1452 		devm_kfree(dev, dfl);
1453 	}
1454 
1455 	/* remove irq table */
1456 	if (info->irq_table)
1457 		devm_kfree(dev, info->irq_table);
1458 
1459 	devm_kfree(dev, info);
1460 	put_device(dev);
1461 }
1462 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
1463 
/**
 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
 *
 * @info: ptr to dfl_fpga_enum_info
 * @start: mmio resource address of the device feature list.
 * @len: mmio resource length of the device feature list.
 *
 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
 * function to add information of each DFL to common data structure for next
 * step enumeration.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
			       resource_size_t start, resource_size_t len)
{
	struct dfl_fpga_enum_dfl *dfl;

	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
	if (!dfl)
		return -ENOMEM;

	dfl->start = start;
	dfl->len = len;

	/* appended in order; enumeration later walks this list front-to-back */
	list_add_tail(&dfl->node, &info->dfls);

	return 0;
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
1494 
1495 /**
1496  * dfl_fpga_enum_info_add_irq - add irq table to enum info
1497  *
1498  * @info: ptr to dfl_fpga_enum_info
1499  * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
1500  * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
1501  *	       this device.
1502  *
1503  * One FPGA device may have several interrupts. This function adds irq
1504  * information of the DFL fpga device to enum info for next step enumeration.
1505  * This function should be called before dfl_fpga_feature_devs_enumerate().
1506  * As we only support one irq domain for all DFLs in the same enum info, adding
1507  * irq table a second time for the same enum info will return error.
1508  *
1509  * If we need to enumerate DFLs which belong to different irq domains, we
1510  * should fill more enum info and enumerate them one by one.
1511  *
1512  * Return: 0 on success, negative error code otherwise.
1513  */
dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info * info,unsigned int nr_irqs,int * irq_table)1514 int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
1515 			       unsigned int nr_irqs, int *irq_table)
1516 {
1517 	if (!nr_irqs || !irq_table)
1518 		return -EINVAL;
1519 
1520 	if (info->irq_table)
1521 		return -EEXIST;
1522 
1523 	info->irq_table = devm_kmemdup(info->dev, irq_table,
1524 				       sizeof(int) * nr_irqs, GFP_KERNEL);
1525 	if (!info->irq_table)
1526 		return -ENOMEM;
1527 
1528 	info->nr_irqs = nr_irqs;
1529 
1530 	return 0;
1531 }
1532 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
1533 
/* device_for_each_child() callback: unregister one feature device. */
static int remove_feature_dev(struct device *dev, void *data)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);

	feature_dev_unregister(fdata);

	/* always 0 so iteration continues over all children */
	return 0;
}
1542 
/* Unregister every feature device under the container's base region. */
static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
1547 
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: information for enumeration.
 *
 * This function creates a container device (base FPGA region), enumerates
 * feature devices based on the enumeration info and creates platform devices
 * under the container device.
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	/* the base FPGA region acts as the container for all feature devs */
	cdev->region = fpga_region_register(info->dev, NULL, NULL);
	if (IS_ERR(cdev->region)) {
		ret = PTR_ERR(cdev->region);
		goto free_cdev_exit;
	}

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	/* DFL_ID_MAX means "no feature dev detected yet" for the parsers */
	binfo->type = DFL_ID_MAX;
	binfo->dev = info->dev;
	binfo->cdev = cdev;

	binfo->nr_irqs = info->nr_irqs;
	if (info->nr_irqs)
		binfo->irq_table = info->irq_table;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl->start, dfl->len);
		if (ret) {
			/* tear down devices created by earlier lists too */
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
1622 
/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature devices under given container
 * devices.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	mutex_lock(&cdev->lock);
	/* drop the reference held on the FME device, if one was registered */
	if (cdev->fme_dev)
		put_device(cdev->fme_dev);

	mutex_unlock(&cdev->lock);

	remove_feature_devs(cdev);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
1644 
1645 /**
1646  * __dfl_fpga_cdev_find_port_data - find a port under given container device
1647  *
1648  * @cdev: container device
1649  * @data: data passed to match function
1650  * @match: match function used to find specific port from the port device list
1651  *
1652  * Find a port device under container device. This function needs to be
1653  * invoked with lock held.
1654  *
1655  * Return: pointer to port's platform device if successful, NULL otherwise.
1656  *
1657  * NOTE: you will need to drop the device reference with put_device() after use.
1658  */
1659 struct dfl_feature_dev_data *
__dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev * cdev,void * data,int (* match)(struct dfl_feature_dev_data *,void *))1660 __dfl_fpga_cdev_find_port_data(struct dfl_fpga_cdev *cdev, void *data,
1661 			       int (*match)(struct dfl_feature_dev_data *, void *))
1662 {
1663 	struct dfl_feature_dev_data *fdata;
1664 
1665 	list_for_each_entry(fdata, &cdev->port_dev_list, node) {
1666 		if (match(fdata, data))
1667 			return fdata;
1668 	}
1669 
1670 	return NULL;
1671 }
1672 EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port_data);
1673 
dfl_fpga_init(void)1674 static int __init dfl_fpga_init(void)
1675 {
1676 	int ret;
1677 
1678 	ret = bus_register(&dfl_bus_type);
1679 	if (ret)
1680 		return ret;
1681 
1682 	dfl_ids_init();
1683 
1684 	ret = dfl_chardev_init();
1685 	if (ret) {
1686 		dfl_ids_destroy();
1687 		bus_unregister(&dfl_bus_type);
1688 	}
1689 
1690 	return ret;
1691 }
1692 
/**
 * dfl_fpga_cdev_release_port - release a port platform device
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to release a port platform device. This is a
 * mandatory step before turn a port from PF into VF for SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_dev_data *fdata;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
					       dfl_fpga_check_port_id);
	if (!fdata)
		goto unlock_exit;

	/* no device means the port was already released */
	if (!fdata->dev) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* claim exclusive use so no user can hold the port while it goes away */
	mutex_lock(&fdata->lock);
	ret = dfl_feature_dev_use_begin(fdata, true);
	mutex_unlock(&fdata->lock);
	if (ret)
		goto unlock_exit;

	feature_dev_unregister(fdata);
	cdev->released_port_num++;
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1733 
/**
 * dfl_fpga_cdev_assign_port - assign a port platform device back
 *
 * @cdev: parent container device.
 * @port_id: id of the port platform device.
 *
 * This function allows user to assign a port platform device back. This is
 * a mandatory step after disable SRIOV support.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
{
	struct dfl_feature_dev_data *fdata;
	int ret = -ENODEV;

	mutex_lock(&cdev->lock);
	fdata = __dfl_fpga_cdev_find_port_data(cdev, &port_id,
					       dfl_fpga_check_port_id);
	if (!fdata)
		goto unlock_exit;

	/* a live device means the port was never released */
	if (fdata->dev) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	ret = feature_dev_register(fdata);
	if (ret)
		goto unlock_exit;

	/* drop the exclusive use taken by dfl_fpga_cdev_release_port() */
	mutex_lock(&fdata->lock);
	dfl_feature_dev_use_end(fdata);
	mutex_unlock(&fdata->lock);

	cdev->released_port_num--;
unlock_exit:
	mutex_unlock(&cdev->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1775 
/*
 * config_port_access_mode() - switch a port between PF and VF access.
 * @fme_dev: the FME feature device that owns the port offset registers.
 * @port_id: index of the port to reconfigure.
 * @is_vf: true to route the port to VF access, false for PF.
 *
 * Read-modify-write of the access-control field in the FME's per-port
 * offset register.
 */
static void config_port_access_mode(struct device *fme_dev, int port_id,
				    bool is_vf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(fme_dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

	v = readq(base + FME_HDR_PORT_OFST(port_id));

	/* replace only the access-control field, preserving the rest */
	v &= ~FME_PORT_OFST_ACC_CTRL;
	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);

	writeq(v, base + FME_HDR_PORT_OFST(port_id));
}

#define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
#define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1796 
1797 /**
1798  * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
1799  *
1800  * @cdev: parent container device.
1801  *
1802  * This function is needed in sriov configuration routine. It could be used to
1803  * configure the all released ports from VF access mode to PF.
1804  */
dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev * cdev)1805 void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1806 {
1807 	struct dfl_feature_dev_data *fdata;
1808 
1809 	mutex_lock(&cdev->lock);
1810 	list_for_each_entry(fdata, &cdev->port_dev_list, node) {
1811 		if (fdata->dev)
1812 			continue;
1813 
1814 		config_port_pf_mode(cdev->fme_dev, fdata->id);
1815 	}
1816 	mutex_unlock(&cdev->lock);
1817 }
1818 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1819 
1820 /**
1821  * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
1822  *
1823  * @cdev: parent container device.
1824  * @num_vfs: VF device number.
1825  *
1826  * This function is needed in sriov configuration routine. It could be used to
1827  * configure the released ports from PF access mode to VF.
1828  *
1829  * Return: 0 on success, negative error code otherwise.
1830  */
dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev * cdev,int num_vfs)1831 int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
1832 {
1833 	struct dfl_feature_dev_data *fdata;
1834 	int ret = 0;
1835 
1836 	mutex_lock(&cdev->lock);
1837 	/*
1838 	 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
1839 	 * device, so if released port number doesn't match VF device number,
1840 	 * then reject the request with -EINVAL error code.
1841 	 */
1842 	if (cdev->released_port_num != num_vfs) {
1843 		ret = -EINVAL;
1844 		goto done;
1845 	}
1846 
1847 	list_for_each_entry(fdata, &cdev->port_dev_list, node) {
1848 		if (fdata->dev)
1849 			continue;
1850 
1851 		config_port_vf_mode(cdev->fme_dev, fdata->id);
1852 	}
1853 done:
1854 	mutex_unlock(&cdev->lock);
1855 	return ret;
1856 }
1857 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
1858 
dfl_irq_handler(int irq,void * arg)1859 static irqreturn_t dfl_irq_handler(int irq, void *arg)
1860 {
1861 	struct eventfd_ctx *trigger = arg;
1862 
1863 	eventfd_signal(trigger);
1864 	return IRQ_HANDLED;
1865 }
1866 
/*
 * do_set_irq_trigger() - (re)bind one feature irq to an eventfd.
 * @feature: dfl sub feature owning the irq.
 * @idx: local irq index within the feature.
 * @fd: eventfd to bind; a negative fd only unbinds any existing trigger.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
			      int fd)
{
	struct platform_device *pdev = feature->dev;
	struct eventfd_ctx *trigger;
	int irq, ret;

	irq = feature->irq_ctx[idx].irq;

	/* tear down any previous binding before installing a new one */
	if (feature->irq_ctx[idx].trigger) {
		free_irq(irq, feature->irq_ctx[idx].trigger);
		kfree(feature->irq_ctx[idx].name);
		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
		feature->irq_ctx[idx].trigger = NULL;
	}

	/* negative fd means unbind-only: we are done */
	if (fd < 0)
		return 0;

	feature->irq_ctx[idx].name =
		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%x)", idx,
			  dev_name(&pdev->dev), feature->id);
	if (!feature->irq_ctx[idx].name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		ret = PTR_ERR(trigger);
		goto free_name;
	}

	/* the eventfd ctx doubles as the dev_id cookie for free_irq() */
	ret = request_irq(irq, dfl_irq_handler, 0,
			  feature->irq_ctx[idx].name, trigger);
	if (!ret) {
		feature->irq_ctx[idx].trigger = trigger;
		return ret;
	}

	eventfd_ctx_put(trigger);
free_name:
	kfree(feature->irq_ctx[idx].name);

	return ret;
}
1911 
/**
 * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
 *
 * @feature: dfl sub feature.
 * @start: start of irq index in this dfl sub feature.
 * @count: number of irqs.
 * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative.
 *	 unbind "count" specified number of irqs if fds ptr is NULL.
 *
 * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if
 * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is
 * NULL.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
			      unsigned int count, int32_t *fds)
{
	unsigned int i;
	int ret = 0;

	/* overflow */
	if (unlikely(start + count < start))
		return -EINVAL;

	/* exceeds nr_irqs */
	if (start + count > feature->nr_irqs)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		/* NULL fds or a negative entry requests an unbind (-1) */
		int fd = fds ? fds[i] : -1;

		ret = do_set_irq_trigger(feature, start + i, fd);
		if (ret) {
			/* roll back bindings made so far to keep all-or-nothing */
			while (i--)
				do_set_irq_trigger(feature, start + i, -1);
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
1955 
/**
 * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
				    struct dfl_feature *feature,
				    unsigned long arg)
{
	/* arg is a user pointer to a __u32 receiving the irq count */
	return put_user(feature->nr_irqs, (__u32 __user *)arg);
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
1971 
/**
 * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
 * @pdev: the feature device which has the sub feature
 * @feature: the dfl sub feature
 * @arg: ioctl argument
 *
 * Return: 0 on success, negative error code otherwise.
 */
long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
			       struct dfl_feature *feature,
			       unsigned long arg)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct dfl_fpga_irq_set hdr;
	s32 *fds;
	long ret;

	if (!feature->nr_irqs)
		return -ENOENT;

	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	/* range check including wrap-around of start + count */
	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
	    (hdr.start + hdr.count < hdr.start))
		return -EINVAL;

	/* the fds array follows the header in the user buffer */
	fds = memdup_array_user((void __user *)(arg + sizeof(hdr)),
				hdr.count, sizeof(s32));
	if (IS_ERR(fds))
		return PTR_ERR(fds);

	mutex_lock(&fdata->lock);
	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
	mutex_unlock(&fdata->lock);

	kfree(fds);
	return ret;
}
EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
2012 
/* Module exit: tear down in reverse order of dfl_fpga_init(). */
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
	bus_unregister(&dfl_bus_type);
}
2019 
2020 subsys_initcall(dfl_fpga_init);
2021 module_exit(dfl_fpga_exit);
2022 
2023 MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
2024 MODULE_AUTHOR("Intel Corporation");
2025 MODULE_LICENSE("GPL v2");
2026