// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Christopher Rauer <christopher.rauer@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-region.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"
#include "dfl-fme-pr.h"

static struct dfl_fme_region *
dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;

	list_for_each_entry(fme_region, &fme->region_list, node)
		if (fme_region->port_id == port_id)
			return fme_region;

	return NULL;
}

static int dfl_fme_region_match(struct device *dev, const void *data)
{
	return dev->parent == data;
}

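/*
 * Look up the fpga_region created by the dfl-fme-region driver for the given
 * port. fpga_region_class_find() takes a reference on the region device, so
 * callers must drop it with put_device() once they are done with the region.
 */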
static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;
	struct fpga_region *region;

	fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
	if (!fme_region)
		return NULL;

	region = fpga_region_class_find(NULL, &fme_region->region->dev,
					dfl_fme_region_match);
	if (!region)
		return NULL;

	return region;
}

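/**
 * fme_pr - handle the DFL_FPGA_FME_PORT_PR ioctl
 * @pdev: fme platform device
 * @arg: ioctl argument, a user pointer to struct dfl_fpga_fme_port_pr
 *
 * Copy the partial reconfiguration bitstream from userspace into a vmalloc'd
 * buffer, look up the fpga_region belonging to the requested port and program
 * it through the fpga-region framework.
 */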
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __user *argp = (void __user *)arg;
	struct dfl_fpga_fme_port_pr port_pr;
	struct fpga_image_info *info;
	struct fpga_region *region;
	void __iomem *fme_hdr;
	struct dfl_fme *fme;
	unsigned long minsz;
	void *buf = NULL;
	int ret = 0;
	u64 v;

	minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);

	if (copy_from_user(&port_pr, argp, minsz))
		return -EFAULT;

	if (port_pr.argsz < minsz || port_pr.flags)
		return -EINVAL;

	if (!IS_ALIGNED(port_pr.buffer_size, 4))
		return -EINVAL;

	/* get fme header region */
	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	/* check port id */
	v = readq(fme_hdr + FME_HDR_CAP);
	if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
		dev_dbg(&pdev->dev, "port id exceeds the number of ports\n");
		return -EINVAL;
	}

	if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address,
		       port_pr.buffer_size))
		return -EFAULT;

	buf = vmalloc(port_pr.buffer_size);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf,
			   (void __user *)(unsigned long)port_pr.buffer_address,
			   port_pr.buffer_size)) {
		ret = -EFAULT;
		goto free_exit;
	}

	/* prepare fpga_image_info for PR */
	info = fpga_image_info_alloc(&pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto free_exit;
	}

	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;

	mutex_lock(&pdata->lock);
	fme = dfl_fpga_pdata_get_private(pdata);
	/* fme device has been unregistered. */
	if (!fme) {
		ret = -EINVAL;
		/* info has not been handed to the region yet, free it here */
		fpga_image_info_free(info);
		goto unlock_exit;
	}

	region = dfl_fme_region_find(fme, port_pr.port_id);
	if (!region) {
		ret = -EINVAL;
		fpga_image_info_free(info);
		goto unlock_exit;
	}

	fpga_image_info_free(region->info);

	info->buf = buf;
	info->count = port_pr.buffer_size;
	info->region_id = port_pr.port_id;
	region->info = info;

	ret = fpga_region_program_fpga(region);

	/*
	 * Userspace can reset the PR region's logic by disabling and
	 * re-enabling the bridge between acceleration runs, so there is no
	 * need to keep holding the bridges after partial reconfiguration.
	 */
	if (region->get_bridges)
		fpga_bridges_put(&region->bridge_list);

	put_device(&region->dev);
unlock_exit:
	mutex_unlock(&pdata->lock);
free_exit:
	vfree(buf);
	if (copy_to_user((void __user *)arg, &port_pr, minsz))
		return -EFAULT;

	return ret;
}
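
/*
 * A minimal userspace sketch of issuing the ioctl handled above, assuming
 * the FME character device is exposed as /dev/dfl-fme.0 (the actual node
 * name depends on the enumerated device); struct dfl_fpga_fme_port_pr and
 * DFL_FPGA_FME_PORT_PR come from <linux/fpga-dfl.h>, bitstream_buf,
 * bitstream_len and handle_error() are placeholders, and buffer_size must
 * be 4-byte aligned:
 *
 *	struct dfl_fpga_fme_port_pr pr = { 0 };
 *	int fd = open("/dev/dfl-fme.0", O_RDWR);
 *
 *	pr.argsz = sizeof(pr);
 *	pr.port_id = 0;
 *	pr.buffer_size = bitstream_len;
 *	pr.buffer_address = (__u64)(uintptr_t)bitstream_buf;
 *
 *	if (fd < 0 || ioctl(fd, DFL_FPGA_FME_PORT_PR, &pr))
 *		handle_error();
 */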

/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 *
 * @pdata: fme platform device's pdata
 * @feature: fme sub feature providing the mapped registers used by the fpga-mgr
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = pdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	platform_device_put(mgr);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

	platform_device_unregister(priv->mgr);
}

/**
 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
 *
 * @pdata: fme platform device's pdata
 * @port_id: port id for the bridge to be created.
 *
 * Return: fme bridge if successful, and error code otherwise.
 */
static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
{
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_br_pdata br_pdata;
	struct dfl_fme_bridge *fme_br;
	int ret = -ENOMEM;

	fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
	if (!fme_br)
		return ERR_PTR(ret);

	br_pdata.cdev = pdata->dfl_cdev;
	br_pdata.port_id = port_id;

	fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
					   PLATFORM_DEVID_AUTO);
	if (!fme_br->br)
		return ERR_PTR(ret);

	fme_br->br->dev.parent = dev;

	ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
	if (ret)
		goto create_br_err;

	ret = platform_device_add(fme_br->br);
	if (ret)
		goto create_br_err;

	return fme_br;

create_br_err:
	platform_device_put(fme_br->br);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	platform_device_unregister(fme_br->br);
}

/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_bridge *fbridge, *tmp;

	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
		list_del(&fbridge->node);
		dfl_fme_destroy_bridge(fbridge);
	}
}

/**
 * dfl_fme_create_region - create fpga region platform device as child
 *
 * @pdata: fme platform device's pdata
 * @mgr: mgr platform device needed for region
 * @br: br platform device needed for region
 * @port_id: port id
 *
 * Return: fme region if successful, and error code otherwise.
 */
static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
		      struct platform_device *mgr,
		      struct platform_device *br, int port_id)
{
	struct dfl_fme_region_pdata region_pdata;
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_region *fme_region;
	int ret = -ENOMEM;

	fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
	if (!fme_region)
		return ERR_PTR(ret);

	region_pdata.mgr = mgr;
	region_pdata.br = br;

	/*
	 * Each FPGA device may have more than one port, so allocate platform
	 * device using the same port platform device id.
	 */
	fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
	if (!fme_region->region)
		return ERR_PTR(ret);

	fme_region->region->dev.parent = dev;

	ret = platform_device_add_data(fme_region->region, &region_pdata,
				       sizeof(region_pdata));
	if (ret)
		goto create_region_err;

	ret = platform_device_add(fme_region->region);
	if (ret)
		goto create_region_err;

	fme_region->port_id = port_id;

	return fme_region;

create_region_err:
	platform_device_put(fme_region->region);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	platform_device_unregister(fme_region->region);
}

/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_region *fme_region, *tmp;

	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
		list_del(&fme_region->node);
		dfl_fme_destroy_region(fme_region);
	}
}

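/*
 * pr_mgmt_init() builds the partial reconfiguration topology below the FME:
 * a single fpga-mgr platform device backed by this feature's registers, plus
 * one fpga-bridge and one fpga-region platform device per implemented port,
 * as reported by the FME header capability register.
 */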
static int pr_mgmt_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fme_region *fme_region;
	struct dfl_fme_bridge *fme_br;
	struct platform_device *mgr;
	struct dfl_fme *priv;
	void __iomem *fme_hdr;
	int ret = -ENODEV, i = 0;
	u64 fme_cap, port_offset;

	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	priv = dfl_fpga_pdata_get_private(pdata);

	/* Initialize the region and bridge sub device list */
	INIT_LIST_HEAD(&priv->region_list);
	INIT_LIST_HEAD(&priv->bridge_list);

	/* Create fpga mgr platform device */
	mgr = dfl_fme_create_mgr(pdata, feature);
	if (IS_ERR(mgr)) {
		dev_err(&pdev->dev, "failed to create fpga mgr pdev\n");
		goto unlock;
	}

	priv->mgr = mgr;

	/* Read capability register to check number of regions and bridges */
	fme_cap = readq(fme_hdr + FME_HDR_CAP);
	for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
		port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
		if (!(port_offset & FME_PORT_OFST_IMP))
			continue;

		/* Create bridge for each port */
		fme_br = dfl_fme_create_bridge(pdata, i);
		if (IS_ERR(fme_br)) {
			ret = PTR_ERR(fme_br);
			goto destroy_region;
		}

		list_add(&fme_br->node, &priv->bridge_list);

		/* Create region for each port */
		fme_region = dfl_fme_create_region(pdata, mgr,
						   fme_br->br, i);
		if (IS_ERR(fme_region)) {
			ret = PTR_ERR(fme_region);
			goto destroy_region;
		}

		list_add(&fme_region->node, &priv->region_list);
	}
	mutex_unlock(&pdata->lock);

	return 0;

destroy_region:
	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
unlock:
	mutex_unlock(&pdata->lock);
	return ret;
}

static void pr_mgmt_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);

	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
	mutex_unlock(&pdata->lock);
}

static long fme_pr_ioctl(struct platform_device *pdev,
			 struct dfl_feature *feature,
			 unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_FME_PORT_PR:
		ret = fme_pr(pdev, arg);
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

const struct dfl_feature_ops pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};
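
/*
 * pr_mgmt_ops is registered by the FME main driver (dfl-fme-main.c) for the
 * FME_FEATURE_ID_PR_MGMT private feature, so .init/.uinit run when that sub
 * feature is probed/removed and DFL_FPGA_FME_PORT_PR requests from the FME
 * character device are dispatched to .ioctl.
 */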