// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Christopher Rauer <christopher.rauer@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-region.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"
#include "dfl-fme-pr.h"

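/*
 * Find the dfl_fme_region that was created for @port_id in the FME's region
 * list, or NULL if no region exists for that port.
 */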
static struct dfl_fme_region *
dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;

	list_for_each_entry(fme_region, &fme->region_list, node)
		if (fme_region->port_id == port_id)
			return fme_region;

	return NULL;
}

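/* Match callback for fpga_region_class_find(): match on the parent device. */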
static int dfl_fme_region_match(struct device *dev, const void *data)
{
	return dev->parent == data;
}

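/*
 * Look up the fpga_region device that belongs to @port_id. On success the
 * region's device reference count has been raised by fpga_region_class_find(),
 * so the caller must drop it with put_device() when done.
 */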
static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
{
	struct dfl_fme_region *fme_region;
	struct fpga_region *region;

	fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
	if (!fme_region)
		return NULL;

	region = fpga_region_class_find(NULL, &fme_region->region->dev,
					dfl_fme_region_match);
	if (!region)
		return NULL;

	return region;
}

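/*
 * fme_pr - handle the DFL_FPGA_FME_PORT_PR ioctl
 *
 * Copies the partial reconfiguration bitstream from userspace, looks up the
 * fpga_region that belongs to the requested port and programs it through the
 * FPGA region/manager framework.
 *
 * A minimal userspace sketch (illustrative only; the FME character device
 * path is an assumption and error handling is omitted):
 *
 *	struct dfl_fpga_fme_port_pr pr = {
 *		.argsz = sizeof(pr),
 *		.flags = 0,
 *		.port_id = 0,
 *		.buffer_size = bitstream_len,
 *		.buffer_address = (__u64)(uintptr_t)bitstream,
 *	};
 *	int fd = open("/dev/dfl-fme.0", O_RDWR);
 *
 *	ioctl(fd, DFL_FPGA_FME_PORT_PR, &pr);
 */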
static int fme_pr(struct platform_device *pdev, unsigned long arg)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	void __user *argp = (void __user *)arg;
	struct dfl_fpga_fme_port_pr port_pr;
	struct fpga_image_info *info;
	struct fpga_region *region;
	void __iomem *fme_hdr;
	struct dfl_fme *fme;
	unsigned long minsz;
	void *buf = NULL;
	size_t length;
	int ret = 0;
	u64 v;

	minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);

	if (copy_from_user(&port_pr, argp, minsz))
		return -EFAULT;

	if (port_pr.argsz < minsz || port_pr.flags)
		return -EINVAL;

	/* get fme header region */
	fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

	/* check port id */
	v = readq(fme_hdr + FME_HDR_CAP);
	if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
		dev_dbg(&pdev->dev, "port number more than maximum\n");
		return -EINVAL;
	}

	/*
	 * Align the PR buffer per the PR bandwidth; the hardware ignores the
	 * extra padding data automatically.
	 */
	length = ALIGN(port_pr.buffer_size, 4);

	buf = vmalloc(length);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf,
			   (void __user *)(unsigned long)port_pr.buffer_address,
			   port_pr.buffer_size)) {
		ret = -EFAULT;
		goto free_exit;
	}

	/* prepare fpga_image_info for PR */
	info = fpga_image_info_alloc(&pdev->dev);
	if (!info) {
		ret = -ENOMEM;
		goto free_exit;
	}

	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;

	mutex_lock(&fdata->lock);
	fme = dfl_fpga_fdata_get_private(fdata);
	/* fme device has been unregistered. */
	if (!fme) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	region = dfl_fme_region_find(fme, port_pr.port_id);
	if (!region) {
		ret = -EINVAL;
		goto unlock_exit;
	}

	fpga_image_info_free(region->info);

	info->buf = buf;
	info->count = length;
	info->region_id = port_pr.port_id;
	region->info = info;

	ret = fpga_region_program_fpga(region);

	/*
	 * Userspace is allowed to reset the PR region's logic by disabling
	 * and re-enabling the bridge to clear things out between acceleration
	 * runs, so there is no need to keep holding the bridges after partial
	 * reconfiguration.
	 */
	if (region->get_bridges)
		fpga_bridges_put(&region->bridge_list);

	put_device(&region->dev);
unlock_exit:
	mutex_unlock(&fdata->lock);
free_exit:
	vfree(buf);
	return ret;
}

/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 * @fdata: fme feature dev data
 * @feature: sub feature info
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_dev_data *fdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = fdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	platform_device_put(mgr);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @fdata: fme feature dev data
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_dev_data *fdata)
{
	struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);

	platform_device_unregister(priv->mgr);
}

/**
 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
 *
 * @fdata: fme feature dev data
 * @port_id: port id for the bridge to be created.
 *
 * Return: bridge platform device if successful, and error code otherwise.
 */
static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_dev_data *fdata, int port_id)
{
	struct device *dev = &fdata->dev->dev;
	struct dfl_fme_br_pdata br_pdata;
	struct dfl_fme_bridge *fme_br;
	int ret = -ENOMEM;

	fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
	if (!fme_br)
		return ERR_PTR(ret);

	br_pdata.cdev = fdata->dfl_cdev;
	br_pdata.port_id = port_id;

	fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
					   PLATFORM_DEVID_AUTO);
	if (!fme_br->br)
		return ERR_PTR(ret);

	fme_br->br->dev.parent = dev;

	ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
	if (ret)
		goto create_br_err;

	ret = platform_device_add(fme_br->br);
	if (ret)
		goto create_br_err;

	return fme_br;

create_br_err:
	platform_device_put(fme_br->br);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	platform_device_unregister(fme_br->br);
}

/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
 * @fdata: fme feature dev data
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_dev_data *fdata)
{
	struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
	struct dfl_fme_bridge *fbridge, *tmp;

	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
		list_del(&fbridge->node);
		dfl_fme_destroy_bridge(fbridge);
	}
}

/**
 * dfl_fme_create_region - create fpga region platform device as child
 *
 * @fdata: fme feature dev data
 * @mgr: mgr platform device needed for region
 * @br: br platform device needed for region
 * @port_id: port id
 *
 * Return: fme region if successful, and error code otherwise.
 */
static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_dev_data *fdata,
		      struct platform_device *mgr,
		      struct platform_device *br, int port_id)
{
	struct dfl_fme_region_pdata region_pdata;
	struct device *dev = &fdata->dev->dev;
	struct dfl_fme_region *fme_region;
	int ret = -ENOMEM;

	fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
	if (!fme_region)
		return ERR_PTR(ret);

	region_pdata.mgr = mgr;
	region_pdata.br = br;

	/*
	 * Each FPGA device may have more than one port, so allocate platform
	 * device using the same port platform device id.
	 */
	fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
	if (!fme_region->region)
		return ERR_PTR(ret);

	fme_region->region->dev.parent = dev;

	ret = platform_device_add_data(fme_region->region, &region_pdata,
				       sizeof(region_pdata));
	if (ret)
		goto create_region_err;

	ret = platform_device_add(fme_region->region);
	if (ret)
		goto create_region_err;

	fme_region->port_id = port_id;

	return fme_region;

create_region_err:
	platform_device_put(fme_region->region);
	return ERR_PTR(ret);
}

/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	platform_device_unregister(fme_region->region);
}

/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @fdata: fme feature dev data
 */
static void dfl_fme_destroy_regions(struct dfl_feature_dev_data *fdata)
{
	struct dfl_fme *priv = dfl_fpga_fdata_get_private(fdata);
	struct dfl_fme_region *fme_region, *tmp;

	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
		list_del(&fme_region->node);
		dfl_fme_destroy_region(fme_region);
	}
}

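/*
 * pr_mgmt_init - set up the PR management sub feature
 *
 * Creates one fpga manager platform device for the FME and, for every
 * implemented port found in the FME header, one fpga bridge and one fpga
 * region platform device pair.
 */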
static int pr_mgmt_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct dfl_fme_region *fme_region;
	struct dfl_fme_bridge *fme_br;
	struct platform_device *mgr;
	struct dfl_fme *priv;
	void __iomem *fme_hdr;
	int ret = -ENODEV, i = 0;
	u64 fme_cap, port_offset;

	fme_hdr = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	priv = dfl_fpga_fdata_get_private(fdata);

	/* Initialize the region and bridge sub device lists */
	INIT_LIST_HEAD(&priv->region_list);
	INIT_LIST_HEAD(&priv->bridge_list);

	/* Create fpga mgr platform device */
	mgr = dfl_fme_create_mgr(fdata, feature);
	if (IS_ERR(mgr)) {
		dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
		goto unlock;
	}

	priv->mgr = mgr;

	/* Read capability register to check number of regions and bridges */
	fme_cap = readq(fme_hdr + FME_HDR_CAP);
	for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
		port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
		if (!(port_offset & FME_PORT_OFST_IMP))
			continue;

		/* Create bridge for each port */
		fme_br = dfl_fme_create_bridge(fdata, i);
		if (IS_ERR(fme_br)) {
			ret = PTR_ERR(fme_br);
			goto destroy_region;
		}

		list_add(&fme_br->node, &priv->bridge_list);

		/* Create region for each port */
		fme_region = dfl_fme_create_region(fdata, mgr,
						   fme_br->br, i);
		if (IS_ERR(fme_region)) {
			ret = PTR_ERR(fme_region);
			goto destroy_region;
		}

		list_add(&fme_region->node, &priv->region_list);
	}
	mutex_unlock(&fdata->lock);

	return 0;

destroy_region:
	dfl_fme_destroy_regions(fdata);
	dfl_fme_destroy_bridges(fdata);
	dfl_fme_destroy_mgr(fdata);
unlock:
	mutex_unlock(&fdata->lock);
	return ret;
}

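/* Tear down the regions, bridges and manager created by pr_mgmt_init(). */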
static void pr_mgmt_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);

	mutex_lock(&fdata->lock);

	dfl_fme_destroy_regions(fdata);
	dfl_fme_destroy_bridges(fdata);
	dfl_fme_destroy_mgr(fdata);
	mutex_unlock(&fdata->lock);
}

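/*
 * ioctl handler for the PR management sub feature. Only DFL_FPGA_FME_PORT_PR
 * is handled; unsupported commands return -ENODEV.
 */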
static long fme_pr_ioctl(struct platform_device *pdev,
			 struct dfl_feature *feature,
			 unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_FME_PORT_PR:
		ret = fme_pr(pdev, arg);
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

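/* Feature id table and ops for the PR management private feature. */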
const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_PR_MGMT,},
	{0}
};

const struct dfl_feature_ops fme_pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};