1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Driver for FPGA Management Engine (FME) Partial Reconfiguration 4 * 5 * Copyright (C) 2017-2018 Intel Corporation, Inc. 6 * 7 * Authors: 8 * Kang Luwei <luwei.kang@intel.com> 9 * Xiao Guangrong <guangrong.xiao@linux.intel.com> 10 * Wu Hao <hao.wu@intel.com> 11 * Joseph Grecco <joe.grecco@intel.com> 12 * Enno Luebbers <enno.luebbers@intel.com> 13 * Tim Whisonant <tim.whisonant@intel.com> 14 * Ananda Ravuri <ananda.ravuri@intel.com> 15 * Christopher Rauer <christopher.rauer@intel.com> 16 * Henry Mitchel <henry.mitchel@intel.com> 17 */ 18 19 #include <linux/types.h> 20 #include <linux/device.h> 21 #include <linux/vmalloc.h> 22 #include <linux/uaccess.h> 23 #include <linux/fpga/fpga-mgr.h> 24 #include <linux/fpga/fpga-bridge.h> 25 #include <linux/fpga/fpga-region.h> 26 #include <linux/fpga-dfl.h> 27 28 #include "dfl.h" 29 #include "dfl-fme.h" 30 #include "dfl-fme-pr.h" 31 32 static struct dfl_fme_region * 33 dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id) 34 { 35 struct dfl_fme_region *fme_region; 36 37 list_for_each_entry(fme_region, &fme->region_list, node) 38 if (fme_region->port_id == port_id) 39 return fme_region; 40 41 return NULL; 42 } 43 44 static int dfl_fme_region_match(struct device *dev, const void *data) 45 { 46 return dev->parent == data; 47 } 48 49 static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id) 50 { 51 struct dfl_fme_region *fme_region; 52 struct fpga_region *region; 53 54 fme_region = dfl_fme_region_find_by_port_id(fme, port_id); 55 if (!fme_region) 56 return NULL; 57 58 region = fpga_region_class_find(NULL, &fme_region->region->dev, 59 dfl_fme_region_match); 60 if (!region) 61 return NULL; 62 63 return region; 64 } 65 66 static int fme_pr(struct platform_device *pdev, unsigned long arg) 67 { 68 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 69 void __user *argp = (void __user *)arg; 70 struct dfl_fpga_fme_port_pr port_pr; 71 struct fpga_image_info *info; 72 struct fpga_region *region; 73 void __iomem *fme_hdr; 74 struct dfl_fme *fme; 75 unsigned long minsz; 76 void *buf = NULL; 77 size_t length; 78 int ret = 0; 79 u64 v; 80 81 minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address); 82 83 if (copy_from_user(&port_pr, argp, minsz)) 84 return -EFAULT; 85 86 if (port_pr.argsz < minsz || port_pr.flags) 87 return -EINVAL; 88 89 /* get fme header region */ 90 fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev, 91 FME_FEATURE_ID_HEADER); 92 93 /* check port id */ 94 v = readq(fme_hdr + FME_HDR_CAP); 95 if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) { 96 dev_dbg(&pdev->dev, "port number more than maximum\n"); 97 return -EINVAL; 98 } 99 100 if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address, 101 port_pr.buffer_size)) 102 return -EFAULT; 103 104 /* 105 * align PR buffer per PR bandwidth, as HW ignores the extra padding 106 * data automatically. 107 */ 108 length = ALIGN(port_pr.buffer_size, 4); 109 110 buf = vmalloc(length); 111 if (!buf) 112 return -ENOMEM; 113 114 if (copy_from_user(buf, 115 (void __user *)(unsigned long)port_pr.buffer_address, 116 port_pr.buffer_size)) { 117 ret = -EFAULT; 118 goto free_exit; 119 } 120 121 /* prepare fpga_image_info for PR */ 122 info = fpga_image_info_alloc(&pdev->dev); 123 if (!info) { 124 ret = -ENOMEM; 125 goto free_exit; 126 } 127 128 info->flags |= FPGA_MGR_PARTIAL_RECONFIG; 129 130 mutex_lock(&pdata->lock); 131 fme = dfl_fpga_pdata_get_private(pdata); 132 /* fme device has been unregistered. */ 133 if (!fme) { 134 ret = -EINVAL; 135 goto unlock_exit; 136 } 137 138 region = dfl_fme_region_find(fme, port_pr.port_id); 139 if (!region) { 140 ret = -EINVAL; 141 goto unlock_exit; 142 } 143 144 fpga_image_info_free(region->info); 145 146 info->buf = buf; 147 info->count = length; 148 info->region_id = port_pr.port_id; 149 region->info = info; 150 151 ret = fpga_region_program_fpga(region); 152 153 /* 154 * it allows userspace to reset the PR region's logic by disabling and 155 * reenabling the bridge to clear things out between accleration runs. 156 * so no need to hold the bridges after partial reconfiguration. 157 */ 158 if (region->get_bridges) 159 fpga_bridges_put(®ion->bridge_list); 160 161 put_device(®ion->dev); 162 unlock_exit: 163 mutex_unlock(&pdata->lock); 164 free_exit: 165 vfree(buf); 166 return ret; 167 } 168 169 /** 170 * dfl_fme_create_mgr - create fpga mgr platform device as child device 171 * 172 * @pdata: fme platform_device's pdata 173 * 174 * Return: mgr platform device if successful, and error code otherwise. 175 */ 176 static struct platform_device * 177 dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata, 178 struct dfl_feature *feature) 179 { 180 struct platform_device *mgr, *fme = pdata->dev; 181 struct dfl_fme_mgr_pdata mgr_pdata; 182 int ret = -ENOMEM; 183 184 if (!feature->ioaddr) 185 return ERR_PTR(-ENODEV); 186 187 mgr_pdata.ioaddr = feature->ioaddr; 188 189 /* 190 * Each FME has only one fpga-mgr, so allocate platform device using 191 * the same FME platform device id. 192 */ 193 mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id); 194 if (!mgr) 195 return ERR_PTR(ret); 196 197 mgr->dev.parent = &fme->dev; 198 199 ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata)); 200 if (ret) 201 goto create_mgr_err; 202 203 ret = platform_device_add(mgr); 204 if (ret) 205 goto create_mgr_err; 206 207 return mgr; 208 209 create_mgr_err: 210 platform_device_put(mgr); 211 return ERR_PTR(ret); 212 } 213 214 /** 215 * dfl_fme_destroy_mgr - destroy fpga mgr platform device 216 * @pdata: fme platform device's pdata 217 */ 218 static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata) 219 { 220 struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata); 221 222 platform_device_unregister(priv->mgr); 223 } 224 225 /** 226 * dfl_fme_create_bridge - create fme fpga bridge platform device as child 227 * 228 * @pdata: fme platform device's pdata 229 * @port_id: port id for the bridge to be created. 230 * 231 * Return: bridge platform device if successful, and error code otherwise. 232 */ 233 static struct dfl_fme_bridge * 234 dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id) 235 { 236 struct device *dev = &pdata->dev->dev; 237 struct dfl_fme_br_pdata br_pdata; 238 struct dfl_fme_bridge *fme_br; 239 int ret = -ENOMEM; 240 241 fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL); 242 if (!fme_br) 243 return ERR_PTR(ret); 244 245 br_pdata.cdev = pdata->dfl_cdev; 246 br_pdata.port_id = port_id; 247 248 fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE, 249 PLATFORM_DEVID_AUTO); 250 if (!fme_br->br) 251 return ERR_PTR(ret); 252 253 fme_br->br->dev.parent = dev; 254 255 ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata)); 256 if (ret) 257 goto create_br_err; 258 259 ret = platform_device_add(fme_br->br); 260 if (ret) 261 goto create_br_err; 262 263 return fme_br; 264 265 create_br_err: 266 platform_device_put(fme_br->br); 267 return ERR_PTR(ret); 268 } 269 270 /** 271 * dfl_fme_destroy_bridge - destroy fpga bridge platform device 272 * @fme_br: fme bridge to destroy 273 */ 274 static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br) 275 { 276 platform_device_unregister(fme_br->br); 277 } 278 279 /** 280 * dfl_fme_destroy_bridge - destroy all fpga bridge platform device 281 * @pdata: fme platform device's pdata 282 */ 283 static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata) 284 { 285 struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata); 286 struct dfl_fme_bridge *fbridge, *tmp; 287 288 list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) { 289 list_del(&fbridge->node); 290 dfl_fme_destroy_bridge(fbridge); 291 } 292 } 293 294 /** 295 * dfl_fme_create_region - create fpga region platform device as child 296 * 297 * @pdata: fme platform device's pdata 298 * @mgr: mgr platform device needed for region 299 * @br: br platform device needed for region 300 * @port_id: port id 301 * 302 * Return: fme region if successful, and error code otherwise. 303 */ 304 static struct dfl_fme_region * 305 dfl_fme_create_region(struct dfl_feature_platform_data *pdata, 306 struct platform_device *mgr, 307 struct platform_device *br, int port_id) 308 { 309 struct dfl_fme_region_pdata region_pdata; 310 struct device *dev = &pdata->dev->dev; 311 struct dfl_fme_region *fme_region; 312 int ret = -ENOMEM; 313 314 fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL); 315 if (!fme_region) 316 return ERR_PTR(ret); 317 318 region_pdata.mgr = mgr; 319 region_pdata.br = br; 320 321 /* 322 * Each FPGA device may have more than one port, so allocate platform 323 * device using the same port platform device id. 324 */ 325 fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id); 326 if (!fme_region->region) 327 return ERR_PTR(ret); 328 329 fme_region->region->dev.parent = dev; 330 331 ret = platform_device_add_data(fme_region->region, ®ion_pdata, 332 sizeof(region_pdata)); 333 if (ret) 334 goto create_region_err; 335 336 ret = platform_device_add(fme_region->region); 337 if (ret) 338 goto create_region_err; 339 340 fme_region->port_id = port_id; 341 342 return fme_region; 343 344 create_region_err: 345 platform_device_put(fme_region->region); 346 return ERR_PTR(ret); 347 } 348 349 /** 350 * dfl_fme_destroy_region - destroy fme region 351 * @fme_region: fme region to destroy 352 */ 353 static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region) 354 { 355 platform_device_unregister(fme_region->region); 356 } 357 358 /** 359 * dfl_fme_destroy_regions - destroy all fme regions 360 * @pdata: fme platform device's pdata 361 */ 362 static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata) 363 { 364 struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata); 365 struct dfl_fme_region *fme_region, *tmp; 366 367 list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) { 368 list_del(&fme_region->node); 369 dfl_fme_destroy_region(fme_region); 370 } 371 } 372 373 static int pr_mgmt_init(struct platform_device *pdev, 374 struct dfl_feature *feature) 375 { 376 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 377 struct dfl_fme_region *fme_region; 378 struct dfl_fme_bridge *fme_br; 379 struct platform_device *mgr; 380 struct dfl_fme *priv; 381 void __iomem *fme_hdr; 382 int ret = -ENODEV, i = 0; 383 u64 fme_cap, port_offset; 384 385 fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev, 386 FME_FEATURE_ID_HEADER); 387 388 mutex_lock(&pdata->lock); 389 priv = dfl_fpga_pdata_get_private(pdata); 390 391 /* Initialize the region and bridge sub device list */ 392 INIT_LIST_HEAD(&priv->region_list); 393 INIT_LIST_HEAD(&priv->bridge_list); 394 395 /* Create fpga mgr platform device */ 396 mgr = dfl_fme_create_mgr(pdata, feature); 397 if (IS_ERR(mgr)) { 398 dev_err(&pdev->dev, "fail to create fpga mgr pdev\n"); 399 goto unlock; 400 } 401 402 priv->mgr = mgr; 403 404 /* Read capability register to check number of regions and bridges */ 405 fme_cap = readq(fme_hdr + FME_HDR_CAP); 406 for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) { 407 port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i)); 408 if (!(port_offset & FME_PORT_OFST_IMP)) 409 continue; 410 411 /* Create bridge for each port */ 412 fme_br = dfl_fme_create_bridge(pdata, i); 413 if (IS_ERR(fme_br)) { 414 ret = PTR_ERR(fme_br); 415 goto destroy_region; 416 } 417 418 list_add(&fme_br->node, &priv->bridge_list); 419 420 /* Create region for each port */ 421 fme_region = dfl_fme_create_region(pdata, mgr, 422 fme_br->br, i); 423 if (IS_ERR(fme_region)) { 424 ret = PTR_ERR(fme_region); 425 goto destroy_region; 426 } 427 428 list_add(&fme_region->node, &priv->region_list); 429 } 430 mutex_unlock(&pdata->lock); 431 432 return 0; 433 434 destroy_region: 435 dfl_fme_destroy_regions(pdata); 436 dfl_fme_destroy_bridges(pdata); 437 dfl_fme_destroy_mgr(pdata); 438 unlock: 439 mutex_unlock(&pdata->lock); 440 return ret; 441 } 442 443 static void pr_mgmt_uinit(struct platform_device *pdev, 444 struct dfl_feature *feature) 445 { 446 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); 447 448 mutex_lock(&pdata->lock); 449 450 dfl_fme_destroy_regions(pdata); 451 dfl_fme_destroy_bridges(pdata); 452 dfl_fme_destroy_mgr(pdata); 453 mutex_unlock(&pdata->lock); 454 } 455 456 static long fme_pr_ioctl(struct platform_device *pdev, 457 struct dfl_feature *feature, 458 unsigned int cmd, unsigned long arg) 459 { 460 long ret; 461 462 switch (cmd) { 463 case DFL_FPGA_FME_PORT_PR: 464 ret = fme_pr(pdev, arg); 465 break; 466 default: 467 ret = -ENODEV; 468 } 469 470 return ret; 471 } 472 473 const struct dfl_feature_ops pr_mgmt_ops = { 474 .init = pr_mgmt_init, 475 .uinit = pr_mgmt_uinit, 476 .ioctl = fme_pr_ioctl, 477 }; 478