// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"

/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
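
/*
 * Example (illustrative only): with DEBUG defined and the dbg_level module
 * parameter including DBG_DMA, a statement such as
 *
 *	rmcd_debug(DMA, "cookie=%d", cookie);
 *
 * emits a pr_debug() message of the form "rio_mport: <function>: cookie=<n>".
 * Without DEBUG it compiles away via no_printk() while still type-checking
 * its arguments.
 */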

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};

#define MPORT_EVENT_DEPTH	10

/*
 * mport_dev  driver-specific structure that represents mport device
 * @active     mport device status flag
 * @node       list node to maintain list of registered mports
 * @cdev       character device
 * @dev        associated device object
 * @mport      associated subsystem's master port device object
 * @buf_mutex  lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @doorbells  list of inbound doorbell filters
 * @db_lock    lock for doorbells list
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   queue for memory mappings
 * @dma_chan   default DMA channel associated with this device
 * @dma_ref    refcount for the default DMA channel
 * @comp       completion signaled when the default DMA channel is released
 */
struct mport_dev {
	atomic_t active;
	struct list_head node;
	struct cdev cdev;
	struct device dev;
	struct rio_mport *mport;
	struct mutex buf_mutex;
	struct mutex file_mutex;
	struct list_head file_list;
	struct rio_mport_properties properties;
	struct list_head doorbells;
	spinlock_t db_lock;
	struct list_head portwrites;
	spinlock_t pw_lock;
	struct list_head mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan *dma_chan;
	struct kref dma_ref;
	struct completion comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 */
struct mport_cdev_priv {
	struct mport_dev *md;
	struct fasync_struct *async_queue;
	struct list_head list;
	struct list_head db_filters;
	struct list_head pw_filters;
	struct kfifo event_fifo;
	wait_queue_head_t event_rx_wait;
	spinlock_t fifo_lock;
	u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan *dmach;
	struct list_head async_list;
	spinlock_t req_lock;
	struct mutex dma_lock;
	struct kref dma_ref;
	struct completion comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node reference to device node
 * @priv_node node in private data
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static const struct class dev_class = {
	.name = DRV_NAME,
};
static dev_t dev_number;

static void mport_release_mapping(struct kref *ref);

static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				  buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}

static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}
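
/*
 * Illustrative userspace sketch (not part of the driver): reading four words
 * of the local maintenance space through the ioctl interface. Field names
 * follow struct rio_mport_maint_io from <linux/rio_mport_cdev.h>; the device
 * node name may vary by system.
 *
 *	u32 regs[4];
 *	struct rio_mport_maint_io io = {
 *		.offset = 0,			// must be 4-byte aligned
 *		.length = sizeof(regs),		// must be a multiple of 4
 *		.buffer = (uintptr_t)regs,
 *	};
 *	int fd = open("/dev/rio_mport0", O_RDWR);
 *	ioctl(fd, RIO_MPORT_MAINT_READ_LOCAL, &io);
 *
 * Remote reads additionally require .rioid and .hopcount.
 */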

/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc_obj(*map);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}

static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;
	return 0;
}
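
/*
 * Illustrative outbound-window flow (assumptions noted inline):
 * RIO_MAP_OUTBOUND returns a physical handle in map.handle, and mmap()ing
 * the device at that offset gives the process a view whose accesses are
 * translated into RapidIO transactions addressed to map.rioid at
 * map.rio_addr (see mport_cdev_mmap() below, which interprets the mmap
 * offset as a physical address within a known mapping).
 *
 *	struct rio_mmap map = {
 *		.rioid = destid, .rio_addr = raddr, .length = len,
 *	};
 *	ioctl(fd, RIO_MAP_OUTBOUND, &map);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, map.handle);
 */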

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device Id
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct kref refcount;
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status status;
	struct completion req_comp;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}

static void dma_req_free(struct kref *ref)
{
	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
						 refcount);
	struct mport_cdev_priv *priv = req->priv;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		unpin_user_pages(req->page_list, req->nr_pages);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}

static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);
}
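
/*
 * Lifetime note on struct mport_dma_req: the request starts with one
 * reference (kref_init() in rio_dma_transfer()); a second reference is taken
 * just before submission on behalf of the completion callback.
 * dma_req_free() therefore runs only after both the submitter and
 * dma_xfer_callback() have dropped their references, at which point it is
 * safe to unmap the SG table, unpin user pages and drop the channel
 * reference.
 */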

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or ERR_PTR/NULL if it failed. The caller must check the
 * returned pointer with the IS_ERR macro and also handle a NULL return.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request DMA channel associated with this mport device.
 * Try to request DMA channel for every new process that opened given
 * mport. If a new DMA channel is not available use default channel
 * which is the first DMA channel opened on mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			   xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			   xfer->rio_addr, xfer->length);
		goto err_out;
	}

	tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->status = DMA_IN_PROGRESS;
	kref_get(&req->refcount);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		kref_put(&req->refcount, dma_req_free);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			   cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}
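
/*
 * do_dma_request() return semantics by synchronization mode:
 * RIO_TRANSFER_SYNC  - blocks up to dma_timeout; returns 0 or -errno.
 * RIO_TRANSFER_ASYNC - returns the positive DMA cookie immediately; the
 *                      caller later waits for it via RIO_WAIT_FOR_ASYNC.
 * RIO_TRANSFER_FAF   - "fire and forget": returns 0 without waiting;
 *                      completion is handled solely by the callback.
 */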

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc_obj(*req);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}
	chan = priv->dmach;

	kref_init(&req->refcount);
	init_completion(&req->req_comp);
	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	req->dmach = chan;
	req->sync = sync;

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned int offset;
		long pinned;

		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_objs(*page_list, nr_pages);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = pin_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages,
				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				page_list);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("pin_user_pages_fast err=%ld",
					   pinned);
				nr_pages = 0;
			} else {
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
				/*
				 * Set nr_pages up to mean "how many pages to
				 * unpin" in the error handler:
				 */
				nr_pages = pinned;
			}
			ret = -EFAULT;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
						offset, xfer->length,
						GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > req->map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   req->map->virt_addr + (baddr - req->map->phys_addr) +
				xfer->offset, xfer->length);
	}

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_ASYNC)
			return ret; /* return ASYNC cookie */
	} else {
		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
	}

err_pg:
	if (!req->page_list) {
		unpin_user_pages(page_list, nr_pages);
		kfree(page_list);
	}
err_req:
	kref_put(&req->refcount, dma_req_free);
	return ret;
}
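
/*
 * Illustrative userspace sketch (hypothetical values): a single synchronous
 * write from a user buffer to a remote device. Field names follow
 * struct rio_transaction / struct rio_transfer_io from
 * <linux/rio_mport_cdev.h>.
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid = destid, .rio_addr = raddr,
 *		.loc_addr = (uintptr_t)buf, .length = len,
 *		.method = RIO_EXCHANGE_DEFAULT,
 *	};
 *	struct rio_transaction tx = {
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync = RIO_TRANSFER_SYNC, .dir = RIO_TRANSFER_DIR_WRITE,
 *		.count = 1, .block = (uintptr_t)&xfer,
 *	};
 *	ioctl(fd, RIO_TRANSFER, &tx);
 */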

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;
	size_t size;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	size = array_size(sizeof(*transfer), transaction.count);
	transfer = vmalloc(size);
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    size))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer, size)))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be still in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE",
			   req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		kref_put(&req->refcount, dma_req_free);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}
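
/*
 * For RIO_WAIT_FOR_ASYNC, the token in struct rio_async_tx_wait is the
 * cookie that the RIO_TRANSFER ioctl returned for a RIO_TRANSFER_ASYNC
 * request; a zero timeout falls back to the dma_timeout module parameter.
 */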

static int rio_mport_create_dma_mapping(struct mport_dev *md,
					struct file *filp, u64 size,
					struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc_obj(*map);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
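
/*
 * Illustrative buffer lifecycle (assumed typical usage): RIO_ALLOC_DMA
 * returns the bus address of a coherent kernel buffer in map.dma_handle;
 * the process can mmap() the device at that offset to access the buffer,
 * reference it as xfer.handle in RIO_TRANSFER, and finally release it with
 * RIO_FREE_DMA passing the same handle.
 */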
#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				 u64 raddr, u64 size,
				 struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc_obj(*map);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;
get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}
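
/*
 * Illustrative inbound-window flow: RIO_MAP_INBOUND allocates a coherent
 * buffer, maps it into RapidIO space at map.rio_addr (or at a driver-chosen
 * address when RIO_MAP_ANY_ADDR is passed) and returns the physical handle
 * used both for mmap() and for RIO_UNMAP_INBOUND. Overlapping requests fail
 * with -EBUSY; only an exact match reuses an existing window.
 */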

/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                            previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg: buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg: port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			    sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			 "%s: spurious DB received from 0x%x, info=0x%04x\n",
			 __func__, src, info);
}

static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc_obj(*db_filter);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}
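
/*
 * Doorbell delivery model: once a range is enabled with
 * RIO_ENABLE_DOORBELL_RANGE, matching inbound doorbells are queued as
 * struct rio_event records (header == RIO_DOORBELL) on the per-file event
 * FIFO, provided the file's event mask set via RIO_SET_EVENT_MASK includes
 * RIO_DOORBELL. A process typically poll()s the descriptor and then read()s
 * the events.
 */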

static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}

static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
	    (msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}

static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}
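
/*
 * Port-write matching example: with filter.mask = 0xffff0000,
 * filter.low = 0x00010000 and filter.high = 0x0001ffff, a port-write whose
 * component tag is 0x00012345 matches (0x00012345 & mask = 0x00010000, which
 * falls within [low, high]), while 0x00022345 does not.
 */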

static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc_obj(*pw_filter);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}

static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}

static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}

static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}

/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the kernel device object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}

static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}

/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	struct device *dev;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	dev = bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name);
	if (dev) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		put_device(dev);
		return -EEXIST;
	}

	size = sizeof(*rdev);
	mport = md->mport;
	destid = dev_info.destid;
	hopcount = dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += struct_size(rswitch, nextdev,
				    RIO_GET_TOTAL_PORTS(swpinfo));
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			put_device(&net->dev);
			mport->net = NULL;
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err) {
		put_device(&rdev->dev);
		return err;
	}

	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}
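
/*
 * Naming example: when dev_info.name is empty, a created device is named
 * from the mport id and the lower component-tag bits, e.g. "01:e:0004" for
 * an endpoint or "01:s:0004" for a switch on mport 1.
 */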

static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == dev_info.destid &&
			    rdev->hopcount == dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}

/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc_obj(*priv);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		put_device(&chdev->dev);
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif
	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}

static int mport_cdev_fasync(int fd, struct file *filp, int mode)
{
	struct mport_cdev_priv *priv = filp->private_data;

	return fasync_helper(fd, filp, mode, &priv->async_queue);
}
err=%ld", 1971 current->comm, task_pid_nr(current), wret); 1972 } 1973 1974 if (priv->dmach != priv->md->dma_chan) { 1975 rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)", 1976 filp, current->comm, task_pid_nr(current)); 1977 rio_release_dma(priv->dmach); 1978 } else { 1979 rmcd_debug(EXIT, "Adjust default DMA channel refcount"); 1980 kref_put(&md->dma_ref, mport_release_def_dma); 1981 } 1982 1983 priv->dmach = NULL; 1984 } 1985 #else 1986 #define mport_cdev_release_dma(priv) do {} while (0) 1987 #endif 1988 1989 /* 1990 * mport_cdev_release() - Release character device 1991 */ 1992 static int mport_cdev_release(struct inode *inode, struct file *filp) 1993 { 1994 struct mport_cdev_priv *priv = filp->private_data; 1995 struct mport_dev *chdev; 1996 struct rio_mport_pw_filter *pw_filter, *pw_filter_next; 1997 struct rio_mport_db_filter *db_filter, *db_filter_next; 1998 struct rio_mport_mapping *map, *_map; 1999 unsigned long flags; 2000 2001 rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); 2002 2003 chdev = priv->md; 2004 mport_cdev_release_dma(filp); 2005 2006 priv->event_mask = 0; 2007 2008 spin_lock_irqsave(&chdev->pw_lock, flags); 2009 if (!list_empty(&priv->pw_filters)) { 2010 list_for_each_entry_safe(pw_filter, pw_filter_next, 2011 &priv->pw_filters, priv_node) 2012 rio_mport_delete_pw_filter(pw_filter); 2013 } 2014 spin_unlock_irqrestore(&chdev->pw_lock, flags); 2015 2016 spin_lock_irqsave(&chdev->db_lock, flags); 2017 list_for_each_entry_safe(db_filter, db_filter_next, 2018 &priv->db_filters, priv_node) { 2019 rio_mport_delete_db_filter(db_filter); 2020 } 2021 spin_unlock_irqrestore(&chdev->db_lock, flags); 2022 2023 kfifo_free(&priv->event_fifo); 2024 2025 mutex_lock(&chdev->buf_mutex); 2026 list_for_each_entry_safe(map, _map, &chdev->mappings, node) { 2027 if (map->filp == filp) { 2028 rmcd_debug(EXIT, "release mapping %p filp=%p", 2029 map->virt_addr, filp); 2030 kref_put(&map->ref, mport_release_mapping); 2031 } 2032 } 2033 mutex_unlock(&chdev->buf_mutex); 2034 2035 mport_cdev_fasync(-1, filp, 0); 2036 filp->private_data = NULL; 2037 mutex_lock(&chdev->file_mutex); 2038 list_del(&priv->list); 2039 mutex_unlock(&chdev->file_mutex); 2040 put_device(&chdev->dev); 2041 kfree(priv); 2042 return 0; 2043 } 2044 2045 /* 2046 * mport_cdev_ioctl() - IOCTLs for character device 2047 */ 2048 static long mport_cdev_ioctl(struct file *filp, 2049 unsigned int cmd, unsigned long arg) 2050 { 2051 int err = -EINVAL; 2052 struct mport_cdev_priv *data = filp->private_data; 2053 struct mport_dev *md = data->md; 2054 2055 if (atomic_read(&md->active) == 0) 2056 return -ENODEV; 2057 2058 switch (cmd) { 2059 case RIO_MPORT_MAINT_READ_LOCAL: 2060 return rio_mport_maint_rd(data, (void __user *)arg, 1); 2061 case RIO_MPORT_MAINT_WRITE_LOCAL: 2062 return rio_mport_maint_wr(data, (void __user *)arg, 1); 2063 case RIO_MPORT_MAINT_READ_REMOTE: 2064 return rio_mport_maint_rd(data, (void __user *)arg, 0); 2065 case RIO_MPORT_MAINT_WRITE_REMOTE: 2066 return rio_mport_maint_wr(data, (void __user *)arg, 0); 2067 case RIO_MPORT_MAINT_HDID_SET: 2068 return maint_hdid_set(data, (void __user *)arg); 2069 case RIO_MPORT_MAINT_COMPTAG_SET: 2070 return maint_comptag_set(data, (void __user *)arg); 2071 case RIO_MPORT_MAINT_PORT_IDX_GET: 2072 return maint_port_idx_get(data, (void __user *)arg); 2073 case RIO_MPORT_GET_PROPERTIES: 2074 md->properties.hdid = md->mport->host_deviceid; 2075 if (copy_to_user((void __user *)arg, &(md->properties), 2076 sizeof(md->properties))) 2077 return -EFAULT; 2078 

/*
 * mport_cdev_ioctl() - IOCTLs for character device
 */
static long mport_cdev_ioctl(struct file *filp,
			     unsigned int cmd, unsigned long arg)
{
	int err = -EINVAL;
	struct mport_cdev_priv *data = filp->private_data;
	struct mport_dev *md = data->md;

	if (atomic_read(&md->active) == 0)
		return -ENODEV;

	switch (cmd) {
	case RIO_MPORT_MAINT_READ_LOCAL:
		return rio_mport_maint_rd(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_WRITE_LOCAL:
		return rio_mport_maint_wr(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_READ_REMOTE:
		return rio_mport_maint_rd(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_WRITE_REMOTE:
		return rio_mport_maint_wr(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_HDID_SET:
		return maint_hdid_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_COMPTAG_SET:
		return maint_comptag_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_PORT_IDX_GET:
		return maint_port_idx_get(data, (void __user *)arg);
	case RIO_MPORT_GET_PROPERTIES:
		md->properties.hdid = md->mport->host_deviceid;
		if (copy_to_user((void __user *)arg, &(md->properties),
				 sizeof(md->properties)))
			return -EFAULT;
		return 0;
	case RIO_ENABLE_DOORBELL_RANGE:
		return rio_mport_add_db_filter(data, (void __user *)arg);
	case RIO_DISABLE_DOORBELL_RANGE:
		return rio_mport_remove_db_filter(data, (void __user *)arg);
	case RIO_ENABLE_PORTWRITE_RANGE:
		return rio_mport_add_pw_filter(data, (void __user *)arg);
	case RIO_DISABLE_PORTWRITE_RANGE:
		return rio_mport_remove_pw_filter(data, (void __user *)arg);
	case RIO_SET_EVENT_MASK:
		data->event_mask = (u32)arg;
		return 0;
	case RIO_GET_EVENT_MASK:
		if (copy_to_user((void __user *)arg, &data->event_mask,
				 sizeof(u32)))
			return -EFAULT;
		return 0;
	case RIO_MAP_OUTBOUND:
		return rio_mport_obw_map(filp, (void __user *)arg);
	case RIO_MAP_INBOUND:
		return rio_mport_map_inbound(filp, (void __user *)arg);
	case RIO_UNMAP_OUTBOUND:
		return rio_mport_obw_free(filp, (void __user *)arg);
	case RIO_UNMAP_INBOUND:
		return rio_mport_inbound_free(filp, (void __user *)arg);
	case RIO_ALLOC_DMA:
		return rio_mport_alloc_dma(filp, (void __user *)arg);
	case RIO_FREE_DMA:
		return rio_mport_free_dma(filp, (void __user *)arg);
	case RIO_WAIT_FOR_ASYNC:
		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
	case RIO_TRANSFER:
		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
	case RIO_DEV_ADD:
		return rio_mport_add_riodev(data, (void __user *)arg);
	case RIO_DEV_DEL:
		return rio_mport_del_riodev(data, (void __user *)arg);
	default:
		break;
	}

	return err;
}
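
/*
 * Userspace sketch (illustrative only): querying mport properties and
 * selecting event delivery through the ioctl interface declared in
 * <linux/rio_mport_cdev.h>.  Error handling is omitted for brevity.
 *
 *	struct rio_mport_properties props;
 *
 *	ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props);
 *	printf("mport %d, hdid %d\n", props.id, props.hdid);
 *
 *	// Note: per the handler above, RIO_SET_EVENT_MASK takes the mask
 *	// by value in 'arg', not by pointer.
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
 */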

/*
 * mport_release_mapping - free mapping resources and info structure
 * @ref: a pointer to the kref within struct rio_mport_mapping
 *
 * NOTE: Shall be called while holding buf_mutex.
 */
static void mport_release_mapping(struct kref *ref)
{
	struct rio_mport_mapping *map =
		container_of(ref, struct rio_mport_mapping, ref);
	struct rio_mport *mport = map->md->mport;

	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
		   map->dir, map->virt_addr,
		   &map->phys_addr, mport->name);

	list_del(&map->node);

	switch (map->dir) {
	case MAP_INBOUND:
		rio_unmap_inb_region(mport, map->phys_addr);
		fallthrough;
	case MAP_DMA:
		dma_free_coherent(mport->dev.parent, map->size,
				  map->virt_addr, map->phys_addr);
		break;
	case MAP_OUTBOUND:
		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
		break;
	}
	kfree(map);
}

static void mport_mm_open(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	kref_get(&map->ref);
}

static void mport_mm_close(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	mutex_lock(&map->md->buf_mutex);
	kref_put(&map->ref, mport_release_mapping);
	mutex_unlock(&map->md->buf_mutex);
}

static const struct vm_operations_struct vm_ops = {
	.open =	mport_mm_open,
	.close = mport_mm_close,
};

static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	size_t size = vma->vm_end - vma->vm_start;
	dma_addr_t baddr;
	unsigned long offset;
	int found = 0, ret;
	struct rio_mport_mapping *map;

	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
		   (unsigned int)size, vma->vm_pgoff);

	md = priv->md;
	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (baddr >= map->phys_addr &&
		    baddr < (map->phys_addr + map->size)) {
			found = 1;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (!found)
		return -ENOMEM;

	offset = baddr - map->phys_addr;

	if (size + offset > map->size)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);

	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
					map->virt_addr, map->phys_addr,
					map->size);
	else if (map->dir == MAP_OUTBOUND) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
	} else {
		rmcd_error("Attempt to mmap unsupported mapping type");
		ret = -EIO;
	}

	if (!ret) {
		vma->vm_private_data = map;
		vma->vm_ops = &vm_ops;
		mport_mm_open(vma);
	} else {
		rmcd_error("MMAP exit with err=%d", ret);
	}

	return ret;
}
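
/*
 * Userspace sketch (illustrative only): allocating a coherent DMA buffer
 * with RIO_ALLOC_DMA and mapping it into the process.  The mmap() offset
 * carries the DMA handle returned by the driver, which mport_cdev_mmap()
 * above looks up in the md->mappings list.  Error handling is omitted.
 *
 *	struct rio_dma_mem mem = { .length = 0x10000 };
 *
 *	ioctl(fd, RIO_ALLOC_DMA, &mem);
 *	void *buf = mmap(NULL, mem.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mem.dma_handle);
 *	...
 *	munmap(buf, mem.length);
 *	ioctl(fd, RIO_FREE_DMA, &mem.dma_handle);
 */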

static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
	struct mport_cdev_priv *priv = filp->private_data;

	poll_wait(filp, &priv->event_rx_wait, wait);
	if (kfifo_len(&priv->event_fifo))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	int copied;
	ssize_t ret;

	if (!count)
		return 0;

	if (kfifo_is_empty(&priv->event_fifo) &&
	    (filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	if (count % sizeof(struct rio_event))
		return -EINVAL;

	ret = wait_event_interruptible(priv->event_rx_wait,
				       kfifo_len(&priv->event_fifo) != 0);
	if (ret)
		return ret;

	while (ret < count) {
		if (kfifo_to_user(&priv->event_fifo, buf,
				  sizeof(struct rio_event), &copied))
			return -EFAULT;
		ret += copied;
		buf += copied;
	}

	return ret;
}

static ssize_t mport_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_mport *mport = priv->md->mport;
	struct rio_event event;
	int len, ret;

	if (!count)
		return 0;

	if (count % sizeof(event))
		return -EINVAL;

	len = 0;
	while ((count - len) >= (int)sizeof(event)) {
		if (copy_from_user(&event, buf, sizeof(event)))
			return -EFAULT;

		if (event.header != RIO_DOORBELL)
			return -EINVAL;

		ret = rio_mport_send_doorbell(mport,
					      event.u.doorbell.rioid,
					      event.u.doorbell.payload);
		if (ret < 0)
			return ret;

		len += sizeof(event);
		buf += sizeof(event);
	}

	return len;
}
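
/*
 * Userspace sketch (illustrative only): the read/write interface operates
 * on whole struct rio_event records.  read() blocks (unless O_NONBLOCK is
 * set) until an enabled event is queued; write() accepts only RIO_DOORBELL
 * events and sends each one as an outbound doorbell.
 *
 *	struct rio_event ev;
 *
 *	read(fd, &ev, sizeof(ev));		// fetch one queued event
 *
 *	ev.header = RIO_DOORBELL;
 *	ev.u.doorbell.rioid = destid;		// destination device ID
 *	ev.u.doorbell.payload = 0xbeef;		// 16-bit doorbell info
 *	write(fd, &ev, sizeof(ev));		// send one doorbell
 */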

static const struct file_operations mport_fops = {
	.owner		= THIS_MODULE,
	.open		= mport_cdev_open,
	.release	= mport_cdev_release,
	.poll		= mport_cdev_poll,
	.read		= mport_read,
	.write		= mport_write,
	.mmap		= mport_cdev_mmap,
	.fasync		= mport_cdev_fasync,
	.unlocked_ioctl = mport_cdev_ioctl
};

/*
 * Character device management
 */

static void mport_device_release(struct device *dev)
{
	struct mport_dev *md;

	rmcd_debug(EXIT, "%s", dev_name(dev));
	md = container_of(dev, struct mport_dev, dev);
	kfree(md);
}

/*
 * mport_cdev_add() - Create mport_dev from rio_mport
 * @mport:	RapidIO master port
 */
static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
{
	int ret = 0;
	struct mport_dev *md;
	struct rio_mport_attr attr;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable to allocate a device object");
		return NULL;
	}

	md->mport = mport;
	mutex_init(&md->buf_mutex);
	mutex_init(&md->file_mutex);
	INIT_LIST_HEAD(&md->file_list);

	device_initialize(&md->dev);
	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
	md->dev.class = &dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;

	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);

	md->properties.id = mport->id;
	md->properties.sys_size = mport->sys_size;
	md->properties.hdid = mport->host_deviceid;
	md->properties.index = mport->index;

	/* The transfer_mode property will be returned through mport query
	 * interface
	 */
#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif

	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
			   mport->id, ret);
		goto err_cdev;
	}
	ret = rio_query_mport(mport, &attr);
	if (!ret) {
		md->properties.flags = attr.flags;
		md->properties.link_speed = attr.link_speed;
		md->properties.link_width = attr.link_width;
		md->properties.dma_max_sge = attr.dma_max_sge;
		md->properties.dma_max_size = attr.dma_max_size;
		md->properties.dma_align = attr.dma_align;
		md->properties.cap_sys_size = 0;
		md->properties.cap_transfer_mode = 0;
		md->properties.cap_addr_size = 0;
	} else
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);

	mutex_lock(&mport_devs_lock);
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);

	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
		mport->name, MAJOR(dev_number), mport->id);

	return md;

err_cdev:
	put_device(&md->dev);
	return NULL;
}

/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 *                              associated DMA channels.
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *client;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->dmach) {
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
		}
	}
	mutex_unlock(&md->file_mutex);

	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}

/*
 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
 *                            mport_cdev files.
 */
static int mport_cdev_kill_fasync(struct mport_dev *md)
{
	unsigned int files = 0;
	struct mport_cdev_priv *client;

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->async_queue)
			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
		files++;
	}
	mutex_unlock(&md->file_mutex);
	return files;
}

/*
 * mport_cdev_remove() - Remove mport character device
 * @md: mport device to remove
 */
static void mport_cdev_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
	atomic_set(&md->active, 0);
	mport_cdev_terminate_dma(md);
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_device_del(&md->cdev, &md->dev);
	mport_cdev_kill_fasync(md);

	/* TODO: do we need to give clients some time to close file
	 * descriptors? Simple wait for XX, or kref?
	 */

	/*
	 * Release DMA buffers allocated for the mport device.
	 * Disable associated inbound RapidIO requests mapping if applicable.
	 */
	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	if (!list_empty(&md->mappings))
		rmcd_warn("WARNING: %s pending mappings on removal",
			  md->mport->name);

	rio_release_inb_dbell(md->mport, 0, 0x0fff);

	put_device(&md->dev);
}

/*
 * RIO rio_mport_interface driver
 */

/*
 * mport_add_mport() - Add rio_mport from LDM device struct
 * @dev:	Linux device model struct
 */
static int mport_add_mport(struct device *dev)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev = NULL;

	mport = to_rio_mport(dev);
	if (!mport)
		return -ENODEV;

	chdev = mport_cdev_add(mport);
	if (!chdev)
		return -ENODEV;

	return 0;
}

/*
 * mport_remove_mport() - Remove rio_mport from global list
 * TODO remove device from global mport_dev list
 */
static void mport_remove_mport(struct device *dev)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev;
	int found = 0;

	mport = to_rio_mport(dev);
	rmcd_debug(EXIT, "Remove %s", mport->name);

	mutex_lock(&mport_devs_lock);
	list_for_each_entry(chdev, &mport_devs, node) {
		if (chdev->mport->id == mport->id) {
			atomic_set(&chdev->active, 0);
			list_del(&chdev->node);
			found = 1;
			break;
		}
	}
	mutex_unlock(&mport_devs_lock);

	if (found)
		mport_cdev_remove(chdev);
}

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = mport_add_mport,
	.remove_dev = mport_remove_mport,
};

/*
 * Linux kernel module
 */

/*
 * mport_init - Driver module loading
 */
static int __init mport_init(void)
{
	int ret;

	/* Create device class needed by udev */
	ret = class_register(&dev_class);
	if (ret) {
		rmcd_error("Unable to create " DRV_NAME " class");
		return ret;
	}

	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
	if (ret < 0)
		goto err_chr;

	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));

	/* Register to rio_mport_interface */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		rmcd_error("class_interface_register() failed, err=%d", ret);
		goto err_cli;
	}

	return 0;

err_cli:
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
	class_unregister(&dev_class);
	return ret;
}

/*
 * mport_exit - Driver module unloading
 */
static void __exit mport_exit(void)
{
	class_interface_unregister(&rio_mport_interface);
	class_unregister(&dev_class);
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
}

module_init(mport_init);
module_exit(mport_exit);
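
/*
 * Usage note (illustrative, assuming the module is built as rio_mport_cdev):
 * the DMA transfer timeout and, on DEBUG builds, the debug output mask can
 * be set at load time, e.g.:
 *
 *	modprobe rio_mport_cdev dma_timeout=5000 dbg_level=0x10
 *
 * where 0x10 corresponds to DBG_DMA in the debug filtering masks above.
 */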