/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This file contains support for diagnostic functions.  It is accessed by
 * opening the qib_diag device, normally minor number 129.  Diagnostic use
 * of the QLogic_IB chip may render the chip or board unusable until the
 * driver is unloaded, or in some cases, until the system is rebooted.
 *
 * Accesses to the chip through this interface are not similar to going
 * through the /sys/bus/pci resource mmap interface.
 */

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#include "qib.h"
#include "qib_common.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

/*
 * Each client that opens the diag device must read then write
 * offset 0, to prevent lossage from random cat or od.  diag_state
 * sequences this "handshake".
 */
enum diag_state { UNUSED = 0, OPENED, INIT, READY };

/* State for an individual client. PID so children cannot abuse handshake */
static struct qib_diag_client {
	struct qib_diag_client *next;
	struct qib_devdata *dd;
	pid_t pid;
	enum diag_state state;
} *client_pool;

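/*
 * A minimal sketch of the handshake as seen from userspace (illustrative
 * only; the device node path is an assumption, since it depends on udev).
 * Both transfers must be 8 bytes at offset 0, which steps the client
 * OPENED -> INIT -> READY, as enforced in qib_diag_read() and
 * qib_diag_write() below:
 *
 *	int fd = open("/dev/ipath_diag0", O_RDWR);
 *	u64 scratch;
 *
 *	read(fd, &scratch, 8);		read at offset 0: OPENED -> INIT
 *	write(fd, &scratch, 8);		write at offset 0: INIT -> READY
 *
 * Arbitrary 32-bit-aligned reads and writes are permitted after that.
 */
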
/*
 * Get a client struct.  Recycled if possible, else kmalloc.
 * Must be called with qib_mutex held.
 */
static struct qib_diag_client *get_client(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	dc = client_pool;
	if (dc)
		/* got one from the pool; remove it and use it */
		client_pool = dc->next;
	else
		/* none in the pool, alloc and init */
		dc = kmalloc(sizeof(*dc), GFP_KERNEL);

	if (dc) {
		dc->next = NULL;
		dc->dd = dd;
		dc->pid = current->pid;
		dc->state = OPENED;
	}
	return dc;
}

/*
 * Return to pool.  Must be called with qib_mutex held.
 */
static void return_client(struct qib_diag_client *dc)
{
	struct qib_devdata *dd = dc->dd;
	struct qib_diag_client *tdc, *rdc;

	rdc = NULL;
	if (dc == dd->diag_client) {
		dd->diag_client = dc->next;
		rdc = dc;
	} else {
		tdc = dc->dd->diag_client;
		while (tdc) {
			if (dc == tdc->next) {
				tdc->next = dc->next;
				rdc = dc;
				break;
			}
			tdc = tdc->next;
		}
	}
	if (rdc) {
		rdc->state = UNUSED;
		rdc->dd = NULL;
		rdc->pid = 0;
		rdc->next = client_pool;
		client_pool = rdc;
	}
}

static int qib_diag_open(struct inode *in, struct file *fp);
static int qib_diag_release(struct inode *in, struct file *fp);
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off);
static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off);

static const struct file_operations diag_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diag_write,
	.read = qib_diag_read,
	.open = qib_diag_open,
	.release = qib_diag_release,
	.llseek = default_llseek,
};

static atomic_t diagpkt_count = ATOMIC_INIT(0);
static struct cdev *diagpkt_cdev;
static struct device *diagpkt_device;

static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
				 size_t count, loff_t *off);

static const struct file_operations diagpkt_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_diagpkt_write,
	.llseek = noop_llseek,
};

int qib_diag_add(struct qib_devdata *dd)
{
	char name[16];
	int ret = 0;

	if (atomic_inc_return(&diagpkt_count) == 1) {
		ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
				    &diagpkt_file_ops, &diagpkt_cdev,
				    &diagpkt_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
	ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
			    &diag_file_ops, &dd->diag_cdev,
			    &dd->diag_device);
done:
	return ret;
}

static void qib_unregister_observers(struct qib_devdata *dd);

void qib_diag_remove(struct qib_devdata *dd)
{
	struct qib_diag_client *dc;

	if (atomic_dec_and_test(&diagpkt_count))
		qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);

	qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);

	/*
	 * Return all diag_clients of this device.  There should be none,
	 * as we are "guaranteed" that no clients are still open.
	 */
	while (dd->diag_client)
		return_client(dd->diag_client);

	/* Now clean up all unused client structs */
	while (client_pool) {
		dc = client_pool;
		client_pool = dc->next;
		kfree(dc);
	}
	/* Clean up observer list */
	qib_unregister_observers(dd);
}

/*
 * qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
 * @dd: the qlogic_ib device
 * @offset: the offset in chip-space
 * @cntp: pointer to max (byte) count for transfer starting at offset
 *
 * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
 * mapping.  It is needed because with the use of PAT for control of
 * write-combining, the logically contiguous address-space of the chip
 * may be split into virtually non-contiguous spaces, with different
 * attributes, which are then mapped to contiguous physical space
 * based on the first BAR.
 *
 * The code below makes the same assumptions as were made in
 * init_chip_wc_pat() (qib_init.c), copied here:
 * Assumes chip address space looks like:
 *		- kregs + sregs + cregs + uregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *	or:
 *		- kregs + sregs + cregs (in any order)
 *		- piobufs (2K and 4K bufs in either order)
 *		- uregs
 *
 * If cntp is non-NULL, returns how many bytes from offset can be accessed.
 * Returns 0 if the offset is not mapped.
 */
static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
				       u32 *cntp)
{
	u32 kreglen;
	u32 snd_bottom, snd_lim = 0;
	u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
	u32 __iomem *map = NULL;
	u32 cnt = 0;
	u32 tot4k, offs4k;

	/* First, simplest case, offset is within the first map. */
	kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
	if (offset < kreglen) {
		map = krb32 + (offset / sizeof(u32));
		cnt = kreglen - offset;
		goto mapped;
	}

	/*
	 * Next check for user regs, the next most common case,
	 * and a cheap check because if they are not in the first map
	 * they are last in chip.
	 */
	if (dd->userbase) {
		/* If user regs mapped, they are after send, so set limit. */
		u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;

		if (!dd->piovl15base)
			snd_lim = dd->uregbase;
		krb32 = (u32 __iomem *)dd->userbase;
		if (offset >= dd->uregbase && offset < ulim) {
			map = krb32 + (offset - dd->uregbase) / sizeof(u32);
			cnt = ulim - offset;
			goto mapped;
		}
	}

	/*
	 * Lastly, check for offset within Send Buffers.
	 * This is gnarly because struct devdata is deliberately vague
	 * about things like 7322 VL15 buffers, and we are not in
	 * chip-specific code here, so should not make many assumptions.
	 * The one we _do_ make is that the only chip that has more sndbufs
	 * than we admit is the 7322, and it has userregs above that, so
	 * we know the snd_lim.
	 */
	/* Assume 2K buffers are first. */
	snd_bottom = dd->pio2k_bufbase;
	if (snd_lim == 0) {
		u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);

		snd_lim = snd_bottom + tot2k;
	}
	/*
	 * If 4k buffers exist, account for them by bumping
	 * appropriate limit.
	 */
	tot4k = dd->piobcnt4k * dd->align4k;
	offs4k = dd->piobufbase >> 32;
	if (dd->piobcnt4k) {
		if (snd_bottom > offs4k)
			snd_bottom = offs4k;
		else {
			/* 4k above 2k.  Bump snd_lim, if needed */
			if (!dd->userbase || dd->piovl15base)
				snd_lim = offs4k + tot4k;
		}
	}
	/*
	 * Judgement call: can we ignore the space between SendBuffs and
	 * UserRegs, where we would like to see vl15 buffs, but not more?
	 */
	if (offset >= snd_bottom && offset < snd_lim) {
		/* compute remaining count before offset is rebased */
		cnt = snd_lim - offset;
		offset -= snd_bottom;
		map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
	}

	if (!map && offs4k && dd->piovl15base) {
		snd_lim = offs4k + tot4k + 2 * dd->align4k;
		if (offset >= (offs4k + tot4k) && offset < snd_lim) {
			map = (u32 __iomem *)dd->piovl15base +
				((offset - (offs4k + tot4k)) / sizeof(u32));
			cnt = snd_lim - offset;
		}
	}

mapped:
	if (cntp)
		*cntp = cnt;
	return map;
}

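/*
 * Worked example of the window logic above; every number here is
 * hypothetical, not taken from any real chip.  Suppose kregs occupy
 * [0, 0x40000), the 2K PIO buffers start at snd_bottom = 0x40000, and
 * user regs are last.  An offset of 0x41000 misses the kreg and ureg
 * windows, lands in [snd_bottom, snd_lim), and so maps to dd->piobase
 * plus 0x1000 bytes, with *cntp = snd_lim - 0x41000 bytes accessible.
 */
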
/*
 * qib_read_umem64 - read a 64-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy (multiple of 64 bits)
 *
 * This function also localizes all chip memory accesses.
 * The copy should be written such that we read full cacheline packets
 * from the chip.  This is usually used for a single qword.
 *
 * NOTE: This assumes the chip address is 64-bit aligned.
 */
static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data = readq(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(u64))) {
			ret = -EFAULT;
			goto bail;
		}
		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem64 - write a 64-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: the number of bytes to copy (multiple of 64 bits)
 *
 * This is usually used for a single qword.
 * NOTE: This assumes the chip address is 64-bit aligned.
 */
static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u64 __iomem *reg_addr;
	const u64 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u64));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u64 data;

		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writeq(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u64);
	}
	ret = 0;
bail:
	return ret;
}

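/*
 * Illustrative call only (the 0x100 chip offset is hypothetical): a
 * fully 64-bit-aligned request from qib_diag_read() takes the path
 *
 *	ret = qib_read_umem64(dd, user_buf, 0x100, sizeof(u64));
 *
 * The 32-bit variants below exist both for requests that are only
 * 32-bit aligned and for chip memories that only support 32-bit access.
 */
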
/*
 * qib_read_umem32 - read a 32-bit quantity from the chip into user space
 * @dd: the qlogic_ib device
 * @uaddr: the location to store the data in user memory
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @count: number of bytes to copy
 *
 * read 32 bit values, not 64 bit; for memories that only
 * support 32 bit reads; usually a single dword.
 */
static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
			   u32 regoffs, size_t count)
{
	const u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	/* not very efficient, but it works for now */
	while (reg_addr < reg_end) {
		u32 data = readl(reg_addr);

		if (copy_to_user(uaddr, &data, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}

/*
 * qib_write_umem32 - write a 32-bit quantity to the chip from user space
 * @dd: the qlogic_ib device
 * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
 * @uaddr: the source of the data in user memory
 * @count: number of bytes to copy
 *
 * write 32 bit values, not 64 bit; for memories that only
 * support 32 bit write; usually a single dword.
 */
static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
			    const void __user *uaddr, size_t count)
{
	u32 __iomem *reg_addr;
	const u32 __iomem *reg_end;
	u32 limit;
	int ret;

	reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
	if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
		ret = -EINVAL;
		goto bail;
	}
	if (count >= limit)
		count = limit;
	reg_end = reg_addr + (count / sizeof(u32));

	while (reg_addr < reg_end) {
		u32 data;

		if (copy_from_user(&data, uaddr, sizeof(data))) {
			ret = -EFAULT;
			goto bail;
		}
		writel(data, reg_addr);

		reg_addr++;
		uaddr += sizeof(u32);
	}
	ret = 0;
bail:
	return ret;
}

static int qib_diag_open(struct inode *in, struct file *fp)
{
	int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
	struct qib_devdata *dd;
	struct qib_diag_client *dc;
	int ret;

	mutex_lock(&qib_mutex);

	dd = qib_lookup(unit);

	if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
	    !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}

	dc = get_client(dd);
	if (!dc) {
		ret = -ENOMEM;
		goto bail;
	}
	dc->next = dd->diag_client;
	dd->diag_client = dc;
	fp->private_data = dc;
	ret = 0;
bail:
	mutex_unlock(&qib_mutex);

	return ret;
}

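/*
 * Sketch of packet injection from userspace, with illustrative field
 * values; what is written to the diagpkt device is the qib_diag_xpkt
 * descriptor itself, while the packet payload is fetched from dp.data:
 *
 *	struct qib_diag_xpkt dp = {
 *		.version = _DIAG_XPKT_VERS,
 *		.unit = 0,
 *		.port = 1,
 *		.len = pkt_len,
 *		.data = (u64) (unsigned long) pkt_buf,
 *	};
 *
 *	write(diagpkt_fd, &dp, sizeof(dp));
 *
 * Leaving pbc_wd as 0 lets qib_diagpkt_write() below derive the PBC
 * word from the packet length.
 */
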
/**
 * qib_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: qib_diag_xpkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t qib_diagpkt_write(struct file *fp,
				 const char __user *data,
				 size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, pbufn, maxlen_reserve;
	struct qib_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	ssize_t ret = 0;

	if (count != sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}
	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	dd = qib_lookup(dp.unit);
	if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
		ret = -ENODEV;
		goto bail;
	}
	if (!(dd->flags & QIB_INITTED)) {
		/* no hardware, freeze, etc. */
		ret = -ENODEV;
		goto bail;
	}

	if (dp.version != _DIAG_XPKT_VERS) {
		qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
			    dp.version);
		ret = -EINVAL;
		goto bail;
	}
	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}
	if (!dp.port || dp.port > dd->num_pports) {
		ret = -EINVAL;
		goto bail;
	}
	ppd = &dd->pport[dp.port - 1];

	/*
	 * Need total length before the first word is written, plus 2 Dwords.
	 * One Dword is for padding so we get the full user data when not
	 * aligned on a word boundary.  The other Dword is to make sure we
	 * have room for the ICRC which gets tacked on later.
	 */
	maxlen_reserve = 2 * sizeof(u32);
	if (dp.len > ppd->ibmaxlen - maxlen_reserve) {
		ret = -EINVAL;
		goto bail;
	}

	plen = sizeof(u32) + dp.len;

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		qib_devinfo(dd->pcidev,
			    "Unable to allocate tmp buffer, failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;

	piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
	if (!piobuf) {
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just to be extra sure */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));

	/* disable header check on pbufn for this packet */
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);

	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before trigger word, then write trigger word, then
	 * flush again, so packet is sent.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		qib_flush_wc();
		qib_pio_copy(piobuf + 2, tmpbuf, plen - 1);
		qib_flush_wc();
		__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
	} else
		qib_pio_copy(piobuf + 2, tmpbuf, plen);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	/*
	 * Ensure buffer is written to the chip, then re-enable
	 * header checks (if supported by chip).  The txchk
	 * code will ensure seen by chip before returning.
	 */
	qib_flush_wc();
	qib_sendbuf_done(dd, pbufn);
	dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}

static int qib_diag_release(struct inode *in, struct file *fp)
{
	mutex_lock(&qib_mutex);
	return_client(fp->private_data);
	fp->private_data = NULL;
	mutex_unlock(&qib_mutex);
	return 0;
}

/*
 * Chip-specific code calls to register its interest in
 * a specific range.
 */
struct diag_observer_list_elt {
	struct diag_observer_list_elt *next;
	const struct diag_observer *op;
};

int qib_register_observer(struct qib_devdata *dd,
			  const struct diag_observer *op)
{
	struct diag_observer_list_elt *olp;
	unsigned long flags;

	if (!dd || !op)
		return -EINVAL;
	olp = vmalloc(sizeof(*olp));
	if (!olp) {
		pr_err("vmalloc for observer failed\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp->op = op;
	olp->next = dd->diag_observer_list;
	dd->diag_observer_list = olp;
	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);

	return 0;
}

/* Remove all registered observers when device is closed */
static void qib_unregister_observers(struct qib_devdata *dd)
{
	struct diag_observer_list_elt *olp;
	unsigned long flags;

	spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
	olp = dd->diag_observer_list;
	while (olp) {
		/* Pop one observer, let go of lock */
		dd->diag_observer_list = olp->next;
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		vfree(olp);
		/* try again. */
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		olp = dd->diag_observer_list;
	}
	spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
}

/*
 * Find the observer, if any, for the specified address.  The initial
 * implementation is a simple stack of observers.  This must be called
 * with the diag transaction lock held.
 */
static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
						     u32 addr)
{
	struct diag_observer_list_elt *olp;
	const struct diag_observer *op = NULL;

	olp = dd->diag_observer_list;
	while (olp) {
		op = olp->op;
		if (addr >= op->bottom && addr <= op->top)
			break;
		olp = olp->next;
	}
	if (!olp)
		op = NULL;

	return op;
}

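/*
 * Hypothetical registration from chip-specific code; the names here are
 * illustrative, not from any real chip file.  The hook runs with the
 * diag transaction lock held and sees a single 32- or 64-bit access
 * anywhere in [bottom, top]:
 *
 *	static const struct diag_observer sendctrl_observer = {
 *		.hook = sendctrl_hook,
 *		.bottom = kr_sendctrl_offset,
 *		.top = kr_sendctrl_offset + sizeof(u64) - 1,
 *	};
 *
 *	ret = qib_register_observer(dd, &sendctrl_observer);
 */
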
static ssize_t qib_diag_read(struct file *fp, char __user *data,
			     size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	ssize_t ret;

	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY && (*off || count != 8))
		ret = -EINVAL; /* prevent cat /dev/qib_diag* */
	else {
		unsigned long flags;
		u64 data64 = 0;
		int use_32;
		const struct diag_observer *op;

		use_32 = (count % 8) || (*off % 8);
		ret = -1;
		spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
		/*
		 * Check for observer on this address range.
		 * We only support a single 32 or 64-bit read
		 * via observer, currently.
		 */
		op = diag_get_observer(dd, *off);
		if (op) {
			u32 offset = *off;

			ret = op->hook(dd, op, offset, &data64, 0, use_32);
		}
		/*
		 * We need to release the lock before any copy_to_user(),
		 * whether implicit in qib_read_umem* or explicit below.
		 */
		spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit rd
				 */
				ret = qib_read_umem32(dd, data, (u32) *off,
						      count);
			else
				ret = qib_read_umem64(dd, data, (u32) *off,
						      count);
		} else if (ret == count) {
			/* Below finishes case where observer existed */
			ret = copy_to_user(data, &data64, use_32 ?
					   sizeof(u32) : sizeof(u64));
			if (ret)
				ret = -EFAULT;
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == OPENED)
			dc->state = INIT;
	}
bail:
	return ret;
}

static ssize_t qib_diag_write(struct file *fp, const char __user *data,
			      size_t count, loff_t *off)
{
	struct qib_diag_client *dc = fp->private_data;
	struct qib_devdata *dd = dc->dd;
	ssize_t ret;

	if (dc->pid != current->pid) {
		ret = -EPERM;
		goto bail;
	}

	if (count == 0)
		ret = 0;
	else if ((count % 4) || (*off % 4))
		/* address or length is not 32-bit aligned, hence invalid */
		ret = -EINVAL;
	else if (dc->state < READY &&
		 ((*off || count != 8) || dc->state != INIT))
		/* No writes except second-step of init seq */
		ret = -EINVAL; /* before any other write allowed */
	else {
		unsigned long flags;
		const struct diag_observer *op = NULL;
		int use_32 = (count % 8) || (*off % 8);

		/*
		 * Check for observer on this address range.
		 * We only support a single 32 or 64-bit write
		 * via observer, currently.  This helps, because
		 * we would otherwise have to jump through hoops
		 * to make "diag transaction" meaningful when we
		 * cannot do a copy_from_user while holding the lock.
		 */
		if (count == 4 || count == 8) {
			u64 data64;
			u32 offset = *off;

			ret = copy_from_user(&data64, data, count);
			if (ret) {
				ret = -EFAULT;
				goto bail;
			}
			spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
			op = diag_get_observer(dd, *off);
			if (op)
				ret = op->hook(dd, op, offset, &data64, ~0ULL,
					       use_32);
			spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
		}

		if (!op) {
			if (use_32)
				/*
				 * Address or length is not 64-bit aligned;
				 * do 32-bit write
				 */
				ret = qib_write_umem32(dd, (u32) *off, data,
						       count);
			else
				ret = qib_write_umem64(dd, (u32) *off, data,
						       count);
		}
	}

	if (ret >= 0) {
		*off += count;
		ret = count;
		if (dc->state == INIT)
			dc->state = READY; /* all read/write OK now */
	}
bail:
	return ret;
}