1 /* 2 * Copyright (c) 2012 Intel Corporation. All rights reserved. 3 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. 4 * Copyright (c) 2006 PathScale, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 #include <linux/ctype.h> 35 #include <rdma/ib_sysfs.h> 36 37 #include "qib.h" 38 #include "qib_mad.h" 39 40 static struct qib_pportdata *qib_get_pportdata_kobj(struct kobject *kobj) 41 { 42 u32 port_num; 43 struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num); 44 struct qib_devdata *dd = dd_from_ibdev(ibdev); 45 46 return &dd->pport[port_num - 1]; 47 } 48 49 /* 50 * Get/Set heartbeat enable. OR of 1=enabled, 2=auto 51 */ 52 static ssize_t hrtbt_enable_show(struct ib_device *ibdev, u32 port_num, 53 struct ib_port_attribute *attr, char *buf) 54 { 55 struct qib_devdata *dd = dd_from_ibdev(ibdev); 56 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; 57 58 return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT)); 59 } 60 61 static ssize_t hrtbt_enable_store(struct ib_device *ibdev, u32 port_num, 62 struct ib_port_attribute *attr, 63 const char *buf, size_t count) 64 { 65 struct qib_devdata *dd = dd_from_ibdev(ibdev); 66 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; 67 int ret; 68 u16 val; 69 70 ret = kstrtou16(buf, 0, &val); 71 if (ret) { 72 qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); 73 return ret; 74 } 75 76 /* 77 * Set the "intentional" heartbeat enable per either of 78 * "Enable" and "Auto", as these are normally set together. 79 * This bit is consulted when leaving loopback mode, 80 * because entering loopback mode overrides it and automatically 81 * disables heartbeat. 82 */ 83 ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); 84 return ret < 0 ? ret : count; 85 } 86 static IB_PORT_ATTR_RW(hrtbt_enable); 87 88 static ssize_t loopback_store(struct ib_device *ibdev, u32 port_num, 89 struct ib_port_attribute *attr, const char *buf, 90 size_t count) 91 { 92 struct qib_devdata *dd = dd_from_ibdev(ibdev); 93 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; 94 int ret = count, r; 95 96 r = dd->f_set_ib_loopback(ppd, buf); 97 if (r < 0) 98 ret = r; 99 100 return ret; 101 } 102 static IB_PORT_ATTR_WO(loopback); 103 104 static ssize_t led_override_store(struct ib_device *ibdev, u32 port_num, 105 struct ib_port_attribute *attr, 106 const char *buf, size_t count) 107 { 108 struct qib_devdata *dd = dd_from_ibdev(ibdev); 109 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; 110 int ret; 111 u16 val; 112 113 ret = kstrtou16(buf, 0, &val); 114 if (ret) { 115 qib_dev_err(dd, "attempt to set invalid LED override\n"); 116 return ret; 117 } 118 119 qib_set_led_override(ppd, val); 120 return count; 121 } 122 static IB_PORT_ATTR_WO(led_override); 123 124 static ssize_t status_show(struct ib_device *ibdev, u32 port_num, 125 struct ib_port_attribute *attr, char *buf) 126 { 127 struct qib_devdata *dd = dd_from_ibdev(ibdev); 128 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; 129 130 if (!ppd->statusp) 131 return -EINVAL; 132 133 return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp)); 134 } 135 static IB_PORT_ATTR_RO(status); 136 137 /* 138 * For userland compatibility, these offsets must remain fixed. 139 * They are strings for QIB_STATUS_* 140 */ 141 static const char * const qib_status_str[] = { 142 "Initted", 143 "", 144 "", 145 "", 146 "", 147 "Present", 148 "IB_link_up", 149 "IB_configured", 150 "", 151 "Fatal_Hardware_Error", 152 NULL, 153 }; 154 155 static ssize_t status_str_show(struct ib_device *ibdev, u32 port_num, 156 struct ib_port_attribute *attr, char *buf) 157 { 158 struct qib_devdata *dd = dd_from_ibdev(ibdev); 159 struct qib_pportdata *ppd = &dd->pport[port_num - 1]; 160 int i, any; 161 u64 s; 162 ssize_t ret; 163 164 if (!ppd->statusp) { 165 ret = -EINVAL; 166 goto bail; 167 } 168 169 s = *(ppd->statusp); 170 *buf = '\0'; 171 for (any = i = 0; s && qib_status_str[i]; i++) { 172 if (s & 1) { 173 /* if overflow */ 174 if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) 175 break; 176 if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >= 177 PAGE_SIZE) 178 break; 179 any = 1; 180 } 181 s >>= 1; 182 } 183 if (any) 184 strlcat(buf, "\n", PAGE_SIZE); 185 186 ret = strlen(buf); 187 188 bail: 189 return ret; 190 } 191 static IB_PORT_ATTR_RO(status_str); 192 193 /* end of per-port functions */ 194 195 static struct attribute *port_linkcontrol_attributes[] = { 196 &ib_port_attr_loopback.attr, 197 &ib_port_attr_led_override.attr, 198 &ib_port_attr_hrtbt_enable.attr, 199 &ib_port_attr_status.attr, 200 &ib_port_attr_status_str.attr, 201 NULL 202 }; 203 204 static const struct attribute_group port_linkcontrol_group = { 205 .name = "linkcontrol", 206 .attrs = port_linkcontrol_attributes, 207 }; 208 209 /* 210 * Start of per-port congestion control structures and support code 211 */ 212 213 /* 214 * Congestion control table size followed by table entries 215 */ 216 static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj, 217 struct bin_attribute *bin_attr, char *buf, 218 loff_t pos, size_t count) 219 { 220 struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj); 221 int ret; 222 223 if (!qib_cc_table_size || !ppd->ccti_entries_shadow) 224 return -EINVAL; 225 226 ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow) 227 + sizeof(__be16); 228 229 if (pos > ret) 230 return -EINVAL; 231 232 if (count > ret - pos) 233 count = ret - pos; 234 235 if (!count) 236 return count; 237 238 spin_lock(&ppd->cc_shadow_lock); 239 memcpy(buf, ppd->ccti_entries_shadow, count); 240 spin_unlock(&ppd->cc_shadow_lock); 241 242 return count; 243 } 244 static BIN_ATTR_RO(cc_table_bin, PAGE_SIZE); 245 246 /* 247 * Congestion settings: port control, control map and an array of 16 248 * entries for the congestion entries - increase, timer, event log 249 * trigger threshold and the minimum injection rate delay. 250 */ 251 static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj, 252 struct bin_attribute *bin_attr, char *buf, 253 loff_t pos, size_t count) 254 { 255 struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj); 256 int ret; 257 258 if (!qib_cc_table_size || !ppd->congestion_entries_shadow) 259 return -EINVAL; 260 261 ret = sizeof(struct ib_cc_congestion_setting_attr_shadow); 262 263 if (pos > ret) 264 return -EINVAL; 265 if (count > ret - pos) 266 count = ret - pos; 267 268 if (!count) 269 return count; 270 271 spin_lock(&ppd->cc_shadow_lock); 272 memcpy(buf, ppd->congestion_entries_shadow, count); 273 spin_unlock(&ppd->cc_shadow_lock); 274 275 return count; 276 } 277 static BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE); 278 279 static struct bin_attribute *port_ccmgta_attributes[] = { 280 &bin_attr_cc_setting_bin, 281 &bin_attr_cc_table_bin, 282 NULL, 283 }; 284 285 static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj, 286 const struct bin_attribute *attr, int n) 287 { 288 struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj); 289 290 if (!qib_cc_table_size || !ppd->congestion_entries_shadow) 291 return 0; 292 return attr->attr.mode; 293 } 294 295 static const struct attribute_group port_ccmgta_attribute_group = { 296 .name = "CCMgtA", 297 .is_bin_visible = qib_ccmgta_is_bin_visible, 298 .bin_attrs = port_ccmgta_attributes, 299 }; 300 301 /* Start sl2vl */ 302 303 struct qib_sl2vl_attr { 304 struct ib_port_attribute attr; 305 int sl; 306 }; 307 308 static ssize_t sl2vl_attr_show(struct ib_device *ibdev, u32 port_num, 309 struct ib_port_attribute *attr, char *buf) 310 { 311 struct qib_sl2vl_attr *sattr = 312 container_of(attr, struct qib_sl2vl_attr, attr); 313 struct qib_devdata *dd = dd_from_ibdev(ibdev); 314 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 315 316 return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]); 317 } 318 319 #define QIB_SL2VL_ATTR(N) \ 320 static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \ 321 .attr = __ATTR(N, 0444, sl2vl_attr_show, NULL), \ 322 .sl = N, \ 323 } 324 325 QIB_SL2VL_ATTR(0); 326 QIB_SL2VL_ATTR(1); 327 QIB_SL2VL_ATTR(2); 328 QIB_SL2VL_ATTR(3); 329 QIB_SL2VL_ATTR(4); 330 QIB_SL2VL_ATTR(5); 331 QIB_SL2VL_ATTR(6); 332 QIB_SL2VL_ATTR(7); 333 QIB_SL2VL_ATTR(8); 334 QIB_SL2VL_ATTR(9); 335 QIB_SL2VL_ATTR(10); 336 QIB_SL2VL_ATTR(11); 337 QIB_SL2VL_ATTR(12); 338 QIB_SL2VL_ATTR(13); 339 QIB_SL2VL_ATTR(14); 340 QIB_SL2VL_ATTR(15); 341 342 static struct attribute *port_sl2vl_attributes[] = { 343 &qib_sl2vl_attr_0.attr.attr, 344 &qib_sl2vl_attr_1.attr.attr, 345 &qib_sl2vl_attr_2.attr.attr, 346 &qib_sl2vl_attr_3.attr.attr, 347 &qib_sl2vl_attr_4.attr.attr, 348 &qib_sl2vl_attr_5.attr.attr, 349 &qib_sl2vl_attr_6.attr.attr, 350 &qib_sl2vl_attr_7.attr.attr, 351 &qib_sl2vl_attr_8.attr.attr, 352 &qib_sl2vl_attr_9.attr.attr, 353 &qib_sl2vl_attr_10.attr.attr, 354 &qib_sl2vl_attr_11.attr.attr, 355 &qib_sl2vl_attr_12.attr.attr, 356 &qib_sl2vl_attr_13.attr.attr, 357 &qib_sl2vl_attr_14.attr.attr, 358 &qib_sl2vl_attr_15.attr.attr, 359 NULL 360 }; 361 362 static const struct attribute_group port_sl2vl_group = { 363 .name = "sl2vl", 364 .attrs = port_sl2vl_attributes, 365 }; 366 367 /* End sl2vl */ 368 369 /* Start diag_counters */ 370 371 struct qib_diagc_attr { 372 struct ib_port_attribute attr; 373 size_t counter; 374 }; 375 376 static ssize_t diagc_attr_show(struct ib_device *ibdev, u32 port_num, 377 struct ib_port_attribute *attr, char *buf) 378 { 379 struct qib_diagc_attr *dattr = 380 container_of(attr, struct qib_diagc_attr, attr); 381 struct qib_devdata *dd = dd_from_ibdev(ibdev); 382 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 383 384 return sysfs_emit(buf, "%llu\n", *((u64 *)qibp + dattr->counter)); 385 } 386 387 static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num, 388 struct ib_port_attribute *attr, const char *buf, 389 size_t count) 390 { 391 struct qib_diagc_attr *dattr = 392 container_of(attr, struct qib_diagc_attr, attr); 393 struct qib_devdata *dd = dd_from_ibdev(ibdev); 394 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 395 u64 val; 396 int ret; 397 398 ret = kstrtou64(buf, 0, &val); 399 if (ret) 400 return ret; 401 *((u64 *)qibp + dattr->counter) = val; 402 return count; 403 } 404 405 #define QIB_DIAGC_ATTR(N) \ 406 static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64)); \ 407 static struct qib_diagc_attr qib_diagc_attr_##N = { \ 408 .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \ 409 .counter = \ 410 offsetof(struct qib_ibport, rvp.n_##N) / sizeof(u64) \ 411 } 412 413 QIB_DIAGC_ATTR(rc_resends); 414 QIB_DIAGC_ATTR(seq_naks); 415 QIB_DIAGC_ATTR(rdma_seq); 416 QIB_DIAGC_ATTR(rnr_naks); 417 QIB_DIAGC_ATTR(other_naks); 418 QIB_DIAGC_ATTR(rc_timeouts); 419 QIB_DIAGC_ATTR(loop_pkts); 420 QIB_DIAGC_ATTR(pkt_drops); 421 QIB_DIAGC_ATTR(dmawait); 422 QIB_DIAGC_ATTR(unaligned); 423 QIB_DIAGC_ATTR(rc_dupreq); 424 QIB_DIAGC_ATTR(rc_seqnak); 425 QIB_DIAGC_ATTR(rc_crwaits); 426 427 static u64 get_all_cpu_total(u64 __percpu *cntr) 428 { 429 int cpu; 430 u64 counter = 0; 431 432 for_each_possible_cpu(cpu) 433 counter += *per_cpu_ptr(cntr, cpu); 434 return counter; 435 } 436 437 static ssize_t qib_store_per_cpu(struct qib_devdata *dd, const char *buf, 438 size_t count, u64 *zero, u64 cur) 439 { 440 u32 val; 441 int ret; 442 443 ret = kstrtou32(buf, 0, &val); 444 if (ret) 445 return ret; 446 if (val != 0) { 447 qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); 448 return count; 449 } 450 *zero = cur; 451 return count; 452 } 453 454 static ssize_t rc_acks_show(struct ib_device *ibdev, u32 port_num, 455 struct ib_port_attribute *attr, char *buf) 456 { 457 struct qib_devdata *dd = dd_from_ibdev(ibdev); 458 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 459 460 return sysfs_emit(buf, "%llu\n", 461 get_all_cpu_total(qibp->rvp.rc_acks) - 462 qibp->rvp.z_rc_acks); 463 } 464 465 static ssize_t rc_acks_store(struct ib_device *ibdev, u32 port_num, 466 struct ib_port_attribute *attr, const char *buf, 467 size_t count) 468 { 469 struct qib_devdata *dd = dd_from_ibdev(ibdev); 470 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 471 472 return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_acks, 473 get_all_cpu_total(qibp->rvp.rc_acks)); 474 } 475 static IB_PORT_ATTR_RW(rc_acks); 476 477 static ssize_t rc_qacks_show(struct ib_device *ibdev, u32 port_num, 478 struct ib_port_attribute *attr, char *buf) 479 { 480 struct qib_devdata *dd = dd_from_ibdev(ibdev); 481 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 482 483 return sysfs_emit(buf, "%llu\n", 484 get_all_cpu_total(qibp->rvp.rc_qacks) - 485 qibp->rvp.z_rc_qacks); 486 } 487 488 static ssize_t rc_qacks_store(struct ib_device *ibdev, u32 port_num, 489 struct ib_port_attribute *attr, const char *buf, 490 size_t count) 491 { 492 struct qib_devdata *dd = dd_from_ibdev(ibdev); 493 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 494 495 return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_qacks, 496 get_all_cpu_total(qibp->rvp.rc_qacks)); 497 } 498 static IB_PORT_ATTR_RW(rc_qacks); 499 500 static ssize_t rc_delayed_comp_show(struct ib_device *ibdev, u32 port_num, 501 struct ib_port_attribute *attr, char *buf) 502 { 503 struct qib_devdata *dd = dd_from_ibdev(ibdev); 504 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 505 506 return sysfs_emit(buf, "%llu\n", 507 get_all_cpu_total(qibp->rvp.rc_delayed_comp) - 508 qibp->rvp.z_rc_delayed_comp); 509 } 510 511 static ssize_t rc_delayed_comp_store(struct ib_device *ibdev, u32 port_num, 512 struct ib_port_attribute *attr, 513 const char *buf, size_t count) 514 { 515 struct qib_devdata *dd = dd_from_ibdev(ibdev); 516 struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data; 517 518 return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_delayed_comp, 519 get_all_cpu_total(qibp->rvp.rc_delayed_comp)); 520 } 521 static IB_PORT_ATTR_RW(rc_delayed_comp); 522 523 static struct attribute *port_diagc_attributes[] = { 524 &qib_diagc_attr_rc_resends.attr.attr, 525 &qib_diagc_attr_seq_naks.attr.attr, 526 &qib_diagc_attr_rdma_seq.attr.attr, 527 &qib_diagc_attr_rnr_naks.attr.attr, 528 &qib_diagc_attr_other_naks.attr.attr, 529 &qib_diagc_attr_rc_timeouts.attr.attr, 530 &qib_diagc_attr_loop_pkts.attr.attr, 531 &qib_diagc_attr_pkt_drops.attr.attr, 532 &qib_diagc_attr_dmawait.attr.attr, 533 &qib_diagc_attr_unaligned.attr.attr, 534 &qib_diagc_attr_rc_dupreq.attr.attr, 535 &qib_diagc_attr_rc_seqnak.attr.attr, 536 &qib_diagc_attr_rc_crwaits.attr.attr, 537 &ib_port_attr_rc_acks.attr, 538 &ib_port_attr_rc_qacks.attr, 539 &ib_port_attr_rc_delayed_comp.attr, 540 NULL 541 }; 542 543 static const struct attribute_group port_diagc_group = { 544 .name = "diag_counters", 545 .attrs = port_diagc_attributes, 546 }; 547 548 /* End diag_counters */ 549 550 const struct attribute_group *qib_attr_port_groups[] = { 551 &port_linkcontrol_group, 552 &port_ccmgta_attribute_group, 553 &port_sl2vl_group, 554 &port_diagc_group, 555 NULL, 556 }; 557 558 /* end of per-port file structures and support code */ 559 560 /* 561 * Start of per-unit (or driver, in some cases, but replicated 562 * per unit) functions (these get a device *) 563 */ 564 static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, 565 char *buf) 566 { 567 struct qib_ibdev *dev = 568 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 569 570 return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev); 571 } 572 static DEVICE_ATTR_RO(hw_rev); 573 574 static ssize_t hca_type_show(struct device *device, 575 struct device_attribute *attr, char *buf) 576 { 577 struct qib_ibdev *dev = 578 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 579 struct qib_devdata *dd = dd_from_dev(dev); 580 581 if (!dd->boardname) 582 return -EINVAL; 583 return sysfs_emit(buf, "%s\n", dd->boardname); 584 } 585 static DEVICE_ATTR_RO(hca_type); 586 static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL); 587 588 static DEVICE_STRING_ATTR_RO(version, 0444, QIB_DRIVER_VERSION); 589 590 static ssize_t boardversion_show(struct device *device, 591 struct device_attribute *attr, char *buf) 592 { 593 struct qib_ibdev *dev = 594 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 595 struct qib_devdata *dd = dd_from_dev(dev); 596 597 /* The string printed here is already newline-terminated. */ 598 return sysfs_emit(buf, "%s", dd->boardversion); 599 } 600 static DEVICE_ATTR_RO(boardversion); 601 602 static ssize_t localbus_info_show(struct device *device, 603 struct device_attribute *attr, char *buf) 604 { 605 struct qib_ibdev *dev = 606 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 607 struct qib_devdata *dd = dd_from_dev(dev); 608 609 /* The string printed here is already newline-terminated. */ 610 return sysfs_emit(buf, "%s", dd->lbus_info); 611 } 612 static DEVICE_ATTR_RO(localbus_info); 613 614 static ssize_t nctxts_show(struct device *device, 615 struct device_attribute *attr, char *buf) 616 { 617 struct qib_ibdev *dev = 618 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 619 struct qib_devdata *dd = dd_from_dev(dev); 620 621 /* Return the number of user ports (contexts) available. */ 622 /* The calculation below deals with a special case where 623 * cfgctxts is set to 1 on a single-port board. */ 624 return sysfs_emit(buf, "%u\n", 625 (dd->first_user_ctxt > dd->cfgctxts) ? 626 0 : 627 (dd->cfgctxts - dd->first_user_ctxt)); 628 } 629 static DEVICE_ATTR_RO(nctxts); 630 631 static ssize_t nfreectxts_show(struct device *device, 632 struct device_attribute *attr, char *buf) 633 { 634 struct qib_ibdev *dev = 635 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 636 struct qib_devdata *dd = dd_from_dev(dev); 637 638 /* Return the number of free user ports (contexts) available. */ 639 return sysfs_emit(buf, "%u\n", dd->freectxts); 640 } 641 static DEVICE_ATTR_RO(nfreectxts); 642 643 static ssize_t serial_show(struct device *device, struct device_attribute *attr, 644 char *buf) 645 { 646 struct qib_ibdev *dev = 647 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 648 struct qib_devdata *dd = dd_from_dev(dev); 649 const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial)); 650 int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial); 651 652 return sysfs_emit(buf, ".%*s\n", size, dd->serial); 653 } 654 static DEVICE_ATTR_RO(serial); 655 656 static ssize_t chip_reset_store(struct device *device, 657 struct device_attribute *attr, const char *buf, 658 size_t count) 659 { 660 struct qib_ibdev *dev = 661 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 662 struct qib_devdata *dd = dd_from_dev(dev); 663 int ret; 664 665 if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) { 666 ret = -EINVAL; 667 goto bail; 668 } 669 670 ret = qib_reset_device(dd->unit); 671 bail: 672 return ret < 0 ? ret : count; 673 } 674 static DEVICE_ATTR_WO(chip_reset); 675 676 /* 677 * Dump tempsense regs. in decimal, to ease shell-scripts. 678 */ 679 static ssize_t tempsense_show(struct device *device, 680 struct device_attribute *attr, char *buf) 681 { 682 struct qib_ibdev *dev = 683 rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev); 684 struct qib_devdata *dd = dd_from_dev(dev); 685 int i; 686 u8 regvals[8]; 687 688 for (i = 0; i < 8; i++) { 689 int ret; 690 691 if (i == 6) 692 continue; 693 ret = dd->f_tempsense_rd(dd, i); 694 if (ret < 0) 695 return ret; /* return error on bad read */ 696 regvals[i] = ret; 697 } 698 return sysfs_emit(buf, "%d %d %02X %02X %d %d\n", 699 (signed char)regvals[0], 700 (signed char)regvals[1], 701 regvals[2], 702 regvals[3], 703 (signed char)regvals[5], 704 (signed char)regvals[7]); 705 } 706 static DEVICE_ATTR_RO(tempsense); 707 708 /* 709 * end of per-unit (or driver, in some cases, but replicated 710 * per unit) functions 711 */ 712 713 /* start of per-unit file structures and support code */ 714 static struct attribute *qib_attributes[] = { 715 &dev_attr_hw_rev.attr, 716 &dev_attr_hca_type.attr, 717 &dev_attr_board_id.attr, 718 &dev_attr_version.attr.attr, 719 &dev_attr_nctxts.attr, 720 &dev_attr_nfreectxts.attr, 721 &dev_attr_serial.attr, 722 &dev_attr_boardversion.attr, 723 &dev_attr_tempsense.attr, 724 &dev_attr_localbus_info.attr, 725 &dev_attr_chip_reset.attr, 726 NULL, 727 }; 728 729 const struct attribute_group qib_attr_group = { 730 .attrs = qib_attributes, 731 }; 732