// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015-2020 Intel Corporation.
 * Copyright(c) 2021 Cornelis Networks.
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>
#include <rdma/ib_verbs.h>
#include <linux/etherdevice.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"
#include "debugfs.h"
#include "fault.h"

#include "ipoib.h"
#include "netdev.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
		 HFI1_DEFAULT_MAX_MTU));

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *val, const struct kernel_param *kp);
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cornelis Omni-Path Express driver");

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
/*
 * MAX_PKT_RECV_THREAD is the max # of packets processed before
 * the qp_wait_list queue is flushed.
 */
#define MAX_PKT_RECV_THREAD (MAX_PKT_RECV * 4)
/* Eager-buffer consumption granularity used when deciding head updates */
#define EGR_HEAD_UPDATE_THRESHOLD 16

/* Driver-global verbs statistics */
struct hfi1_ib_stats hfi1_stats;

/*
 * hfi1_caps_set - "set" handler for the cap_mask module parameter.
 * @val: user-supplied string holding the requested mask
 * @kp: kernel_param whose ->arg points at hfi1_cap_mask
 *
 * Parse @val and fold the requested changes into the capability mask,
 * subject to three restrictions visible below:
 *  - once the mask is locked (HFI1_CAP_LOCKED()), only bits within
 *    HFI1_CAP_WRITABLE_MASK (in both the kernel and the user halves of
 *    the mask) may change;
 *  - reserved bits (HFI1_CAP_RESERVED_MASK) never change;
 *  - user-half bits of HFI1_CAP_MUST_HAVE_KERN are cleared wherever the
 *    kernel and user halves disagree.
 *
 * Return: 0 on success, or the kstrtoul() error if @val does not parse.
 */
static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}
	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
		((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

/*
 * hfi1_caps_get - "get" handler for the cap_mask module parameter.
 * @buffer: sysfs output buffer
 * @kp: kernel_param whose ->arg points at hfi1_cap_mask
 *
 * Report the current mask with the LOCKED bit hidden and the
 * kernel-to-user (K2U) bits mirrored into the user half of the mask.
 *
 * Return: number of bytes written into @buffer.
 */
static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return sysfs_emit(buffer, "0x%lx\n", cap_mask);
}

/* Map an rvt_dev_info back to the PCI device of its owning hfi1 unit */
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *ibdev = container_of(rdi, struct hfi1_ibdev, rdi);
	struct hfi1_devdata *dd = container_of(ibdev,
					       struct hfi1_devdata,
					       verbs_dev);
	return dd->pcidev;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long index, flags;
	int pidx, nunits_active = 0;

	xa_lock_irqsave(&hfi1_dev_table, flags);
	xa_for_each(&hfi1_dev_table, index, dd) {
		/* Skip units that are absent or not fully brought up */
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase1)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/* A port with a LID and link up marks the unit active */
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	xa_unlock_irqrestore(&hfi1_dev_table, flags);
	return nunits_active;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	/* Request a head update when a threshold-aligned buffer begins */
	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}

/* Return the start of the packet header described by the RHF at @rhf_addr */
static inline void *hfi1_get_header(struct hfi1_ctxtdata *rcd,
				    __le32 *rhf_addr)
{
	u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));

	return (void *)(rhf_addr - rcd->rhf_offset + offset);
}

/* 9B (IB) typed wrapper around hfi1_get_header() */
static inline struct ib_header *hfi1_get_msgheader(struct hfi1_ctxtdata *rcd,
						   __le32 *rhf_addr)
{
	return (struct ib_header *)hfi1_get_header(rcd, rhf_addr);
}

/* 16B typed wrapper around hfi1_get_header() */
static inline struct hfi1_16b_header
	*hfi1_get_16B_header(struct hfi1_ctxtdata *rcd,
			     __le32 *rhf_addr)
{
	return (struct hfi1_16b_header *)hfi1_get_header(rcd, rhf_addr);
}

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	/* Buffer sizes must be page aligned */
	if (unlikely(!PAGE_ALIGNED(size)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	/* Upper bound depends on whether this is an eager or expected entry */
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1; /* 1 = valid, 0 = invalid */
}

/*
 * rcv_hdrerr - handle a packet that arrived with a header error.
 * @rcd: receive context the packet arrived on
 * @ppd: port the packet arrived on
 * @packet: the partially decoded packet
 *
 * Decodes the header far enough to (a) preemptively NAK RC QPs on TID
 * errors, and (b) process CNP opcodes that are flagged as opcode errors
 * (only pre-B0 hardware delivers CNPs through this path).  Everything
 * else is dropped.  Clears RHF_RCV_TYPE_ERR_SMASK from packet->rhf when
 * the opcode error has been consumed.
 */
static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct ib_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	u32 mlid_base;
	struct hfi1_ibport *ibp = rcd_to_iport(rcd);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ibdev *verbs_dev = &dd->verbs_dev;
	struct rvt_dev_info *rdi = &verbs_dev->rdi;

	/* Optionally suppress injected DC errors (fault-injection debug) */
	if ((packet->rhf & RHF_DC_ERR) &&
	    hfi1_dbg_fault_suppress_err(verbs_dev))
		return;

	/* Packets with bad ICRC cannot be trusted at all */
	if (packet->rhf & RHF_ICRC_ERR)
		return;

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		goto drop;
	} else {
		u8 lnh = ib_get_lnh(rhdr);

		mlid_base = be16_to_cpu(IB_MULTICAST_LID_BASE);
		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &rhdr->u.oth;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &rhdr->u.l.oth;
			packet->grh = &rhdr->u.l.grh;
		} else {
			goto drop;
		}
	}

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u32 dlid = ib_get_dlid(rhdr);
		u32 qp_num;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (packet->grh) {
			u32 vtf;
			struct ib_grh *grh = packet->grh;

			if (grh->next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(grh->version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
		}

		/* Get the destination QP number. */
		qp_num = ib_bth_get_qpn(packet->ohdr);
		if (dlid < mlid_base) {
			struct rvt_qp *qp;
			unsigned long flags;

			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock_irqsave(&qp->r_lock, flags);

			/* Check for valid receive state. */
			if (!(ib_rvt_state_ops[qp->state] &
			      RVT_PROCESS_RECV_OK)) {
				ibp->rvp.n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, packet, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock_irqrestore(&qp->r_lock, flags);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		void *ebuf = NULL;
		u8 opcode;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;

		if (!ebuf)
			goto drop; /* this should never happen */

		opcode = ib_bth_get_opcode(packet->ohdr);
		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path.
			 */
			struct rvt_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = hfi1_9B_get_sc5(rhdr, packet->rhf);
			sl = ibp->sc_to_sl[sc5];

			lqpn = ib_bth_get_qpn(packet->ohdr);
			rcu_read_lock();
			qp = rvt_lookup_qpn(rdi, &ibp->rvp, lqpn);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = ib_get_slid(rhdr);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				rcu_read_unlock();
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		/* Opcode error consumed; clear the error so callers move on */
		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

/* Initialize per-interrupt packet walk state from the context's HW state */
static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = get_hdrqentsize(rcd); /* words */
	packet->maxcnt = get_hdrq_cnt(rcd) * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = get_rhf_addr(rcd);
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = hfi1_rcd_head(rcd);
	packet->numpkt = 0;
}

/* We support only two types - 9B and 16B for now */
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &return_cnp,
	[HFI1_PKT_TYPE_16B] = &return_cnp_16B
};

/**
 * hfi1_process_ecn_slowpath - Process FECN or BECN bits
 * @qp: The packet's destination QP
 * @pkt: The packet itself.
 * @prescan: Is the caller the RXQ prescan
 *
 * Process the packet's FECN or BECN bits. By now, the packet
 * has already been evaluated whether processing of those bit should
 * be done.
 * The significance of the @prescan argument is that if the caller
 * is the RXQ prescan, a CNP will be sent out instead of waiting for the
 * normal packet processing to send an ACK with BECN set (or a CNP).
 *
 * Return: true if the packet's FECN bit was set and was not ignored.
 */
bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool prescan)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_other_headers *ohdr = pkt->ohdr;
	struct ib_grh *grh = pkt->grh;
	u32 rqpn = 0;
	u16 pkey;
	u32 rlid, slid, dlid = 0;
	u8 hdr_type, sc, svc_type, opcode;
	bool is_mcast = false, ignore_fecn = false, do_cnp = false,
		fecn, becn;

	/* can be called from prescan */
	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
		/* 16B bypass packet: all fields come from the 16B header */
		pkey = hfi1_16B_get_pkey(pkt->hdr);
		sc = hfi1_16B_get_sc(pkt->hdr);
		dlid = hfi1_16B_get_dlid(pkt->hdr);
		slid = hfi1_16B_get_slid(pkt->hdr);
		is_mcast = hfi1_is_16B_mcast(dlid);
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_16B;
		fecn = hfi1_16B_get_fecn(pkt->hdr);
		becn = hfi1_16B_get_becn(pkt->hdr);
	} else {
		/* 9B IB packet: ECN bits live in the BTH */
		pkey = ib_bth_get_pkey(ohdr);
		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
		dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
			ppd->lid;
		slid = ib_get_slid(pkt->hdr);
		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
			   (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
		opcode = ib_bth_get_opcode(ohdr);
		hdr_type = HFI1_PKT_TYPE_9B;
		fecn = ib_bth_get_fecn(ohdr);
		becn = ib_bth_get_becn(ohdr);
	}

	/* Derive the remote endpoint and congestion service type per QP type */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		rlid = slid;
		rqpn = ib_get_sqpn(pkt->ohdr);
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	default:
		return false;
	}

	ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
		(opcode == IB_OPCODE_RC_ACKNOWLEDGE);
	/*
	 * ACKNOWLEDGE packets do not get a CNP but this will be
	 * guarded by ignore_fecn above.
	 */
	do_cnp = prescan ||
		(opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
		 opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE) ||
		opcode == TID_OP(READ_RESP) ||
		opcode == TID_OP(ACK);

	/* Call appropriate CNP handler */
	if (!ignore_fecn && do_cnp && fecn)
		hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
					      dlid, rlid, sc, grh);

	if (becn) {
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc];

		process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
	}
	return !ignore_fecn && fecn;
}

/* Receive-queue walk state used by the RXQ prescan */
struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;	/* header queue entry size, in words */
	u32 maxcnt;	/* header queue size, in words */
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

/* Seed the prescan walk state from the current packet/context state */
static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;
	mdata->ps_head = packet->rhqoff;

	if (get_dma_rtail_setting(rcd)) {
		mdata->ps_tail = get_rcvhdrtail(rcd);
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			mdata->ps_seq = hfi1_seq_cnt(rcd);
		else
			mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = hfi1_seq_cnt(rcd);
	}
}

/* Has the prescan walk consumed every available header queue entry? */
static inline int ps_done(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	if (get_dma_rtail_setting(rcd))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

/* Should this entry be skipped rather than prescanned? */
static inline int ps_skip(struct ps_mdata *mdata, u64 rhf,
			  struct hfi1_ctxtdata *rcd)
{
	/*
	 * Control context can potentially receive an invalid rhf.
	 * Drop such packets.
	 */
	if ((rcd->ctxt == HFI1_CTRL_CTXT) && (mdata->ps_head != mdata->ps_tail))
		return mdata->ps_seq != rhf_rcv_seq(rhf);

	return 0;
}

/* Advance the prescan walk (head, wrap, and sequence count) one entry */
static inline void update_ps_mdata(struct ps_mdata *mdata,
				   struct hfi1_ctxtdata *rcd)
{
	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head >= mdata->maxcnt)
		mdata->ps_head = 0;

	/* Control context must do seq counting */
	if (!get_dma_rtail_setting(rcd) ||
	    rcd->ctxt == HFI1_CTRL_CTXT)
		mdata->ps_seq = hfi1_seq_incr_wrap(mdata->ps_seq);
}

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 * This is declared as a macro to allow quick checking of the port to avoid
 * the overhead of a function call if not enabled.
 */
#define prescan_rxq(rcd, packet) \
	do { \
		if (rcd->ppd->cc_prescan) \
			__prescan_rxq(packet); \
	} while (0)
static void __prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_ibport *ibp = rcd_to_iport(rcd);
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
				   packet->rcd->rhf_offset;
		struct rvt_qp *qp;
		struct ib_header *hdr;
		struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn, bth1;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		/* Only 9B IB packets are prescanned for ECN bits */
		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		packet->hdr = hfi1_get_msgheader(packet->rcd, rhf_addr);
		hdr = packet->hdr;
		lnh = ib_get_lnh(hdr);

		if (lnh == HFI1_LRH_BTH) {
			packet->ohdr = &hdr->u.oth;
			packet->grh = NULL;
		} else if (lnh == HFI1_LRH_GRH) {
			packet->ohdr = &hdr->u.l.oth;
			packet->grh = &hdr->u.l.grh;
		} else {
			goto next; /* just in case */
		}

		if (!hfi1_may_ecn(packet))
			goto next;

		bth1 = be32_to_cpu(packet->ohdr->bth[1]);
		qpn = bth1 & RVT_QPN_MASK;
		rcu_read_lock();
		qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);

		if (!qp) {
			rcu_read_unlock();
			goto next;
		}

		hfi1_process_ecn_slowpath(qp, packet, true);
		rcu_read_unlock();

		/* turn off BECN, FECN */
		bth1 &= ~(IB_FECN_SMASK | IB_BECN_SMASK);
		packet->ohdr->bth[1] = cpu_to_be32(bth1);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

/*
 * Flush the context's qp_wait_list: send any pending NAKs and schedule
 * any pending sends for QPs queued during packet processing.
 */
static void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct rvt_qp *qp, *nqp;
	struct hfi1_ctxtdata *rcd = packet->rcd;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & RVT_R_RSP_NAK) {
			qp->r_flags &= ~RVT_R_RSP_NAK;
			packet->qp = qp;
			hfi1_send_rc_ack(packet, 0);
		}
		if (qp->r_flags & RVT_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~RVT_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_rvt_state_ops[qp->state] &
					RVT_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		rvt_put_qp(qp);
	}
}

/*
 * Called when the per-interrupt packet budget is hit: threaded handlers
 * may flush QP work and reschedule; interrupt handlers stop with
 * RCV_PKT_LIMIT so the remainder is handled later.
 */
static noinline int max_packet_exceeded(struct hfi1_packet *packet, int thread)
{
	if (thread) {
		if ((packet->numpkt & (MAX_PKT_RECV_THREAD - 1)) == 0)
			/* allow deferred processing */
			process_rcv_qp_work(packet);
		cond_resched();
		return RCV_PKT_OK;
	} else {
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
		return RCV_PKT_LIMIT;
	}
}

/* Check the packet budget every MAX_PKT_RECV packets */
static inline int check_max_packet(struct hfi1_packet *packet, int thread)
{
	int ret = RCV_PKT_OK;

	if (unlikely((packet->numpkt & (MAX_PKT_RECV - 1)) == 0))
		ret = max_packet_exceeded(packet, thread);
	return ret;
}

/*
 * Skip over one header queue entry that carried an invalid RHF
 * (control context only) and position the walk at the next entry.
 */
static noinline int skip_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->rcd->dd->ctx0_seq_drop++;
	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->numpkt++;
	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

/*
 * NAPI variant of process_rcv_packet(): assumes an eager buffer is
 * always in use (no rhf_use_egr_bfr() check) and enforces no packet
 * budget here — the caller's polling budget bounds the loop.
 */
static void process_rcv_packet_napi(struct hfi1_packet *packet)
{
	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->etail = rhf_egr_index(packet->rhf);
	packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
				  &packet->updegr);
	/*
	 * Prefetch the contents of the eager buffer.  It is
	 * OK to send a negative length to prefetch_range().
	 * The +2 is the size of the RHF.
	 */
	prefetch_range(packet->ebuf,
		       packet->tlen - ((packet->rcd->rcvhdrqentsize -
				       (rhf_hdrq_offset(packet->rhf)
					+ 2)) * 4));

	/* Dispatch to the type-specific receive handler */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
}

/*
 * Process one packet: decode the RHF, prefetch any eager buffer,
 * dispatch to the type-specific handler, and advance to the next
 * header queue entry.  Returns RCV_PKT_OK/RCV_PKT_LIMIT per the
 * packet budget.
 */
static inline int process_rcv_packet(struct hfi1_packet *packet, int thread)
{
	int ret;

	packet->etype = rhf_rcv_type(packet->rhf);

	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((get_hdrqentsize(packet->rcd) -
					       (rhf_hdrq_offset(packet->rhf)
						+ 2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	ret = check_max_packet(packet, thread);

	packet->rhf_addr = (__le32 *)packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->grh = NULL;
}

static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt
	 */
	update_usrhead(packet->rcd, hfi1_rcd_head(packet->rcd), packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}

/*
 * handle_receive_interrupt_napi_fp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for receive interrupt.
 * This is the fast path interrupt handler
 * when executing napi soft irq environment.
 *
 * Return: the number of packets processed.
 */
int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget)
{
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	/* Nothing pending if the first RHF sequence number does not match */
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
		goto bail;

	while (packet.numpkt < budget) {
		process_rcv_packet_napi(&packet);
		/* Sequence mismatch means we have consumed all new entries */
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			break;

		process_rcv_update(0, &packet);
	}
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return packet.numpkt;
}

/*
 * Handle receive interrupts when using the no dma rtail option.
 */
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	/* Queue progress is tracked purely by RHF sequence numbers here */
	if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
		last = RCV_PKT_DONE;
		goto bail;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

/*
 * Handle receive interrupts when the hardware DMAs the tail pointer
 * to memory: queue progress is bounded by the DMA'ed tail.
 */
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread)
{
	u32 hdrqtail;
	int last = RCV_PKT_OK;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail) {
		last = RCV_PKT_DONE;
		goto bail;
	}
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		last = process_rcv_packet(&packet, thread);
		if (packet.rhqoff == hdrqtail)
			last = RCV_PKT_DONE;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);
bail:
	finish_packet(&packet);
	return last;
}

/*
 * Switch receive contexts over to their fast-path interrupt handler.
 */
static void set_all_fastpath(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
{
	u16 i;

	/*
	 * For dynamically allocated kernel contexts switch
	 * interrupt handler only for that context. Otherwise, switch
	 * interrupt handler for all statically allocated kernel contexts.
	 */
	if (rcd->ctxt >= dd->first_dyn_alloc_ctxt) {
		hfi1_rcd_get(rcd);
		hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
		return;
	}

	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd && (i < dd->first_dyn_alloc_ctxt))
			hfi1_set_fast(rcd);
		hfi1_rcd_put(rcd);
	}
}

/*
 * Switch all statically allocated kernel contexts back to the slow-path
 * interrupt handler.
 */
void set_all_slowpath(struct hfi1_devdata *dd)
{
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* HFI1_CTRL_CTXT must always use the slow path interrupt handler */
	for (i = HFI1_CTRL_CTXT + 1; i < dd->num_rcv_contexts; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (!rcd)
			continue;
		if (i < dd->first_dyn_alloc_ctxt)
			rcd->do_interrupt = rcd->slow_handler;

		hfi1_rcd_put(rcd);
	}
}

/*
 * A non-SC15 packet arrived while the host link state is ARMED: if the
 * hardware already reports ACTIVE, queue linkstate_active_work to move
 * the driver's state to ACTIVE and return true so the caller bails and
 * retries after the transition.
 */
static bool __set_armed_to_active(struct hfi1_packet *packet)
{
	u8 etype = rhf_rcv_type(packet->rhf);
	u8 sc = SC15_PACKET;

	if (etype == RHF_RCV_TYPE_IB) {
		struct ib_header *hdr = hfi1_get_msgheader(packet->rcd,
							   packet->rhf_addr);
		sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	} else if (etype == RHF_RCV_TYPE_BYPASS) {
		struct hfi1_16b_header *hdr = hfi1_get_16B_header(
						packet->rcd,
						packet->rhf_addr);
		sc = hfi1_16B_get_sc(hdr);
	}
	if (sc != SC15_PACKET) {
		int hwstate = driver_lstate(packet->rcd->ppd);
		struct work_struct *lsaw =
				&packet->rcd->ppd->linkstate_active_work;

		if (hwstate != IB_PORT_ACTIVE) {
			dd_dev_info(packet->rcd->dd,
				    "Unexpected link state %s\n",
				    ib_port_state_to_str(hwstate));
			return false;
		}

		queue_work(packet->rcd->ppd->link_wq, lsaw);
		return true;
	}
	return false;
}

/**
 * set_armed_to_active - the fast path for armed to active
 * @packet: the packet structure
 *
 * Return true if packet processing needs to bail.
 */
static bool set_armed_to_active(struct hfi1_packet *packet)
{
	if (likely(packet->rcd->ppd->host_link_state != HLS_UP_ARMED))
		return false;
	return __set_armed_to_active(packet);
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 * @thread: non-zero when called from a threaded handler that may
 *          flush deferred QP work and cond_resched() at the budget
 *          boundary (see max_packet_exceeded())
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int needset, last = RCV_PKT_OK;
	struct hfi1_packet packet;
	int skip_pkt = 0;

	if (!rcd->rcvhdrq)
		return RCV_PKT_OK;
	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

	init_packet(rcd, &packet);

	if (!get_dma_rtail_setting(rcd)) {
		/* No DMA'ed tail: progress is tracked by RHF sequence only */
		if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail) {
			last = RCV_PKT_DONE;
			goto bail;
		}
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

		/*
		 * Control context can potentially receive an invalid
		 * rhf. Drop such packets.
		 */
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf)))
				skip_pkt = 1;
	}

	prescan_rxq(rcd, &packet);

	while (last == RCV_PKT_OK) {
		if (hfi1_need_drop(dd)) {
			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *)rcd->rcvhdrq +
					  packet.rhqoff +
					  rcd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);

		} else if (skip_pkt) {
			last = skip_rcv_packet(&packet, thread);
			skip_pkt = 0;
		} else {
			if (set_armed_to_active(&packet))
				goto bail;
			last = process_rcv_packet(&packet, thread);
		}

		if (!get_dma_rtail_setting(rcd)) {
			if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf)))
				last = RCV_PKT_DONE;
		} else {
			if (packet.rhqoff == hdrqtail)
				last = RCV_PKT_DONE;
			/*
			 * Control context can potentially receive an invalid
			 * rhf. Drop such packets.
			 */
			if (rcd->ctxt == HFI1_CTRL_CTXT) {
				bool lseq;

				lseq = hfi1_seq_incr(rcd,
						     rhf_rcv_seq(packet.rhf));
				if (!last && lseq)
					skip_pkt = 1;
			}
		}

		/* First pass through: move eligible contexts to the fast path */
		if (needset) {
			needset = false;
			set_all_fastpath(dd, rcd);
		}
		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);
	hfi1_set_rcd_head(rcd, packet.rhqoff);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
	return last;
}

/*
 * handle_receive_interrupt_napi_sp - receive a packet
 * @rcd: the context
 * @budget: polling budget
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler
 * when executing napi soft irq environment.
1105 */ 1106 int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget) 1107 { 1108 struct hfi1_devdata *dd = rcd->dd; 1109 int last = RCV_PKT_OK; 1110 bool needset = true; 1111 struct hfi1_packet packet; 1112 1113 init_packet(rcd, &packet); 1114 if (last_rcv_seq(rcd, rhf_rcv_seq(packet.rhf))) 1115 goto bail; 1116 1117 while (last != RCV_PKT_DONE && packet.numpkt < budget) { 1118 if (hfi1_need_drop(dd)) { 1119 /* On to the next packet */ 1120 packet.rhqoff += packet.rsize; 1121 packet.rhf_addr = (__le32 *)rcd->rcvhdrq + 1122 packet.rhqoff + 1123 rcd->rhf_offset; 1124 packet.rhf = rhf_to_cpu(packet.rhf_addr); 1125 1126 } else { 1127 if (set_armed_to_active(&packet)) 1128 goto bail; 1129 process_rcv_packet_napi(&packet); 1130 } 1131 1132 if (hfi1_seq_incr(rcd, rhf_rcv_seq(packet.rhf))) 1133 last = RCV_PKT_DONE; 1134 1135 if (needset) { 1136 needset = false; 1137 set_all_fastpath(dd, rcd); 1138 } 1139 1140 process_rcv_update(last, &packet); 1141 } 1142 1143 hfi1_set_rcd_head(rcd, packet.rhqoff); 1144 1145 bail: 1146 /* 1147 * Always write head at end, and setup rcv interrupt, even 1148 * if no packets were processed. 1149 */ 1150 finish_packet(&packet); 1151 return packet.numpkt; 1152 } 1153 1154 /* 1155 * We may discover in the interrupt that the hardware link state has 1156 * changed from ARMED to ACTIVE (due to the arrival of a non-SC15 packet), 1157 * and we need to update the driver's notion of the link state. We cannot 1158 * run set_link_state from interrupt context, so we queue this function on 1159 * a workqueue. 1160 * 1161 * We delay the regular interrupt processing until after the state changes 1162 * so that the link will be in the correct state by the time any application 1163 * we wake up attempts to send a reply to any message it received. 1164 * (Subsequent receive interrupts may possibly force the wakeup before we 1165 * update the link state.) 
 *
 * The rcd is freed in hfi1_free_ctxtdata after hfi1_postinit_cleanup invokes
 * dd->f_cleanup(dd) to disable the interrupt handler and flush workqueues,
 * so we're safe from use-after-free of the rcd.
 */
void receive_interrupt_work(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  linkstate_active_work);
	struct hfi1_devdata *dd = ppd->dd;
	struct hfi1_ctxtdata *rcd;
	u16 i;

	/* Received non-SC15 packet implies neighbor_normal */
	ppd->neighbor_normal = 1;
	set_link_state(ppd, HLS_UP_ACTIVE);

	/*
	 * Interrupt all statically allocated kernel contexts that could
	 * have had an interrupt during auto activation.
	 */
	for (i = HFI1_CTRL_CTXT; i < dd->first_dyn_alloc_ctxt; i++) {
		rcd = hfi1_rcd_get_by_index(dd, i);
		if (rcd)
			force_recv_intr(rcd);
		/*
		 * Called unconditionally (presumably NULL-safe); drops the
		 * reference taken by hfi1_rcd_get_by_index() above.
		 */
		hfi1_rcd_put(rcd);
	}
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return 'default_if_bad' if the size is not a valid OPA MTU.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case 0: return OPA_MTU_0;
	case 256: return OPA_MTU_256;
	case 512: return OPA_MTU_512;
	case 1024: return OPA_MTU_1024;
	case 2048: return OPA_MTU_2048;
	case 4096: return OPA_MTU_4096;
	case 8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}

/*
 * Convert an on-wire MAD MTU enumeration back to a byte count.
 * Returns 0xffff for values that are not a recognized OPA MTU enum.
 */
u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size. We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	/* ibmtu is the largest MTU configured across the supported VLs */
	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT ||
	    ppd->host_link_state == HLS_UP_ARMED ||
	    ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	/* Only drain when the link is up and not on A-step (is_ax) parts */
	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

/*
 * Record the new LID/LMC on the port and push the change to the hardware
 * via hfi1_set_ib_cfg(). Always returns 0.
 */
int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);

	return 0;
}

void shutdown_led_override(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * This pairs with the memory barrier in hfi1_start_led_override to
	 * ensure that we read the correct state of LED beaconing represented
	 * by led_override_timer_active
	 */
	smp_rmb();
	if (atomic_read(&ppd->led_override_timer_active)) {
		timer_delete_sync(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 0);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}

	/* Hand control of the LED to the DC for normal operation */
	write_csr(dd, DCC_CFG_LED_CNTRL, 0);
}

/*
 * Timer callback for LED override blinking: toggle between the on/off
 * phases and re-arm the timer with that phase's duration.
 */
static void run_led_override(struct timer_list *t)
{
	struct hfi1_pportdata *ppd = timer_container_of(ppd, t,
							led_override_timer);
	struct hfi1_devdata *dd = ppd->dd;
	unsigned long timeout;
	int phase_idx;

	/* Do nothing once the device is no longer initialized */
	if (!(dd->flags & HFI1_INITTED))
		return;

	phase_idx = ppd->led_override_phase & 1;

	setextled(dd, phase_idx);

	timeout = ppd->led_override_vals[phase_idx];

	/* Set up for next phase */
	ppd->led_override_phase = !ppd->led_override_phase;

	mod_timer(&ppd->led_override_timer, jiffies + timeout);
}

/*
 * To have the LED blink in a particular pattern, provide timeon and timeoff
 * in milliseconds.
 * To turn off custom blinking and return to normal operation, use
 * shutdown_led_override()
 */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff)
{
	if (!(ppd->dd->flags & HFI1_INITTED))
		return;

	/* Convert to jiffies for direct use in timer */
	ppd->led_override_vals[0] = msecs_to_jiffies(timeoff);
	ppd->led_override_vals[1] = msecs_to_jiffies(timeon);

	/* Arbitrarily start from LED on phase */
	ppd->led_override_phase = 1;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the handler will be called soon to look at our request.
	 */
	if (!timer_pending(&ppd->led_override_timer)) {
		timer_setup(&ppd->led_override_timer, run_led_override, 0);
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
		atomic_set(&ppd->led_override_timer_active, 1);
		/* Ensure the atomic_set is visible to all CPUs */
		smp_wmb();
	}
}

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload). We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.
 * For now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int hfi1_reset_device(int unit)
{
	int ret;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase1 || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			    "Invalid unit number %u or not initialized or not present\n",
			    unit);
		ret = -ENXIO;
		goto bail;
	}

	/* If there are any user contexts, we cannot reset */
	mutex_lock(&hfi1_mutex);
	if (dd->rcd)
		if (hfi1_stats.sps_ctxts) {
			mutex_unlock(&hfi1_mutex);
			ret = -EBUSY;
			goto bail;
		}
	mutex_unlock(&hfi1_mutex);

	/* Stop LED beaconing on every port before tearing things down */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		shutdown_led_override(ppd);
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	/* Re-initialize, much like a driver reload (reinit = 1) */
	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			   "Reinitialize unit %u after reset failed with %d\n",
			   unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			    unit);

bail:
	return ret;
}

/*
 * Point packet->hdr at the 9B message header for this RHF entry and
 * compute the header length from the split point (rhf_addr follows the
 * header in the header queue entry).
 */
static inline void hfi1_setup_ib_header(struct hfi1_packet *packet)
{
	packet->hdr = (struct hfi1_ib_message_header *)
			hfi1_get_msgheader(packet->rcd,
					   packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
}

/*
 * Sanity-check the LID/SC fields of an ingress 16B (bypass) packet.
 * Returns 0 if acceptable, -EINVAL if the packet must be dropped.
 */
static int hfi1_bypass_ingress_pkt_check(struct hfi1_packet *packet)
{
	struct hfi1_pportdata *ppd = packet->rcd->ppd;

	/* slid and dlid cannot be 0 */
	if ((!packet->slid) || (!packet->dlid))
		return -EINVAL;

	/* Compare port lid with incoming packet dlid */
	if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
	    (packet->dlid !=
		opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
		/* Mask off the LMC bits before comparing against our LID */
		if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
			return -EINVAL;
	}

	/* No multicast packets with SC15 */
	if ((hfi1_is_16B_mcast(packet->dlid)) && (packet->sc == 0xF))
		return -EINVAL;

	/* Packets with permissive DLID always on SC15 */
	if ((packet->dlid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE),
					 16B)) &&
	    (packet->sc != 0xF))
		return -EINVAL;

	return 0;
}

/*
 * Parse a 9B packet header and fill in the commonly used hfi1_packet
 * fields (ohdr/grh/opcode/lids/sl/sc/...). Returns 0 on success,
 * -EINVAL (and counts a drop) for a malformed LNH or GRH.
 */
static int hfi1_setup_9B_packet(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct ib_header *hdr;
	u8 lnh;

	hfi1_setup_ib_header(packet);
	hdr = packet->hdr;

	lnh = ib_get_lnh(hdr);
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
		packet->grh = NULL;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		packet->grh = &hdr->u.l.grh;
		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf;
	packet->opcode = ib_bth_get_opcode(packet->ohdr);
	packet->slid = ib_get_slid(hdr);
	packet->dlid = ib_get_dlid(hdr);
	/* Remap 9B multicast LIDs into the OPA multicast range */
	if (unlikely((packet->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (packet->dlid != be16_to_cpu(IB_LID_PERMISSIVE))))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			be16_to_cpu(IB_MULTICAST_LID_BASE);
	packet->sl = ib_get_sl(hdr);
	packet->sc = hfi1_9B_get_sc5(hdr, packet->rhf);
	packet->pad = ib_bth_get_pad(packet->ohdr);
	packet->extra_byte = 0;
	packet->pkey = ib_bth_get_pkey(packet->ohdr);
	packet->migrated = ib_bth_is_migration(packet->ohdr);

	return 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

static int hfi1_setup_bypass_packet(struct hfi1_packet *packet)
{
	/*
	 * Bypass packets have a different header/payload split
	 * compared to an IB packet.
	 * Current split is set such that 16 bytes of the actual
	 * header is in the header buffer and the remining is in
	 * the eager buffer. We chose 16 since hfi1 driver only
	 * supports 16B bypass packets and we will be able to
	 * receive the entire LRH with such a split.
	 */

	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	u8 l4;

	packet->hdr = (struct hfi1_16b_header *)
			hfi1_get_16B_header(packet->rcd,
					    packet->rhf_addr);
	l4 = hfi1_16B_get_l4(packet->hdr);
	if (l4 == OPA_16B_L4_IB_LOCAL) {
		packet->ohdr = packet->ebuf;
		packet->grh = NULL;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES);
		packet->migrated = opa_bth_is_migration(packet->ohdr);
	} else if (l4 == OPA_16B_L4_IB_GLOBAL) {
		u32 vtf;
		u8 grh_len = sizeof(struct ib_grh);

		packet->ohdr = packet->ebuf + grh_len;
		packet->grh = packet->ebuf;
		packet->opcode = ib_bth_get_opcode(packet->ohdr);
		packet->pad = hfi1_16B_bth_get_pad(packet->ohdr);
		/* hdr_len_by_opcode already has an IB LRH factored in */
		packet->hlen = hdr_len_by_opcode[packet->opcode] +
			(LRH_16B_BYTES - LRH_9B_BYTES) + grh_len;
		packet->migrated = opa_bth_is_migration(packet->ohdr);

		if (packet->grh->next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(packet->grh->version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
	if (l4 == OPA_16B_L4_FM) {
		/* Fabric management packet: fixed pad/hlen, no BTH/GRH */
		packet->mgmt = packet->ebuf;
		packet->ohdr = NULL;
		packet->grh = NULL;
		packet->opcode = IB_OPCODE_UD_SEND_ONLY;
		packet->pad = OPA_16B_L4_FM_PAD;
		packet->hlen = OPA_16B_L4_FM_HLEN;
		packet->migrated = false;
	} else {
		goto drop;
	}

	/* Query commonly used fields from packet header */
	packet->payload = packet->ebuf + packet->hlen - LRH_16B_BYTES;
	packet->slid = hfi1_16B_get_slid(packet->hdr);
	packet->dlid = hfi1_16B_get_dlid(packet->hdr);
	/* Remap 16B multicast LIDs into the OPA multicast range */
	if (unlikely(hfi1_is_16B_mcast(packet->dlid)))
		packet->dlid += opa_get_mcast_base(OPA_MCAST_NR) -
			opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR),
				    16B);
	packet->sc = hfi1_16B_get_sc(packet->hdr);
	packet->sl = ibp->sc_to_sl[packet->sc];
	packet->extra_byte = SIZE_OF_LT;
	packet->pkey = hfi1_16B_get_pkey(packet->hdr);

	if (hfi1_bypass_ingress_pkt_check(packet))
		goto drop;

	return 0;
drop:
	hfi1_cdbg(PKT, "%s: packet dropped", __func__);
	ibp->rvp.n_pkt_drops++;
	return -EINVAL;
}

/* Decode and log the RHF error flags of a packet on this receive context. */
static void show_eflags_errs(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	dd_dev_err(rcd->dd,
		   "receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s] rte 0x%x\n",
		   rcd->ctxt, packet->rhf,
		   packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
		   packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
		   packet->rhf & RHF_DC_ERR ? "dc " : "",
		   packet->rhf & RHF_TID_ERR ? "tid " : "",
		   packet->rhf & RHF_LEN_ERR ? "len " : "",
		   packet->rhf & RHF_ECC_ERR ? "ecc " : "",
		   packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
		   rte);
}

/* Process RHF error flags: run header-error handling and log the errors. */
void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	rcv_hdrerr(rcd, rcd->ppd, packet);
	if (rhf_err_flags(packet->rhf))
		show_eflags_errs(packet);
}

/*
 * Receive one IB packet on an AIP/ipoib (netdev) context: locate the
 * target net_device by QPN, handle congestion notification, strip
 * pad/CRC/ICRC, and hand the payload to the network stack as an skb.
 */
static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp;
	struct net_device *netdev;
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct napi_struct *napi = rcd->napi;
	struct sk_buff *skb;
	struct hfi1_netdev_rxq *rxq = container_of(napi,
			struct hfi1_netdev_rxq, napi);
	u32 extra_bytes;
	u32 tlen, qpnum;
	bool do_work, do_cnp;

	trace_hfi1_rcvhdr(packet);

	hfi1_setup_ib_header(packet);

	packet->ohdr = &((struct ib_header *)packet->hdr)->u.oth;
	packet->grh = NULL;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	qpnum = ib_bth_get_qpn(packet->ohdr);
	netdev = hfi1_netdev_get_data(rcd->dd, qpnum);
	if (!netdev)
		goto drop_no_nd;

	trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
	trace_ctxt_rsm_hist(rcd->ctxt);

	/* handle congestion notifications */
	do_work = hfi1_may_ecn(packet);
	if (unlikely(do_work)) {
		do_cnp = (packet->opcode != IB_OPCODE_CNP);
		(void)hfi1_process_ecn_slowpath(hfi1_ipoib_priv(netdev)->qp,
						packet, do_cnp);
	}

	/*
	 * We have split point after last byte of DETH
	 * lets strip padding and CRC and ICRC.
	 * tlen is whole packet len so we need to
	 * subtract header size as well.
	 */
	tlen = packet->tlen;
	extra_bytes = ib_bth_get_pad(packet->ohdr) + (SIZE_OF_CRC << 2) +
		      packet->hlen;
	/* Guard against a truncated packet shorter than its overhead */
	if (unlikely(tlen < extra_bytes))
		goto drop;

	tlen -= extra_bytes;

	skb = hfi1_ipoib_prepare_skb(rxq, tlen, packet->ebuf);
	if (unlikely(!skb))
		goto drop;

	dev_sw_netstats_rx_add(netdev, skb->len);

	skb->dev = netdev;
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

	return;

drop:
	++netdev->stats.rx_dropped;
drop_no_nd:
	ibp = rcd_to_iport(packet->rcd);
	++ibp->rvp.n_pkt_drops;
}

/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
static void process_receive_ib(struct hfi1_packet *packet)
{
	if (hfi1_setup_9B_packet(packet))
		return;

	/* debugfs fault injection: optionally drop this packet */
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	hfi1_ib_rcv(packet);
}

static void process_receive_bypass(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;

	if (hfi1_setup_bypass_packet(packet))
		return;

	trace_hfi1_rcvhdr(packet);

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return;
	}

	/* L2 type 0x2 is a 16B packet; nothing else is supported */
	if (hfi1_16B_get_l2(packet->hdr) == 0x2) {
		hfi1_16B_rcv(packet);
	} else {
		dd_dev_err(dd,
			   "Bypass packets other than 16B are not supported in normal operation. Dropping\n");
		incr_cntr64(&dd->sw_rcv_bypass_packet_errors);
		/* Latch the first two flits of the offending packet */
		if (!(dd->err_info_rcvport.status_and_code &
		      OPA_EI_STATUS_SMASK)) {
			u64 *flits = packet->ebuf;

			if (flits && !(packet->rhf & RHF_LEN_ERR)) {
				dd->err_info_rcvport.packet_flit1 = flits[0];
				dd->err_info_rcvport.packet_flit2 =
					packet->tlen > sizeof(flits[0]) ?
					flits[1] : 0;
			}
			dd->err_info_rcvport.status_and_code |=
				(OPA_EI_STATUS_SMASK | BAD_L2_ERR);
		}
	}
}

static void process_receive_error(struct hfi1_packet *packet)
{
	/* KHdrHCRCErr -- KDETH packet with a bad HCRC */
	if (unlikely(
		 hfi1_dbg_fault_suppress_err(&packet->rcd->dd->verbs_dev) &&
		 (rhf_rcv_type_err(packet->rhf) == RHF_RCV_TYPE_ERROR ||
		  packet->rhf & RHF_DC_ERR)))
		return;

	hfi1_setup_ib_header(packet);
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");
}

static void kdeth_process_expected(struct hfi1_packet *packet)
{
	/*
	 * NOTE(review): the return value of hfi1_setup_9B_packet() is not
	 * checked here, unlike process_receive_ib() -- confirm intentional.
	 */
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		/* KDETH eflags handler may consume the packet entirely */
		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_expected_rcv(packet);
}

static void kdeth_process_eager(struct hfi1_packet *packet)
{
	/* See NOTE in kdeth_process_expected(): setup return unchecked */
	hfi1_setup_9B_packet(packet);
	if (unlikely(hfi1_dbg_should_fault_rx(packet)))
		return;

	trace_hfi1_rcvhdr(packet);
	if (unlikely(rhf_err_flags(packet->rhf))) {
		struct hfi1_ctxtdata *rcd = packet->rcd;

		show_eflags_errs(packet);
		if (hfi1_handle_kdeth_eflags(rcd, rcd->ppd, packet))
			return;
	}

	hfi1_kdeth_eager_rcv(packet);
}

/* Catch-all handler for RHF receive types that should never occur. */
static void process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
}

/* Max number of header queue entries dumped by seqfile_dump_rcd() */
#define HFI1_RCVHDR_DUMP_MAX 5

/* Dump a receive context's state and pending hdrq entries to a seq_file. */
void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd)
{
	struct hfi1_packet packet;
	struct ps_mdata mdata;
	int i;

	seq_printf(s, "Rcd %u: RcvHdr cnt %u entsize %u %s ctrl 0x%08llx status 0x%08llx, head %llu tail %llu sw head %u\n",
		   rcd->ctxt, get_hdrq_cnt(rcd), get_hdrqentsize(rcd),
		   get_dma_rtail_setting(rcd) ?
		   "dma_rtail" : "nodma_rtail",
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_CTRL),
		   read_kctxt_csr(rcd->dd, rcd->ctxt, RCV_CTXT_STATUS),
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD) &
		   RCV_HDR_HEAD_HEAD_MASK,
		   read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL),
		   rcd->head);

	init_packet(rcd, &packet);
	init_ps_mdata(&mdata, &packet);

	/* Walk up to HFI1_RCVHDR_DUMP_MAX pending header queue entries */
	for (i = 0; i < HFI1_RCVHDR_DUMP_MAX; i++) {
		__le32 *rhf_addr = (__le32 *)rcd->rcvhdrq + mdata.ps_head +
					 rcd->rhf_offset;
		struct ib_header *hdr;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn;
		u8 opcode;
		u32 psn;
		u8 lnh;

		if (ps_done(&mdata, rhf, rcd))
			break;

		if (ps_skip(&mdata, rhf, rcd))
			goto next;

		/* Only types up through IB carry a parseable 9B header */
		if (etype > RHF_RCV_TYPE_IB)
			goto next;

		packet.hdr = hfi1_get_msgheader(rcd, rhf_addr);
		hdr = packet.hdr;

		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			packet.ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH)
			packet.ohdr = &hdr->u.l.oth;
		else
			goto next; /* just in case */

		opcode = (be32_to_cpu(packet.ohdr->bth[0]) >> 24);
		qpn = be32_to_cpu(packet.ohdr->bth[1]) & RVT_QPN_MASK;
		psn = mask_psn(be32_to_cpu(packet.ohdr->bth[2]));

		seq_printf(s, "\tEnt %u: opcode 0x%x, qpn 0x%x, psn 0x%x\n",
			   mdata.ps_head, opcode, qpn, psn);
next:
		update_ps_mdata(&mdata, rcd);
	}
}

/*
 * Receive dispatch table for normal (verbs) contexts, indexed by the
 * RHF receive type.
 */
const rhf_rcv_function_ptr normal_rhf_rcv_functions[] = {
	[RHF_RCV_TYPE_EXPECTED] = kdeth_process_expected,
	[RHF_RCV_TYPE_EAGER] = kdeth_process_eager,
	[RHF_RCV_TYPE_IB] = process_receive_ib,
	[RHF_RCV_TYPE_ERROR] = process_receive_error,
	[RHF_RCV_TYPE_BYPASS] = process_receive_bypass,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};

/*
 * Receive dispatch table for netdev (AIP/ipoib) contexts: only IB and
 * error packet types are handled; every other type is treated as invalid.
 */
const rhf_rcv_function_ptr netdev_rhf_rcv_functions[] = {
	[RHF_RCV_TYPE_EXPECTED] = process_receive_invalid,
	[RHF_RCV_TYPE_EAGER] = process_receive_invalid,
	[RHF_RCV_TYPE_IB] = hfi1_ipoib_ib_rcv,
	[RHF_RCV_TYPE_ERROR] = process_receive_error,
	[RHF_RCV_TYPE_BYPASS] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
	[RHF_RCV_TYPE_INVALID7] = process_receive_invalid,
};