#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <rdma/rdma_vt.h>

#include "chip_registers.h"
#include "common.h"
#include "verbs.h"
#include "pio.h"
#include "chip.h"
#include "mad.h"
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"

/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U

/* don't care about this except printing */
#define HFI1_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define HFI1_OUI 0x001175
#define HFI1_OUI_LSB 40

#define DROP_PACKET_OFF		0
#define DROP_PACKET_ON		1

extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
	(((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
			 HFI1_CAP_MISC_MASK)
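/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * the kernel- and user-half tests read the shared hfi1_cap_mask, e.g.
 * with the DMA_RTAIL capability bit:
 *
 *	if (HFI1_CAP_IS_KSET(DMA_RTAIL))
 *		... take the DMA receive-tail-update path ...
 */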
/* Offline Disabled Reason is 4-bits */
#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)

/*
 * Control context is always 0 and handles the error packets.
 * It also handles the VL15 and multicast packets.
 */
#define HFI1_CTRL_CTXT 0

/*
 * Driver context will store software counters for each of the events
 * associated with these status registers
 */
#define NUM_CCE_ERR_STATUS_COUNTERS 41
#define NUM_RCV_ERR_STATUS_COUNTERS 64
#define NUM_MISC_ERR_STATUS_COUNTERS 13
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
#define NUM_SEND_ERR_STATUS_COUNTERS 3
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24

/*
 * per driver stats, either not device- nor port-specific, or
 * summed over all of the devices and ports.
 * They are described by name via debugfs, so layout and number of
 * elements can change without breaking compatibility. If members are
 * added or deleted, hfi1_statnames[] in debugfs.c must change to match.
 */
struct hfi1_ib_stats {
	__u64 sps_ints;		/* number of interrupts handled */
	__u64 sps_errints;	/* number of error interrupts */
	__u64 sps_txerrs;	/* tx-related packet errors */
	__u64 sps_rcverrs;	/* non-crc rcv packet errors */
	__u64 sps_hwerrs;	/* hardware errors reported (parity, etc.) */
	__u64 sps_nopiobufs;	/* no pio bufs avail from kernel */
	__u64 sps_ctxts;	/* number of contexts currently open */
	__u64 sps_lenerrs;	/* number of kernel packets where RHF != LRH len */
	__u64 sps_buffull;
	__u64 sps_hdrfull;
};

extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;

/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)

/*
 * Below contains all data related to a single context (formerly called port).
 */

#ifdef CONFIG_DEBUG_FS
struct hfi1_opcode_stats_perctx;
#endif

struct ctxt_eager_bufs {
	ssize_t size;		/* total size of eager buffers */
	u32 count;		/* size of buffers array */
	u32 numbufs;		/* number of buffers allocated */
	u32 alloced;		/* number of rcvarray entries used */
	u32 rcvtid_size;	/* size of each eager rcv tid */
	u32 threshold;		/* head update threshold */
	struct eager_buffer {
		void *addr;
		dma_addr_t dma;
		ssize_t len;
	} *buffers;
	struct {
		void *addr;
		dma_addr_t dma;
	} *rcvtids;
};

struct exp_tid_set {
	struct list_head list;
	u32 count;
};

struct hfi1_ctxtdata {
	/* shadow the ctxt's RcvCtrl register */
	u64 rcvctrl;
	/* rcvhdrq base, needs mmap before useful */
	void *rcvhdrq;
	/* kernel virtual address where hdrqtail is updated */
	volatile __le64 *rcvhdrtail_kvaddr;
	/* when waiting for rcv or pioavail */
	wait_queue_head_t wait;
	/* rcvhdrq size (for freeing) */
	size_t rcvhdrq_size;
	/* number of rcvhdrq entries */
	u16 rcvhdrq_cnt;
	/* size of each of the rcvhdrq entries */
	u16 rcvhdrqentsize;
	/* mmap of hdrq, must fit in 44 bits */
	dma_addr_t rcvhdrq_dma;
	dma_addr_t rcvhdrqtailaddr_dma;
	struct ctxt_eager_bufs egrbufs;
	/* this receive context's assigned PIO ACK send context */
	struct send_context *sc;

	/* dynamic receive available interrupt timeout */
	u32 rcvavail_timeout;
	/* Reference count the base context usage */
	struct kref kref;

	/* Device context index */
	u16 ctxt;
	/*
	 * non-zero if ctxt can be shared, and defines the maximum number of
	 * sub-contexts for this device context.
	 */
	u16 subctxt_cnt;
	/* non-zero if ctxt is being shared. */
	u16 subctxt_id;
	u8 uuid[16];
	/* job key */
	u16 jkey;
	/* number of RcvArray groups for this context. */
	u32 rcv_array_groups;
	/* index of first eager TID entry. */
	u32 eager_base;
	/* number of expected TID entries */
	u32 expected_count;
	/* index of first expected TID entry. */
	u32 expected_base;

	struct exp_tid_set tid_group_list;
	struct exp_tid_set tid_used_list;
	struct exp_tid_set tid_full_list;

	/* lock protecting all Expected TID data */
	struct mutex exp_lock;
	/* per-context configuration flags */
	unsigned long flags;
	/* per-context event flags for fileops/intr communication */
	unsigned long event_flags;
	/* total number of polled urgent packets */
	u32 urgent;
	/* saved total number of polled urgent packets for poll edge trigger */
	u32 urgent_poll;
	/* same size as task_struct .comm[], command that opened context */
	char comm[TASK_COMM_LEN];
	/* so file ops can get at unit */
	struct hfi1_devdata *dd;
	/* so functions that need physical port can get it easily */
	struct hfi1_pportdata *ppd;
	/* associated msix interrupt */
	u32 msix_intr;
	/* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
	void *subctxt_uregbase;
	/* An array of pages for the eager receive buffers * N */
	void *subctxt_rcvegrbuf;
	/* An array of pages for the eager header queue entries * N */
	void *subctxt_rcvhdr_base;
	/* Bitmask of in use context(s) */
	DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
	/* The version of the library which opened this ctxt */
	u32 userversion;
	/* Type of packets or conditions we want to poll for */
	u16 poll_type;
	/* receive packet sequence counter */
	u8 seq_cnt;
	/* ctxt rcvhdrq head offset */
	u32 head;
	/* QPs waiting for context processing */
	struct list_head qp_wait_list;
	/* interrupt handling */
	u64 imask;	/* clear interrupt mask */
	int ireg;	/* clear interrupt register */
	unsigned numa_id;	/* numa node of this context */
	/* verbs stats per CTX */
	struct hfi1_opcode_stats_perctx *opstats;

	/* Is ASPM interrupt supported for this context */
	bool aspm_intr_supported;
	/* ASPM state (enabled/disabled) for this context */
	bool aspm_enabled;
	/* Timer for re-enabling ASPM if interrupt activity quietens down */
	struct timer_list aspm_timer;
	/* Lock to serialize between intr, timer intr and user threads */
	spinlock_t aspm_lock;
	/* Is ASPM processing enabled for this context (in intr context) */
	bool aspm_intr_enable;
	/* Last interrupt timestamp */
	ktime_t aspm_ts_last_intr;
	/* Last timestamp at which we scheduled a timer for this context */
	ktime_t aspm_ts_timer_sched;

	/*
	 * The interrupt handler for a particular receive context can vary
	 * throughout its lifetime. This is not a lock-protected data member,
	 * so it must be updated atomically and the previous and new values
	 * must always be valid. Worst case is we process an extra interrupt
	 * and up to 64 packets with the wrong interrupt handler.
	 */
	int (*do_interrupt)(struct hfi1_ctxtdata *rcd, int threaded);

	/* Indicates that this is a vnic context */
	bool is_vnic;

	/* vnic queue index this context is mapped to */
	u8 vnic_q_idx;
};
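/*
 * Illustrative sketch (editor's example, not driver code): swapping
 * do_interrupt as described above. A store to an aligned pointer-sized
 * member is atomic on the architectures the driver supports;
 * WRITE_ONCE()/READ_ONCE() make the unlocked access explicit.
 *
 *	WRITE_ONCE(rcd->do_interrupt, handle_receive_interrupt_dma_rtail);
 *	...
 *	int (*handler)(struct hfi1_ctxtdata *, int) =
 *		READ_ONCE(rcd->do_interrupt);
 *	handler(rcd, threaded);
 */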
/*
 * Represents a single packet at a high level. Put commonly computed things in
 * here so we do not have to keep doing them over and over. The rule of thumb
 * is if something is used one time to derive some value, store that something
 * in here. If it is used multiple times, then store the result of that
 * derivation in here.
 */
struct hfi1_packet {
	void *ebuf;
	void *hdr;
	void *payload;
	struct hfi1_ctxtdata *rcd;
	__le32 *rhf_addr;
	struct rvt_qp *qp;
	struct ib_other_headers *ohdr;
	struct ib_grh *grh;
	u64 rhf;
	u32 maxcnt;
	u32 rhqoff;
	u32 dlid;
	u32 slid;
	u16 tlen;
	s16 etail;
	u8 hlen;
	u8 numpkt;
	u8 rsize;
	u8 updegr;
	u8 etype;
	u8 extra_byte;
	u8 pad;
	u8 sc;
	u8 sl;
	u8 opcode;
	bool becn;
	bool fecn;
};

/*
 * OPA 16B Header
 */
#define OPA_16B_L4_MASK		0xFFull
#define OPA_16B_SC_MASK		0x1F00000ull
#define OPA_16B_SC_SHIFT	20
#define OPA_16B_LID_MASK	0xFFFFFull
#define OPA_16B_DLID_MASK	0xF000ull
#define OPA_16B_DLID_SHIFT	20
#define OPA_16B_DLID_HIGH_SHIFT	12
#define OPA_16B_SLID_MASK	0xF00ull
#define OPA_16B_SLID_SHIFT	20
#define OPA_16B_SLID_HIGH_SHIFT	8
#define OPA_16B_BECN_MASK	0x80000000ull
#define OPA_16B_BECN_SHIFT	31
#define OPA_16B_FECN_MASK	0x10000000ull
#define OPA_16B_FECN_SHIFT	28
#define OPA_16B_L2_MASK		0x60000000ull
#define OPA_16B_L2_SHIFT	29

/*
 * OPA 16B L2/L4 Encodings
 */
#define OPA_16B_L2_TYPE		0x02
#define OPA_16B_L4_IB_LOCAL	0x09
#define OPA_16B_L4_IB_GLOBAL	0x0A
#define OPA_16B_L4_ETHR		OPA_VNIC_L4_ETHR

static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
	return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
}

static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
}

static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
		     OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
}

static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
{
	return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
		     (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
		     OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
}

static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
}

static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
}

static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
{
	return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
}

/*
 * BTH
 */
#define OPA_16B_BTH_PAD_MASK	7
static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
		    OPA_16B_BTH_PAD_MASK);
}
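/*
 * Worked example (editor's illustration, not driver code): the 24-bit
 * 16B DLID is split across the header; the low 20 bits live in lrh[1]
 * (under OPA_16B_LID_MASK) and the high 4 bits in lrh[2] bits 15:12.
 * With hdr->lrh[1] = 0x12345 and hdr->lrh[2] = 0x7000:
 *
 *	hfi1_16B_get_dlid(hdr) == 0x12345 | (0x7 << 20) == 0x712345
 */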
struct rvt_sge_state;

/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define HFI1_IB_CFG_SPD 5 /* current Link spd */
#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */

/*
 * HFI or Host Link States
 *
 * These describe the states the driver thinks the logical and physical
 * states are in. Used as an argument to set_link_state(). Implemented
 * as bits for easy multi-state checking. The actual state can only be
 * one.
 */
#define __HLS_UP_INIT_BP	0
#define __HLS_UP_ARMED_BP	1
#define __HLS_UP_ACTIVE_BP	2
#define __HLS_DN_DOWNDEF_BP	3	/* link down default */
#define __HLS_DN_POLL_BP	4
#define __HLS_DN_DISABLE_BP	5
#define __HLS_DN_OFFLINE_BP	6
#define __HLS_VERIFY_CAP_BP	7
#define __HLS_GOING_UP_BP	8
#define __HLS_GOING_OFFLINE_BP	9
#define __HLS_LINK_COOLDOWN_BP	10

#define HLS_UP_INIT	  BIT(__HLS_UP_INIT_BP)
#define HLS_UP_ARMED	  BIT(__HLS_UP_ARMED_BP)
#define HLS_UP_ACTIVE	  BIT(__HLS_UP_ACTIVE_BP)
#define HLS_DN_DOWNDEF	  BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
#define HLS_DN_POLL	  BIT(__HLS_DN_POLL_BP)
#define HLS_DN_DISABLE	  BIT(__HLS_DN_DISABLE_BP)
#define HLS_DN_OFFLINE	  BIT(__HLS_DN_OFFLINE_BP)
#define HLS_VERIFY_CAP	  BIT(__HLS_VERIFY_CAP_BP)
#define HLS_GOING_UP	  BIT(__HLS_GOING_UP_BP)
#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)

#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)
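/*
 * Example of the multi-state checking the bit encoding enables
 * (editor's illustration; driver_pstate() below uses the same idiom):
 *
 *	if (ppd->host_link_state & HLS_UP)
 *		... link is in Init, Armed, or Active ...
 */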
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff

/*
 * Possible fabric manager config parameters for fm_{get,set}_table()
 */
#define FM_TBL_VL_HIGH_ARB		1 /* Get/set VL high prio weights */
#define FM_TBL_VL_LOW_ARB		2 /* Get/set VL low prio weights */
#define FM_TBL_BUFFER_CONTROL		3 /* Get/set Buffer Control */
#define FM_TBL_SC2VLNT			4 /* Get/set SC->VLnt */
#define FM_TBL_VL_PREEMPT_ELEMS		5 /* Get (no set) VL preempt elems */
#define FM_TBL_VL_PREEMPT_MATRIX	6 /* Get (no set) VL preempt matrix */

/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
#define HFI1_RCVCTRL_CTXT_ENB 0x04
#define HFI1_RCVCTRL_CTXT_DIS 0x08
#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
#define HFI1_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
#define HFI1_RCVCTRL_PKEY_DIS 0x80
#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000

/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN	0x1
#define HFI1_PART_ENFORCE_OUT	0x2

/* how often we check for synthetic counter wrap around */
#define SYNTH_CNT_TIME 3

/* Counter flags */
#define CNTR_NORMAL	0x0 /* Normal counters, just read register */
#define CNTR_SYNTH	0x1 /* Synthetic counters, saturate at all 1s */
#define CNTR_DISABLED	0x2 /* Disable this counter */
#define CNTR_32BIT	0x4 /* Simulate 64 bits for this counter */
#define CNTR_VL		0x8 /* Per VL counter */
#define CNTR_SDMA	0x10
#define CNTR_INVALID_VL	-1  /* Specifies invalid VL */
#define CNTR_MODE_W	0x0
#define CNTR_MODE_R	0x1

/* VLs Supported/Operational */
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8

#define HFI1_GUIDS_PER_PORT  5
#define HFI1_PORT_GUID_INDEX 0

static inline void incr_cntr64(u64 *cntr)
{
	if (*cntr < (u64)-1LL)
		(*cntr)++;
}

static inline void incr_cntr32(u32 *cntr)
{
	if (*cntr < (u32)-1LL)
		(*cntr)++;
}

#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
	enum irq_type type;
	int irq;
	void *arg;
	char name[MAX_NAME_SIZE];
	cpumask_t mask;
	struct irq_affinity_notify notify;
};

/* per-SL CCA information */
struct cca_timer {
	struct hrtimer hrtimer;
	struct hfi1_pportdata *ppd; /* read-only */
	int sl; /* read-only */
	u16 ccti; /* read/write - current value of CCTI */
};

struct link_down_reason {
	/*
	 * SMA-facing value. Should be set from .latest when
	 * HLS_UP_* -> HLS_DN_* transition actually occurs.
	 */
	u8 sma;
	u8 latest;
};

enum {
	LO_PRIO_TABLE,
	HI_PRIO_TABLE,
	MAX_PRIO_TABLE
};

struct vl_arb_cache {
	/* protect vl arb cache */
	spinlock_t lock;
	struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};

/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {
	struct hfi1_ibport ibport_data;

	struct hfi1_devdata *dd;
	struct kobject pport_cc_kobj;
	struct kobject sc2vl_kobj;
	struct kobject sl2sc_kobj;
	struct kobject vl2mtu_kobj;

	/* PHY support */
	struct qsfp_data qsfp_info;
	/* Values for SI tuning of SerDes */
	u32 port_type;
	u32 tx_preset_eq;
	u32 tx_preset_noeq;
	u32 rx_preset;
	u8 local_atten;
	u8 remote_atten;
	u8 default_atten;
	u8 max_power_class;

	/* GUIDs for this interface, in host order, guids[0] is a port guid */
	u64 guids[HFI1_GUIDS_PER_PORT];

	/* GUID for peer interface, in host order */
	u64 neighbor_guid;

	/* up or down physical link state */
	u32 linkup;

	/*
	 * this address is mapped read-only into user processes so they can
	 * get status cheaply, whenever they want. One qword of status per port
	 */
	u64 *statusp;

	/* SendDMA related entries */

	struct workqueue_struct *hfi1_wq;
	struct workqueue_struct *link_wq;

	/* move out of interrupt context */
	struct work_struct link_vc_work;
	struct work_struct link_up_work;
	struct work_struct link_down_work;
	struct work_struct sma_message_work;
	struct work_struct freeze_work;
	struct work_struct link_downgrade_work;
	struct work_struct link_bounce_work;
	struct delayed_work start_link_work;
	/* host link state variables */
	struct mutex hls_lock;
	u32 host_link_state;

	/* these are the "32 bit" regs */

	u32 ibmtu; /* The MTU programmed for this unit */
	/*
	 * Current max size IB packet (in bytes) including IB headers, that
	 * we can send. Changes when ibmtu changes.
	 */
	u32 ibmaxlen;
	u32 current_egress_rate; /* units [10^6 bits/sec] */
	/* LID programmed for this instance */
	u16 lid;
	/* list of pkeys programmed; 0 if not set */
	u16 pkeys[MAX_PKEY_VALUES];
	u16 link_width_supported;
	u16 link_width_downgrade_supported;
	u16 link_speed_supported;
	u16 link_width_enabled;
	u16 link_width_downgrade_enabled;
	u16 link_speed_enabled;
	u16 link_width_active;
	u16 link_width_downgrade_tx_active;
	u16 link_width_downgrade_rx_active;
	u16 link_speed_active;
	u8 vls_supported;
	u8 vls_operational;
	u8 actual_vls_operational;
	/* LID mask control */
	u8 lmc;
	/* Rx Polarity inversion (compensate for ~tx on partner) */
	u8 rx_pol_inv;

	u8 hw_pidx; /* physical port index */
	u8 port;    /* IB port number and index into dd->pports - 1 */
	/* type of neighbor node */
	u8 neighbor_type;
	u8 neighbor_normal;
	u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
	u8 neighbor_port_number;
	u8 is_sm_config_started;
	u8 offline_disabled_reason;
	u8 is_active_optimize_enabled;
	u8 driver_link_ready; /* driver ready for active link */
	u8 link_enabled;      /* link enabled? */
	u8 linkinit_reason;
	u8 local_tx_rate; /* rate given to 8051 firmware */
	u8 pstate; /* info only */
	u8 qsfp_retry_count;

	/* placeholders for IB MAD packet settings */
	u8 overrun_threshold;
	u8 phy_error_threshold;
	unsigned int is_link_down_queued;

	/* Used to override LED behavior for things like maintenance beaconing */
	/*
	 * Alternates per phase of blink
	 * [0] holds LED off duration, [1] holds LED on duration
	 */
	unsigned long led_override_vals[2];
	u8 led_override_phase; /* LSB picks from vals[] */
	atomic_t led_override_timer_active;
	/* Used to flash LEDs in override mode */
	struct timer_list led_override_timer;

	u32 sm_trap_qp;
	u32 sa_qp;

	/*
	 * cca_timer_lock protects access to the per-SL cca_timer
	 * structures (specifically the ccti member).
	 */
	spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
	struct cca_timer cca_timer[OPA_MAX_SLS];

	/* List of congestion control table entries */
	struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];

	/* congestion entries, each entry corresponding to a SL */
	struct opa_congestion_setting_entry_shadow
		congestion_entries[OPA_MAX_SLS];

	/*
	 * cc_state_lock protects (write) access to the per-port
	 * struct cc_state.
	 */
	spinlock_t cc_state_lock ____cacheline_aligned_in_smp;

	struct cc_state __rcu *cc_state;

	/* Total number of congestion control table entries */
	u16 total_cct_entry;

	/* Bit map identifying service level */
	u32 cc_sl_control_map;

	/* CA's max number of 64 entry units in the congestion control table */
	u8 cc_max_table_entries;

	/*
	 * begin congestion log related entries
	 * cc_log_lock protects all congestion log related data
	 */
	spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
	u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
	u16 threshold_event_counter;
	struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
	int cc_log_idx; /* index for logging events */
	int cc_mad_idx; /* index for reporting events */
	/* end congestion log related entries */

	struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];

	/* port relative counter buffer */
	u64 *cntrs;
	/* port relative synthetic counter buffer */
	u64 *scntrs;
	/* port_xmit_discards are synthesized from different egress errors */
	u64 port_xmit_discards;
	u64 port_xmit_discards_vl[C_VL_COUNT];
	u64 port_xmit_constraint_errors;
	u64 port_rcv_constraint_errors;
	/* count of 'link_err' interrupts from DC */
	u64 link_downed;
	/* number of times link retrained successfully */
	u64 link_up;
	/* number of times a link unknown frame was reported */
	u64 unknown_frame_count;
	/* port_ltp_crc_mode is returned in 'portinfo' MADs */
	u16 port_ltp_crc_mode;
	/* port_crc_mode_enabled is the crc we support */
	u8 port_crc_mode_enabled;
	/* mgmt_allowed is also returned in 'portinfo' MADs */
	u8 mgmt_allowed;
	u8 part_enforce; /* partition enforcement flags */
	struct link_down_reason local_link_down_reason;
	struct link_down_reason neigh_link_down_reason;
	/* Value to be sent to link peer on LinkDown. */
	u8 remote_link_down_reason;
	/* Error events that will cause a port bounce. */
	u32 port_error_action;
	struct work_struct linkstate_active_work;
	/* Does this port need to prescan for FECNs */
	bool cc_prescan;
};

typedef int (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);

typedef void (*opcode_handler)(struct hfi1_packet *packet);

/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE  0	/* keep going */
#define RHF_RCV_DONE	  1	/* stop, this packet processed */
#define RHF_RCV_REPROCESS 2	/* stop. retain this packet */

struct rcv_array_data {
	u8 group_size;
	u16 ngroups;
	u16 nctxt_extra;
};

struct per_vl_data {
	u16 mtu;
	struct send_context *sc;
};

/* 16 to directly index */
#define PER_VL_SEND_CONTEXTS 16

struct err_info_rcvport {
	u8 status_and_code;
	u64 packet_flit1;
	u64 packet_flit2;
};

struct err_info_constraint {
	u8 status;
	u16 pkey;
	u32 slid;
};

struct hfi1_temp {
	unsigned int curr;	/* current temperature */
	unsigned int lo_lim;	/* low temperature limit */
	unsigned int hi_lim;	/* high temperature limit */
	unsigned int crit_lim;	/* critical temperature limit */
	u8 triggers;		/* temperature triggers */
};

struct hfi1_i2c_bus {
	struct hfi1_devdata *controlling_dd; /* current controlling device */
	struct i2c_adapter adapter;	/* bus details */
	struct i2c_algo_bit_data algo;	/* bus algorithm details */
	int num;			/* bus number, 0 or 1 */
};

/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
	struct hfi1_devdata *dds[2];	/* back pointers */
	struct mutex asic_resource_mutex;
	struct hfi1_i2c_bus *i2c_bus0;
	struct hfi1_i2c_bus *i2c_bus1;
};

/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES	256
#define NUM_MAP_REGS 32

/*
 * Number of VNIC contexts used. Ensure it is less than or equal to
 * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
 */
#define HFI1_NUM_VNIC_CTXT 8

/* Number of VNIC RSM entries */
#define NUM_VNIC_MAP_ENTRIES 8

/* Virtual NIC information */
struct hfi1_vnic_data {
	struct hfi1_ctxtdata *ctxt[HFI1_NUM_VNIC_CTXT];
	struct kmem_cache *txreq_cache;
	u8 num_vports;
	struct idr vesw_idr;
	u8 rmt_start;
	u8 num_ctxt;
	u32 msix_idx;
};

struct hfi1_vnic_vport_info;

/*
 * device data struct now contains only "general per-device" info.
 * fields related to a physical IB port are in a hfi1_pportdata struct.
 */
struct sdma_engine;
struct sdma_vl_map;

#define BOARD_VERS_MAX 96 /* how long the version string can be */
#define SERIAL_MAX 16 /* length of the serial number */

typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_devdata {
	struct hfi1_ibdev verbs_dev;	/* must be first */
	struct list_head list;
	/* pointers to related structs for this device */
	/* pci access data structure */
	struct pci_dev *pcidev;
	struct cdev user_cdev;
	struct cdev diag_cdev;
	struct cdev ui_cdev;
	struct device *user_device;
	struct device *diag_device;
	struct device *ui_device;

	/* first mapping up to RcvArray */
	u8 __iomem *kregbase1;
	resource_size_t physaddr;

	/* second uncached mapping from RcvArray to pio send buffers */
	u8 __iomem *kregbase2;
	/* for detecting offset above kregbase2 address */
	u32 base2_start;

	/* Per VL data. Enough for all VLs but not all elements are set/used. */
	struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
	/* send context data */
	struct send_context_info *send_contexts;
	/* map hardware send contexts to software index */
	u8 *hw_to_sw;
	/* spinlock for allocating and releasing send context resources */
	spinlock_t sc_lock;
	/* lock for pio_map */
	spinlock_t pio_map_lock;
	/* Send Context initialization lock. */
	spinlock_t sc_init_lock;
	/* lock for sdma_map */
	spinlock_t sde_map_lock;
	/* array of kernel send contexts */
	struct send_context **kernel_send_context;
	/* array of vl maps */
	struct pio_vl_map __rcu *pio_map;
	/* default flags to last descriptor */
	u64 default_desc1;

	/* fields common to all SDMA engines */

	volatile __le64 *sdma_heads_dma;	/* DMA'ed by chip */
	dma_addr_t sdma_heads_phys;
	void *sdma_pad_dma;			/* DMA'ed by chip */
	dma_addr_t sdma_pad_phys;
	/* for deallocation */
	size_t sdma_heads_size;
	/* number from the chip */
	u32 chip_sdma_engines;
	/* num used */
	u32 num_sdma;
	/* array of engines sized by num_sdma */
	struct sdma_engine *per_sdma;
	/* array of vl maps */
	struct sdma_vl_map __rcu *sdma_map;
	/* SPC freeze waitqueue and variable */
	wait_queue_head_t sdma_unfreeze_wq;
	atomic_t sdma_unfreeze_count;

	u32 lcb_access_count;	/* count of LCB users */

	/* common data between shared ASIC HFIs in this OS */
	struct hfi1_asic_data *asic_data;

	/* mem-mapped pointer to base of PIO buffers */
	void __iomem *piobase;
	/*
	 * write-combining mem-mapped pointer to base of RcvArray
	 * memory.
	 */
	void __iomem *rcvarray_wc;
	/*
	 * credit return base - a per-NUMA range of DMA address that
	 * the chip will use to update the per-context free counter
	 */
	struct credit_return_base *cr_base;

	/* send context numbers and sizes for each type */
	struct sc_config_sizes sc_sizes[SC_MAX];

	char *boardname; /* human readable board info */

	/* reset value */
	u64 z_int_counter;
	u64 z_rcv_limit;
	u64 z_send_schedule;

	u64 __percpu *send_schedule;
	/* number of receive contexts in use by the driver */
	u32 num_rcv_contexts;
	/* number of pio send contexts in use by the driver */
	u32 num_send_contexts;
	/*
	 * number of ctxts available for PSM open
	 */
	u32 freectxts;
	/* total number of available user/PSM contexts */
	u32 num_user_contexts;
	/* base receive interrupt timeout, in CSR units */
	u32 rcv_intr_timeout_csr;

	u32 freezelen; /* max length of freezemsg */
	u64 __iomem *egrtidbase;
	spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
	spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
	spinlock_t uctxt_lock; /* protect rcd changes */
	struct mutex dc8051_lock; /* exclusive access to 8051 */
	struct workqueue_struct *update_cntr_wq;
	struct work_struct update_cntr_work;
	/* exclusive access to 8051 memory */
	spinlock_t dc8051_memlock;
	int dc8051_timed_out;	/* remember if the 8051 timed out */
	/*
	 * A page that will hold event notification bitmaps for all
	 * contexts. This page will be mapped into all processes.
	 */
	unsigned long *events;
	/*
	 * per unit status, see also portdata statusp
	 * mapped read-only into user processes so they can get unit and
	 * IB link status cheaply
	 */
	struct hfi1_status *status;

	/* revision register shadow */
	u64 revision;
	/* Base GUID for device (network order) */
	u64 base_guid;

	/* these are the "32 bit" regs */

	/* value we put in kr_rcvhdrsize */
	u32 rcvhdrsize;
	/* number of receive contexts the chip supports */
	u32 chip_rcv_contexts;
	/* number of receive array entries */
	u32 chip_rcv_array_count;
	/* number of PIO send contexts the chip supports */
	u32 chip_send_contexts;
	/* number of bytes in the PIO memory buffer */
	u32 chip_pio_mem_size;
	/* number of bytes in the SDMA memory buffer */
	u32 chip_sdma_mem_size;

	/* size of each rcvegrbuffer */
	u32 rcvegrbufsize;
	/* log2 of above */
	u16 rcvegrbufsize_shift;
	/* both sides of the PCIe link are gen3 capable */
	u8 link_gen3_capable;
	/* default link down value (poll/sleep) */
	u8 link_default;
	/* localbus width (1, 2, 4, 8, 16, 32) from config space */
	u32 lbus_width;
	/* localbus speed in MHz */
	u32 lbus_speed;
	int unit; /* unit # of this chip */
	int node; /* home node of this chip */

	/* save these PCI fields to restore after a reset */
	u32 pcibar0;
	u32 pcibar1;
	u32 pci_rom;
	u16 pci_command;
	u16 pcie_devctl;
	u16 pcie_lnkctl;
	u16 pcie_devctl2;
	u32 pci_msix0;
	u32 pci_lnkctl3;
	u32 pci_tph2;

	/*
	 * ASCII serial number, from flash, large enough for original
	 * all digit strings, and longer serial number format
	 */
	u8 serial[SERIAL_MAX];
	/* human readable board version */
	u8 boardversion[BOARD_VERS_MAX];
	u8 lbus_info[32]; /* human readable localbus info */
	/* chip major rev, from CceRevision */
	u8 majrev;
	/* chip minor rev, from CceRevision */
	u8 minrev;
	/* hardware ID */
	u8 hfi1_id;
	/* implementation code */
	u8 icode;
	/* vAU of this device */
	u8 vau;
	/* vCU of this device */
	u8 vcu;
	/* link credits of this device */
	u16 link_credits;
	/* initial vl15 credits to use */
	u16 vl15_init;

	/*
	 * Cached value for vl15buf, read during verify cap interrupt. VL15
	 * credits are to be kept at 0 and set when handling the link-up
	 * interrupt. This removes the possibility of receiving VL15 MAD
	 * packets before this HFI is ready.
	 */
	u16 vl15buf_cached;

	/* Misc small ints */
	u8 n_krcv_queues;
	u8 qos_shift;

	u16 irev;	/* implementation revision */
	u32 dc8051_ver; /* 8051 firmware version */

	spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
	struct platform_config platform_config;
	struct platform_config_cache pcfg_cache;

	struct diag_client *diag_client;

	/* MSI-X information */
	struct hfi1_msix_entry *msix_entries;
	u32 num_msix_entries;
	u32 first_dyn_msix_idx;

	/* INTx information */
	u32 requested_intx_irq;		/* did we request one? */
	char intx_name[MAX_NAME_SIZE];	/* INTx name */

	/* general interrupt: mask of handled interrupts */
	u64 gi_mask[CCE_NUM_INT_CSRS];

	struct rcv_array_data rcv_entries;

	/* cycle length of PS* counters in HW (in picoseconds) */
	u16 psxmitwait_check_rate;

	/*
	 * 64 bit synthetic counters
	 */
	struct timer_list synth_stats_timer;

	/*
	 * device counters
	 */
	char *cntrnames;
	size_t cntrnameslen;
	size_t ndevcntrs;
	u64 *cntrs;
	u64 *scntrs;

	/*
	 * remembered values for synthetic counters
	 */
	u64 last_tx;
	u64 last_rx;

	/*
	 * per-port counters
	 */
	size_t nportcntrs;
	char *portcntrnames;
	size_t portcntrnameslen;

	struct err_info_rcvport err_info_rcvport;
	struct err_info_constraint err_info_rcv_constraint;
	struct err_info_constraint err_info_xmit_constraint;

	atomic_t drop_packet;
	u8 do_drop;
	u8 err_info_uncorrectable;
	u8 err_info_fmconfig;

	/*
	 * Software counters for the status bits defined by the
	 * associated error status registers
	 */
	u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
	u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
	u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
	u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
	u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
	u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
	u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];

	/* Software counter that spans all contexts */
	u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
	/* Software counter that spans all DMA engines */
	u64 sw_send_dma_eng_err_status_cnt[
		NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
	/* Software counter that aggregates all cce_err_status errors */
	u64 sw_cce_err_status_aggregate;
	/* Software counter that aggregates all bypass packet rcv errors */
	u64 sw_rcv_bypass_packet_errors;
	/* receive interrupt function */
	rhf_rcv_function_ptr normal_rhf_rcv_functions[8];

	/* Save the enabled LCB error bits */
	u64 lcb_err_en;

	/*
	 * Capability to have different send engines simply by changing a
	 * pointer value.
	 */
	send_routine process_pio_send ____cacheline_aligned_in_smp;
	send_routine process_dma_send;
	void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
				u64 pbc, const void *from, size_t count);
	int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
				     struct hfi1_vnic_vport_info *vinfo,
				     struct sk_buff *skb, u64 pbc, u8 plen);
	/* hfi1_pportdata, points to array of (physical) port-specific
	 * data structs, indexed by pidx (0..n-1)
	 */
	struct hfi1_pportdata *pport;
	/* receive context data */
	struct hfi1_ctxtdata **rcd;
	u64 __percpu *int_counter;
	/* device (not port) flags, basically device capabilities */
	u16 flags;
	/* Number of physical ports available */
	u8 num_pports;
	/* Lowest context number which can be used by user processes or VNIC */
	u8 first_dyn_alloc_ctxt;
	/* adding a new field here would make it part of this cacheline */

	/* seqlock for sc2vl */
	seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
	u64 sc2vl[4];
	/* receive interrupt functions */
	rhf_rcv_function_ptr *rhf_rcv_function_map;
	u64 __percpu *rcv_limit;
	u16 rhf_offset; /* offset of RHF within receive header entry */
	/* adding a new field here would make it part of this cacheline */

	/* OUI comes from the HW. Used everywhere as 3 separate bytes. */
	u8 oui1;
	u8 oui2;
	u8 oui3;
	u8 dc_shutdown;

	/* Timer and counter used to detect RcvBufOvflCnt changes */
	struct timer_list rcverr_timer;

	wait_queue_head_t event_queue;

	/* receive context tail dummy address */
	__le64 *rcvhdrtail_dummy_kvaddr;
	dma_addr_t rcvhdrtail_dummy_dma;

	u32 rcv_ovfl_cnt;
	/* Serialize ASPM enable/disable between multiple verbs contexts */
	spinlock_t aspm_lock;
	/* Number of verbs contexts which have disabled ASPM */
	atomic_t aspm_disabled_cnt;
	/* Keeps track of user space clients */
	atomic_t user_refcount;
	/* Used to wait for outstanding user space clients before dev removal */
	struct completion user_comp;

	bool eprom_available;	/* true if EPROM is available for this device */
	bool aspm_supported;	/* Does HW support ASPM */
	bool aspm_enabled;	/* ASPM state: enabled/disabled */
	struct rhashtable *sdma_rht;

	struct kobject kobj;

	/* vnic data */
	struct hfi1_vnic_data vnic;
};

static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
{
	return (dd->vnic.rmt_start + spare) > NUM_MAP_ENTRIES;
}

/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
#define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
#define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
#define dc8051_ver_patch(a) ((a) & 0x0000ff)
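/*
 * Worked example (editor's illustration): firmware version 1.42.7 packs
 * one byte per component, so dc8051_ver(1, 42, 7) == 0x012a07, and
 * dc8051_ver_maj(0x012a07) == 1, dc8051_ver_min(0x012a07) == 42,
 * dc8051_ver_patch(0x012a07) == 7.
 */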
/* f_put_tid types */
#define PT_EXPECTED       0
#define PT_EAGER          1
#define PT_INVALID_FLUSH  2
#define PT_INVALID        3

struct tid_rb_node;
struct mmu_rb_node;
struct mmu_rb_handler;

/* Private data for file operations */
struct hfi1_filedata {
	struct hfi1_devdata *dd;
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;
	u16 subctxt;
	/* for cpu affinity; -1 if none */
	int rec_cpu_num;
	u32 tid_n_pinned;
	struct mmu_rb_handler *handler;
	struct tid_rb_node **entry_to_rb;
	spinlock_t tid_lock; /* protect tid_[limit,used] counters */
	u32 tid_limit;
	u32 tid_used;
	u32 *invalid_tids;
	u32 invalid_tid_idx;
	/* protect invalid_tids array and invalid_tid_idx */
	spinlock_t invalid_lock;
	struct mm_struct *mm;
};

extern struct list_head hfi1_dev_list;
extern spinlock_t hfi1_devs_lock;
struct hfi1_devdata *hfi1_lookup(int unit);
extern u32 hfi1_cpulist_count;
extern unsigned long *hfi1_cpulist;

int hfi1_init(struct hfi1_devdata *dd, int reinit);
int hfi1_count_active_units(void);

int hfi1_diag_add(struct hfi1_devdata *dd);
void hfi1_diag_remove(struct hfi1_devdata *dd);
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);

void handle_user_interrupt(struct hfi1_ctxtdata *rcd);

int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
int hfi1_create_kctxts(struct hfi1_devdata *dd);
int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
			 struct hfi1_ctxtdata **rcd);
void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
			 struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);

extern const struct pci_device_id hfi1_pci_tbl[];

/* receive packet handler dispositions */
#define RCV_PKT_OK      0x0 /* keep going */
#define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
#define RCV_PKT_DONE    0x2 /* stop, no more packets detected */

/* calculate the current RHF address */
static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
{
	return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->dd->rhf_offset;
}

int hfi1_reset_device(int);

/* return the driver's idea of the physical OPA port state */
static inline u32 driver_pstate(struct hfi1_pportdata *ppd)
{
	/*
	 * When DC is shut down and state is changed, its CSRs are not
	 * impacted, therefore host_link_state should be used to get
	 * current physical state.
	 */
	if (ppd->dd->dc_shutdown)
		return driver_physical_state(ppd);
	/*
	 * The driver does some processing from the time the physical
	 * link state is at LINKUP to the time the SM can be notified
	 * as such. Return IB_PORTPHYSSTATE_TRAINING until the software
	 * state is ready.
	 */
	if (ppd->pstate == PLS_LINKUP &&
	    !(ppd->host_link_state & HLS_UP))
		return IB_PORTPHYSSTATE_TRAINING;
	else
		return chip_to_opa_pstate(ppd->dd, ppd->pstate);
}

void receive_interrupt_work(struct work_struct *work);

/* extract service channel from header and rhf */
static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
{
	return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
}

#define HFI1_JKEY_WIDTH       16
#define HFI1_JKEY_MASK        (BIT(16) - 1)
#define HFI1_ADMIN_JKEY_RANGE 32

/*
 * J_KEYs are split and allocated in the following groups:
 *   0 - 31    - users with administrator privileges
 *  32 - 63    - kernel protocols using KDETH packets
 *  64 - 65535 - all other users using KDETH packets
 */
static inline u16 generate_jkey(kuid_t uid)
{
	u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;

	if (capable(CAP_SYS_ADMIN))
		jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
	else if (jkey < 64)
		jkey |= BIT(HFI1_JKEY_WIDTH - 1);

	return jkey;
}

/*
 * active_egress_rate
 *
 * returns the active egress rate in units of [10^6 bits/sec]
 */
static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
{
	u16 link_speed = ppd->link_speed_active;
	u16 link_width = ppd->link_width_active;
	u32 egress_rate;

	if (link_speed == OPA_LINK_SPEED_25G)
		egress_rate = 25000;
	else /* assume OPA_LINK_SPEED_12_5G */
		egress_rate = 12500;

	switch (link_width) {
	case OPA_LINK_WIDTH_4X:
		egress_rate *= 4;
		break;
	case OPA_LINK_WIDTH_3X:
		egress_rate *= 3;
		break;
	case OPA_LINK_WIDTH_2X:
		egress_rate *= 2;
		break;
	default:
		/* assume IB_WIDTH_1X */
		break;
	}

	return egress_rate;
}

/*
 * egress_cycles
 *
 * Returns the number of 'fabric clock cycles' to egress a packet
 * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
 * rate is (approximately) 805 MHz, the units of the returned value
 * are (1/805 MHz).
 */
static inline u32 egress_cycles(u32 len, u32 rate)
{
	u32 cycles;

	/*
	 * cycles is:
	 *
	 *          (length) [bits] / (rate) [bits/sec]
	 *  ---------------------------------------------------
	 *   fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
	 */

	cycles = len * 8; /* bits */
	cycles *= 805;
	cycles /= rate;

	return cycles;
}
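/*
 * Worked example (editor's illustration): a 4X link at 25G gives
 * active_egress_rate() == 4 * 25000 == 100000 Mbit/s, so a 1024 byte
 * packet costs egress_cycles(1024, 100000) ==
 * 1024 * 8 * 805 / 100000 == 65 fabric clock cycles (~81 ns at 805 MHz).
 */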
void set_link_ipg(struct hfi1_pportdata *ppd);
void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
		  u32 rqpn, u8 svc_type);
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh);
#define PKEY_CHECK_INVALID -1
int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
		      u8 sc5, int8_t s_pkey_index);

#define PACKET_EGRESS_TIMEOUT 350
static inline void pause_for_credit_return(struct hfi1_devdata *dd)
{
	/* Pause at least 1us, to ensure chip returns all credits */
	u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;

	udelay(usec ? usec : 1);
}

/**
 * sc_to_vlt() reverse lookup sc to vl
 * @dd - devdata
 * @sc5 - 5 bit sc
 */
static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
{
	unsigned seq;
	u8 rval;

	if (sc5 >= OPA_MAX_SCS)
		return (u8)(0xff);

	do {
		seq = read_seqbegin(&dd->sc2vl_lock);
		rval = *(((u8 *)dd->sc2vl) + sc5);
	} while (read_seqretry(&dd->sc2vl_lock, seq));

	return rval;
}

#define PKEY_MEMBER_MASK 0x8000
#define PKEY_LOW_15_MASK 0x7fff

/*
 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for ingress partition keys
 * specified in the OPAv1 spec., section 9.10.14.
 */
static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is clear (limited partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (!(pkey & PKEY_MEMBER_MASK))
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
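/*
 * Worked example (editor's illustration): bit 15 is the membership bit,
 * so a limited-member pkey only matches a full-member table entry:
 *
 *	ingress_pkey_matches_entry(0x7fff, 0xffff) == 1
 *	ingress_pkey_matches_entry(0x7fff, 0x7fff) == 0  (both limited)
 *	ingress_pkey_matches_entry(0xffff, 0x7fff) == 1
 */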
/*
 * ingress_pkey_table_search - search the entire pkey table for
 * an entry which matches 'pkey'. return 0 if a match is found,
 * and 1 otherwise.
 */
static int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
{
	int i;

	for (i = 0; i < MAX_PKEY_VALUES; i++) {
		if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
			return 0;
	}
	return 1;
}

/*
 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
 * i.e., increment port_rcv_constraint_errors for the port, and record
 * the 'error info' for this failure.
 */
static void ingress_pkey_table_fail(struct hfi1_pportdata *ppd, u16 pkey,
				    u16 slid)
{
	struct hfi1_devdata *dd = ppd->dd;

	incr_cntr64(&ppd->port_rcv_constraint_errors);
	if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
		dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
		dd->err_info_rcv_constraint.slid = slid;
		dd->err_info_rcv_constraint.pkey = pkey;
	}
}

/*
 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
 * is a hint as to the best place in the partition key table to begin
 * searching. This function should not be called on the data path because
 * of performance reasons. On datapath pkey check is expected to be done
 * by HW and rcv_pkey_check function should be called instead.
 */
static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				     u8 sc5, u8 idx, u16 slid)
{
	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/* The most likely matching pkey has index 'idx' */
	if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
		return 0;

	/* no match - try the whole table */
	if (!ingress_pkey_table_search(ppd, pkey))
		return 0;

bad:
	ingress_pkey_table_fail(ppd, pkey, slid);
	return 1;
}

/*
 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
 * otherwise. It only ensures the pkey is valid for QP0. This function
 * should be called on the data path instead of ingress_pkey_check
 * as on data path, pkey check is done by HW (except for QP0).
 */
static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
				 u8 sc5, u16 slid)
{
	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
		return 0;

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	return 0;
bad:
	ingress_pkey_table_fail(ppd, pkey, slid);
	return 1;
}

/* MTU handling */

/* MTU enumeration, 256-4k match IB */
#define OPA_MTU_0     0
#define OPA_MTU_256   1
#define OPA_MTU_512   2
#define OPA_MTU_1024  3
#define OPA_MTU_2048  4
#define OPA_MTU_4096  5

u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
int mtu_to_enum(u32 mtu, int default_if_bad);
u16 enum_to_mtu(int mtu);
static inline int valid_ib_mtu(unsigned int mtu)
{
	return mtu == 256 || mtu == 512 ||
		mtu == 1024 || mtu == 2048 ||
		mtu == 4096;
}

static inline int valid_opa_max_mtu(unsigned int mtu)
{
	return mtu >= 2048 &&
		(valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
}

int set_mtu(struct hfi1_pportdata *ppd);

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
void hfi1_disable_after_error(struct hfi1_devdata *dd);
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);

int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);

void set_up_vau(struct hfi1_devdata *dd, u8 vau);
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);

int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);

static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{
	return ppd->dd;
}

static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
{
	return container_of(dev, struct hfi1_devdata, verbs_dev);
}

static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}

static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
{
	return container_of(ibp, struct hfi1_pportdata, ibport_data);
}

static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
{
	return container_of(rdi, struct hfi1_ibdev, rdi);
}

static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
int set_mtu(struct hfi1_pportdata *ppd);

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
void hfi1_disable_after_error(struct hfi1_devdata *dd);
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);

int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);

void set_up_vau(struct hfi1_devdata *dd, u8 vau);
void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
void reset_link_credits(struct hfi1_devdata *dd);
void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);

int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);

static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
{
	return ppd->dd;
}

static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
{
	return container_of(dev, struct hfi1_devdata, verbs_dev);
}

static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	return dd_from_dev(to_idev(ibdev));
}

static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
{
	return container_of(ibp, struct hfi1_pportdata, ibport_data);
}

static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
{
	return container_of(rdi, struct hfi1_ibdev, rdi);
}

static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB numbers ports from 1, hardware from 0 */

	WARN_ON(pidx >= dd->num_pports);
	return &dd->pport[pidx].ibport_data;
}

static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
{
	return &rcd->ppd->ibport_data;
}

void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool do_cnp);
static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
			       bool do_cnp)
{
	struct ib_other_headers *ohdr = pkt->ohdr;
	u32 bth1;

	bth1 = be32_to_cpu(ohdr->bth[1]);
	if (unlikely(bth1 & (IB_BECN_SMASK | IB_FECN_SMASK))) {
		hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
		return !!(bth1 & IB_FECN_SMASK);
	}
	return false;
}

/*
 * Return the indexed PKEY from the port PKEY table.
 */
static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u16 ret;

	if (index >= ARRAY_SIZE(ppd->pkeys))
		ret = 0;
	else
		ret = ppd->pkeys[index];

	return ret;
}

/*
 * Return the indexed GUID from the port GUIDs table.
 */
static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	WARN_ON(index >= HFI1_GUIDS_PER_PORT);
	return cpu_to_be64(ppd->guids[index]);
}

/*
 * Called by readers of cc_state only; must be called under
 * rcu_read_lock().
 */
static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
{
	return rcu_dereference(ppd->cc_state);
}

/*
 * Called by writers of cc_state only; must be called with
 * cc_state_lock held.
 */
static inline
struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
{
	return rcu_dereference_protected(ppd->cc_state,
					 lockdep_is_held(&ppd->cc_state_lock));
}
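/*
 * Illustrative reader-side sketch (not driver code): the RCU read
 * lock must bracket both the get_cc_state() dereference and every
 * access made through the returned pointer.
 */
static inline bool example_cc_state_installed(struct hfi1_pportdata *ppd)
{
	bool installed;

	rcu_read_lock();
	installed = get_cc_state(ppd) != NULL;
	rcu_read_unlock();

	return installed;
}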
/*
 * values for dd->flags (_device_ related flags)
 */
#define HFI1_INITTED		0x1	/* chip and driver up and initted */
#define HFI1_PRESENT		0x2	/* chip accesses can be done */
#define HFI1_FROZEN		0x4	/* chip in SPC freeze */
#define HFI1_HAS_SDMA_TIMEOUT	0x8
#define HFI1_HAS_SEND_DMA	0x10	/* Supports Send DMA */
#define HFI1_FORCED_FREEZE	0x80	/* driver forced freeze mode */

/* IB dword length mask in PBC (lower 11 bits); same for all chips */
#define HFI1_PBC_LENGTH_MASK	((1 << 11) - 1)

/* ctxt_flag bit offsets */
/* base context has not finished initializing */
#define HFI1_CTXT_BASE_UNINIT 1
/* base context initialization failed */
#define HFI1_CTXT_BASE_FAILED 2
/* waiting for a packet to arrive */
#define HFI1_CTXT_WAITING_RCV 3
/* waiting for an urgent packet to arrive */
#define HFI1_CTXT_WAITING_URG 4

/* free up any allocated data at close */
struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
				  const struct pci_device_id *ent);
void hfi1_free_devdata(struct hfi1_devdata *dd);
struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);

/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
			     unsigned int timeoff);
void shutdown_led_override(struct hfi1_pportdata *ppd);

#define HFI1_CREDIT_RETURN_RATE (100)

/*
 * The number of words for the KDETH protocol field. If this is
 * larger than the actual field used, then part of the payload
 * will be in the header.
 *
 * Optimally, we want this sized so that a typical case will
 * use full cache lines. The typical local KDETH header would
 * be:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 12	BTH
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 28 + KDETH
 *
 * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
 */
#define DEFAULT_RCVHDRSIZE 9

/*
 * Maximal header byte count:
 *
 *	Bytes	Field
 *	  8	LRH
 *	 40	GRH (optional)
 *	 12	BTH
 *	 ??	KDETH
 *	  8	RHF
 *	---
 *	 68 + KDETH
 *
 * We also want to maintain a cache line alignment to assist DMA'ing
 * of the header bytes. Round up to a good size.
 */
#define DEFAULT_RCVHDR_ENTSIZE 32
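/*
 * Sketch of the arithmetic above as a compile-time check (illustrative,
 * not part of the driver): LRH (8) + BTH (12) + RHF (8) plus a
 * DEFAULT_RCVHDRSIZE-dword KDETH field fills one 64-byte cache line
 * exactly.
 */
static inline void example_rcvhdr_size_check(void)
{
	BUILD_BUG_ON(8 + 12 + 8 + DEFAULT_RCVHDRSIZE * 4 != 64);
}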
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
			u32 nlocked, u32 npages);
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
			    size_t npages, bool writable, struct page **pages);
void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
			     size_t npages, bool dirty);

static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	*((u64 *)rcd->rcvhdrtail_kvaddr) = 0ULL;
}

static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
	/*
	 * volatile because it's a DMA target from the chip, routine is
	 * inlined, and we don't want register caching or reordering.
	 */
	return (u32)le64_to_cpu(*rcd->rcvhdrtail_kvaddr);
}
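/*
 * Illustrative polling sketch (not driver code): compare a
 * software-maintained head index against the tail value the chip DMAs
 * into host memory to decide whether unprocessed packets are pending.
 * The sw_head parameter is assumed here purely for illustration.
 */
static inline bool example_rcv_pending(const struct hfi1_ctxtdata *rcd,
				       u32 sw_head)
{
	return get_rcvhdrtail(rcd) != sw_head;
}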
/*
 * sysfs interface.
 */

extern const char ib_hfi1_version[];

int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);

int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
			   struct kobject *kobj);
int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);

int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd);
int pcie_speeds(struct hfi1_devdata *dd);
int request_msix(struct hfi1_devdata *dd, u32 msireq);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len);

const char *get_unit_name(int unit);
const char *get_card_name(struct rvt_dev_info *rdi);
struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);

/*
 * Flush write combining store buffers (if present) and perform a write
 * barrier.
 */
static inline void flush_wc(void)
{
	asm volatile("sfence" : : : "memory");
}

void handle_eflags(struct hfi1_packet *packet);
int process_receive_ib(struct hfi1_packet *packet);
int process_receive_bypass(struct hfi1_packet *packet);
int process_receive_error(struct hfi1_packet *packet);
int kdeth_process_expected(struct hfi1_packet *packet);
int kdeth_process_eager(struct hfi1_packet *packet);
int process_receive_invalid(struct hfi1_packet *packet);

/* global module parameter variables */
extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
extern uint kdeth_qp;
extern uint loopback;
extern uint quick_linkup;
extern uint rcv_intr_timeout;
extern uint rcv_intr_count;
extern uint rcv_intr_dynamic;
extern ushort link_crc_mask;

extern struct mutex hfi1_mutex;

/* Number of seconds before our card status check... */
#define STATUS_TIMEOUT 60

#define DRIVER_NAME		"hfi1"
#define HFI1_USER_MINOR_BASE	0
#define HFI1_TRACE_MINOR	127
#define HFI1_NMINORS		255

#define PCI_VENDOR_ID_INTEL	0x8086
#define PCI_DEVICE_ID_INTEL0	0x24f0
#define PCI_DEVICE_ID_INTEL1	0x24f1

#define HFI1_PKT_USER_SC_INTEGRITY					\
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK	\
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK		\
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK			\
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)

#define HFI1_PKT_KERNEL_SC_INTEGRITY					\
	(SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)

static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
						  u16 ctxt_type)
{
	u64 base_sc_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sc_integrity =
	SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (ctxt_type == SC_USER)
		base_sc_integrity |= HFI1_PKT_USER_SC_INTEGRITY;
	else
		base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sc_integrity;
}

static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
{
	u64 base_sdma_integrity;

	/* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
	if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
		return 0;

	base_sdma_integrity =
	SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
	| SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
	| SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;

	if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
		base_sdma_integrity |=
		SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;

	/* turn on send-side job key checks if !A0 */
	if (!is_ax(dd))
		base_sdma_integrity |=
			SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;

	return base_sdma_integrity;
}
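/*
 * Illustrative sketch (not driver code): the helpers above compute the
 * values intended for the send-context and SDMA CHECK_ENABLE CSRs. A
 * user PIO context additionally disallows KDETH, bypass, and GRH
 * packets relative to a kernel context, so the user mask is a strict
 * superset of the kernel one. SC_KERNEL is assumed here to be the
 * kernel send-context type from pio.h.
 */
static inline bool example_user_ctxt_is_stricter(struct hfi1_devdata *dd)
{
	u64 user = hfi1_pkt_default_send_ctxt_mask(dd, SC_USER);
	u64 kernel = hfi1_pkt_default_send_ctxt_mask(dd, SC_KERNEL);

	/* every kernel-context check is also enabled for user contexts */
	return (user & kernel) == kernel;
}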
/*
 * hfi1_early_err is used (only!) to print early errors before devdata is
 * allocated, or when dd->pcidev may not be valid, and at the tail end of
 * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
 * the same as dd_dev_err, but is used when the message really needs
 * the IB port# to be definitive as to what's happening.
 */
#define hfi1_early_err(dev, fmt, ...) \
	dev_err(dev, fmt, ##__VA_ARGS__)

#define hfi1_early_info(dev, fmt, ...) \
	dev_info(dev, fmt, ##__VA_ARGS__)

#define dd_dev_emerg(dd, fmt, ...) \
	dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
		  get_unit_name((dd)->unit), ##__VA_ARGS__)
#define dd_dev_err(dd, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
		get_unit_name((dd)->unit), ##__VA_ARGS__)
#define dd_dev_warn(dd, fmt, ...) \
	dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
		 get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_warn_ratelimited(dd, fmt, ...) \
	dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_info(dd, fmt, ...) \
	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
		 get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_info_ratelimited(dd, fmt, ...) \
	dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
			     get_unit_name((dd)->unit), ##__VA_ARGS__)

#define dd_dev_dbg(dd, fmt, ...) \
	dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
		get_unit_name((dd)->unit), ##__VA_ARGS__)

#define hfi1_dev_porterr(dd, port, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
		get_unit_name((dd)->unit), (port), ##__VA_ARGS__)

/*
 * this is used for formatting hw error messages...
 */
struct hfi1_hwerror_msgs {
	u64 mask;
	const char *msg;
	size_t sz;
};

/* in intr.c... */
void hfi1_format_hwerrors(u64 hwerrs,
			  const struct hfi1_hwerror_msgs *hwerrmsgs,
			  size_t nhwerrmsgs, char *msg, size_t lmsg);

#define USER_OPCODE_CHECK_VAL 0xC0
#define USER_OPCODE_CHECK_MASK 0xC0
#define OPCODE_CHECK_VAL_DISABLED 0x0
#define OPCODE_CHECK_MASK_DISABLED 0x0

static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	dd->z_int_counter = get_all_cpu_total(dd->int_counter);
	dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
	dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.z_rc_acks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
		ppd->ibport_data.rvp.z_rc_qacks =
			get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
	}
}

/* Control LED state */
static inline void setextled(struct hfi1_devdata *dd, u32 on)
{
	if (on)
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
	else
		write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
}

/* return the i2c resource given the target */
static inline u32 i2c_target(u32 target)
{
	return target ? CR_I2C2 : CR_I2C1;
}

/* return the i2c chain chip resource that this HFI uses for QSFP */
static inline u32 qsfp_resource(struct hfi1_devdata *dd)
{
	return i2c_target(dd->hfi1_id);
}

/* Is this device integrated or discrete? */
static inline bool is_integrated(struct hfi1_devdata *dd)
{
	return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
}

int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);

#define DD_DEV_ENTRY(dd)	__string(dev, dev_name(&(dd)->pcidev->dev))
#define DD_DEV_ASSIGN(dd)	__assign_str(dev, dev_name(&(dd)->pcidev->dev))

/*
 * hfi1_check_mcast - Check if the given lid is
 * in the OPA multicast range.
 *
 * The LID might either reside in ah.dlid or might be
 * in the GRH of the address handle as DGID if extended
 * addresses are in use.
 */
static inline bool hfi1_check_mcast(u32 lid)
{
	return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
		(lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
}

#define opa_get_lid(lid, format) \
	__opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)

/* Convert a lid to a specific lid space */
static inline u32 __opa_get_lid(u32 lid, u8 format)
{
	bool is_mcast = hfi1_check_mcast(lid);

	switch (format) {
	case OPA_PORT_PACKET_FORMAT_8B:
	case OPA_PORT_PACKET_FORMAT_10B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF0000);
		return lid & 0xFFFFF;
	case OPA_PORT_PACKET_FORMAT_16B:
		if (is_mcast)
			return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
				0xF00000);
		return lid & 0xFFFFFF;
	case OPA_PORT_PACKET_FORMAT_9B:
		if (is_mcast)
			return (lid -
				opa_get_mcast_base(OPA_MCAST_NR) +
				be16_to_cpu(IB_MULTICAST_LID_BASE));
		else
			return lid & 0xFFFF;
	default:
		return lid;
	}
}
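/*
 * Worked example (illustrative only): converting LIDs into the 9B
 * (IB-compatible) LID space with the helper above. A unicast LID is
 * simply truncated to 16 bits; a multicast LID is rebased from the
 * OPA multicast range onto the IB multicast range starting at 0xC000.
 */
static inline void example_opa_lid_conversions(void)
{
	u32 mcast_base = opa_get_mcast_base(OPA_MCAST_NR);

	/* unicast: low 16 bits pass through unchanged */
	WARN_ON(opa_get_lid(0x1234, 9B) != 0x1234);

	/* multicast: offset into the OPA range maps onto 0xC000 + offset */
	WARN_ON(opa_get_lid(mcast_base + 1, 9B) !=
		be16_to_cpu(IB_MULTICAST_LID_BASE) + 1);
}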
/* Return true if the given lid is in the OPA 16B multicast range */
static inline bool hfi1_is_16B_mcast(u32 lid)
{
	return ((lid >=
		opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
		(lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
}
#endif /* _HFI1_KERNEL_H */