/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef QIB_VERBS_H
#define QIB_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

struct qib_ctxtdata;
struct qib_pportdata;
struct qib_devdata;
struct qib_verbs_txreq;

#define QIB_MAX_RDMA_ATOMIC     16
#define QIB_GUIDS_PER_PORT      5

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define QIB_UVERBS_ABI_VERSION  2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE      (IB_CQ_NEXT_COMP + 1)

#define IB_SEQ_NAK      (3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

/* Flags for checking QP state (see ib_qib_state_ops[]) */
#define QIB_POST_SEND_OK                0x01
#define QIB_POST_RECV_OK                0x02
#define QIB_PROCESS_RECV_OK             0x04
#define QIB_PROCESS_SEND_OK             0x08
#define QIB_PROCESS_NEXT_SEND_OK        0x10
#define QIB_FLUSH_SEND                  0x20
#define QIB_FLUSH_RECV                  0x40
#define QIB_PROCESS_OR_FLUSH_SEND \
        (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE       0x00
#define IB_PMA_SAMPLE_STATUS_STARTED    0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING    0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA   cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA    cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS   cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS    cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT   cpu_to_be16(0x0005)

#define QIB_VENDOR_IPG          cpu_to_be16(0xFFA0)

#define IB_BTH_REQ_ACK          (1 << 31)
#define IB_BTH_SOLICITED        (1 << 23)
#define IB_BTH_MIG_REQ          (1 << 22)

/* XXX Should be defined in ib_verbs.h enum ib_port_cap_flags */
#define IB_PORT_OTHER_LOCAL_CHANGES_SUP (1 << 26)

#define IB_GRH_VERSION          6
#define IB_GRH_VERSION_MASK     0xF
#define IB_GRH_VERSION_SHIFT    28
#define IB_GRH_TCLASS_MASK      0xFF
#define IB_GRH_TCLASS_SHIFT     20
#define IB_GRH_FLOW_MASK        0xFFFFF
#define IB_GRH_FLOW_SHIFT       0
#define IB_GRH_NEXT_HDR         0x1B

#define IB_DEFAULT_GID_PREFIX   cpu_to_be64(0xfe80000000000000ULL)

/* Values for set/get portinfo VLCap OperationalVLs */
#define IB_VL_VL0       1
#define IB_VL_VL0_1     2
#define IB_VL_VL0_3     3
#define IB_VL_VL0_7     4
#define IB_VL_VL0_14    5

static inline int qib_num_vls(int vls)
{
        switch (vls) {
        default:
        case IB_VL_VL0:
                return 1;
        case IB_VL_VL0_1:
                return 2;
        case IB_VL_VL0_3:
                return 4;
        case IB_VL_VL0_7:
                return 8;
        case IB_VL_VL0_14:
                return 15;
        }
}

struct ib_reth {
        __be64 vaddr;
        __be32 rkey;
        __be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
        __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
        __be32 rkey;
        __be64 swap_data;
        __be64 compare_data;
} __attribute__ ((packed));

struct qib_other_headers {
        __be32 bth[3];
        union {
                struct {
                        __be32 deth[2];
                        __be32 imm_data;
                } ud;
                struct {
                        struct ib_reth reth;
                        __be32 imm_data;
                } rc;
                struct {
                        __be32 aeth;
                        __be32 atomic_ack_eth[2];
                } at;
                __be32 imm_data;
                __be32 aeth;
                struct ib_atomic_eth atomic_eth;
        } u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data), i.e. LRH (8) + GRH (40) + BTH (12) + DETH (8).
 * Only the first 56 bytes of the IB header will be in the eager header
 * buffer.  The remaining 12 or 16 bytes are in the data buffer.
 */
struct qib_ib_header {
        __be16 lrh[4];
        union {
                struct {
                        struct ib_grh grh;
                        struct qib_other_headers oth;
                } l;
                struct qib_other_headers oth;
        } u;
} __attribute__ ((packed));

struct qib_pio_header {
        __le32 pbc[2];
        struct qib_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct qib_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct qib_mcast_qp.
 */
struct qib_mcast_qp {
        struct list_head list;
        struct qib_qp *qp;
};

struct qib_mcast {
        struct rb_node rb_node;
        union ib_gid mgid;
        struct list_head qp_list;
        wait_queue_head_t wait;
        atomic_t refcount;
        int n_attached;
};

/* Protection domain */
struct qib_pd {
        struct ib_pd ibpd;
        int user;       /* non-zero if created from user space */
};

/* Address Handle */
struct qib_ah {
        struct ib_ah ibah;
        struct ib_ah_attr attr;
        atomic_t refcount;
};

/*
 * This structure is used by qib_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
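 *
 * A likely reading of the fields declared below: the offset handed to
 * user space is allocated from qib_ibdev.mmap_offset (see
 * qib_create_mmap_info()), and the object stays on the device's
 * pending_mmaps list until user space maps it via qib_mmap().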
 */
struct qib_mmap_info {
        struct list_head pending_mmaps;
        struct ib_ucontext *context;
        void *obj;
        __u64 offset;
        struct kref ref;
        unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct qib_cq_wc {
        u32 head;       /* index of next entry to fill */
        u32 tail;       /* index of next ib_poll_cq() entry */
        union {
                /* these are actually size ibcq.cqe + 1 */
                struct ib_uverbs_wc uqueue[0];
                struct ib_wc kqueue[0];
        };
};

/*
 * The completion queue structure.
 */
struct qib_cq {
        struct ib_cq ibcq;
        struct work_struct comptask;
        spinlock_t lock;        /* protect changes in this struct */
        u8 notify;
        u8 triggered;
        struct qib_cq_wc *queue;
        struct qib_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct qib_seg {
        void *vaddr;
        size_t length;
};

/* The number of qib_segs that fit in a page. */
#define QIB_SEGSZ       (PAGE_SIZE / sizeof(struct qib_seg))

struct qib_segarray {
        struct qib_seg segs[QIB_SEGSZ];
};

struct qib_mregion {
        struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
        u64 user_base;          /* User's address for this region */
        u64 iova;               /* IB start address of this region */
        size_t length;
        u32 lkey;
        u32 offset;             /* offset (bytes) to start of region */
        int access_flags;
        u32 max_segs;           /* number of qib_segs in all the arrays */
        u32 mapsz;              /* size of the map array */
        atomic_t refcount;
        struct qib_segarray *map[0];    /* the segments */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct qib_sge {
        struct qib_mregion *mr;
        void *vaddr;            /* kernel virtual address of segment */
        u32 sge_length;         /* length of the SGE */
        u32 length;             /* remaining length of the segment */
        u16 m;                  /* current index: mr->map[m] */
        u16 n;                  /* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct qib_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct qib_mregion mr;  /* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct qib_swqe {
        struct ib_send_wr wr;   /* don't use wr.sg_list */
        u32 psn;                /* first packet sequence number */
        u32 lpsn;               /* last packet sequence number */
        u32 ssn;                /* send sequence number */
        u32 length;             /* total length of data in sg_list */
        struct qib_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct qib_rwqe {
        u64 wr_id;
        u8 num_sge;
        struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
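 * Each element occupies sizeof(struct qib_rwqe) plus
 * max_sge * sizeof(struct ib_sge) bytes, which is exactly the stride
 * that get_rwqe_ptr() multiplies by.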
360 */ 361 struct qib_rwq { 362 u32 head; /* new work requests posted to the head */ 363 u32 tail; /* receives pull requests from here. */ 364 struct qib_rwqe wq[0]; 365 }; 366 367 struct qib_rq { 368 struct qib_rwq *wq; 369 spinlock_t lock; /* protect changes in this struct */ 370 u32 size; /* size of RWQE array */ 371 u8 max_sge; 372 }; 373 374 struct qib_srq { 375 struct ib_srq ibsrq; 376 struct qib_rq rq; 377 struct qib_mmap_info *ip; 378 /* send signal when number of RWQEs < limit */ 379 u32 limit; 380 }; 381 382 struct qib_sge_state { 383 struct qib_sge *sg_list; /* next SGE to be used if any */ 384 struct qib_sge sge; /* progress state for the current SGE */ 385 u32 total_len; 386 u8 num_sge; 387 }; 388 389 /* 390 * This structure holds the information that the send tasklet needs 391 * to send a RDMA read response or atomic operation. 392 */ 393 struct qib_ack_entry { 394 u8 opcode; 395 u8 sent; 396 u32 psn; 397 u32 lpsn; 398 union { 399 struct qib_sge rdma_sge; 400 u64 atomic_data; 401 }; 402 }; 403 404 /* 405 * Variables prefixed with s_ are for the requester (sender). 406 * Variables prefixed with r_ are for the responder (receiver). 407 * Variables prefixed with ack_ are for responder replies. 408 * 409 * Common variables are protected by both r_rq.lock and s_lock in that order 410 * which only happens in modify_qp() or changing the QP 'state'. 411 */ 412 struct qib_qp { 413 struct ib_qp ibqp; 414 struct qib_qp *next; /* link list for QPN hash table */ 415 struct qib_qp *timer_next; /* link list for qib_ib_timer() */ 416 struct list_head iowait; /* link for wait PIO buf */ 417 struct list_head rspwait; /* link for waititing to respond */ 418 struct ib_ah_attr remote_ah_attr; 419 struct ib_ah_attr alt_ah_attr; 420 struct qib_ib_header s_hdr; /* next packet header to send */ 421 atomic_t refcount; 422 wait_queue_head_t wait; 423 wait_queue_head_t wait_dma; 424 struct timer_list s_timer; 425 struct work_struct s_work; 426 struct qib_mmap_info *ip; 427 struct qib_sge_state *s_cur_sge; 428 struct qib_verbs_txreq *s_tx; 429 struct qib_mregion *s_rdma_mr; 430 struct qib_sge_state s_sge; /* current send request data */ 431 struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1]; 432 struct qib_sge_state s_ack_rdma_sge; 433 struct qib_sge_state s_rdma_read_sge; 434 struct qib_sge_state r_sge; /* current receive data */ 435 spinlock_t r_lock; /* used for APM */ 436 spinlock_t s_lock; 437 atomic_t s_dma_busy; 438 unsigned processor_id; /* Processor ID QP is bound to */ 439 u32 s_flags; 440 u32 s_cur_size; /* size of send packet in bytes */ 441 u32 s_len; /* total length of s_sge */ 442 u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ 443 u32 s_next_psn; /* PSN for next request */ 444 u32 s_last_psn; /* last response PSN processed */ 445 u32 s_sending_psn; /* lowest PSN that is being sent */ 446 u32 s_sending_hpsn; /* highest PSN that is being sent */ 447 u32 s_psn; /* current packet sequence number */ 448 u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ 449 u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ 450 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ 451 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ 452 u64 r_wr_id; /* ID for current receive WQE */ 453 unsigned long r_aflags; 454 u32 r_len; /* total length of r_sge */ 455 u32 r_rcv_len; /* receive data len processed */ 456 u32 r_psn; /* expected rcv packet sequence number */ 457 u32 r_msn; /* message sequence number */ 458 u16 s_hdrwords; /* size of s_hdr in 32 bit words */ 459 
        u16 s_rdma_ack_cnt;
        u8 state;               /* QP state */
        u8 s_state;             /* opcode of last packet sent */
        u8 s_ack_state;         /* opcode of packet to ACK */
        u8 s_nak_state;         /* non-zero if NAK is pending */
        u8 r_state;             /* opcode of last packet received */
        u8 r_nak_state;         /* non-zero if NAK is pending */
        u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
        u8 r_flags;
        u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
        u8 r_head_ack_queue;    /* index into s_ack_queue[] */
        u8 qp_access_flags;
        u8 s_max_sge;           /* size of s_wq->sg_list */
        u8 s_retry_cnt;         /* number of times to retry */
        u8 s_rnr_retry_cnt;
        u8 s_retry;             /* requester retry counter */
        u8 s_rnr_retry;         /* requester RNR retry counter */
        u8 s_pkey_index;        /* PKEY index to use */
        u8 s_alt_pkey_index;    /* Alternate path PKEY index to use */
        u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
        u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
        u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
        u8 s_srate;
        u8 s_draining;
        u8 s_mig_state;
        u8 timeout;             /* Timeout for this QP */
        u8 alt_timeout;         /* Alternate path timeout for this QP */
        u8 port_num;
        enum ib_mtu path_mtu;
        u32 remote_qpn;
        u32 qkey;               /* QKEY for this QP (for UD or RD) */
        u32 s_size;             /* send work queue size */
        u32 s_head;             /* new entries added here */
        u32 s_tail;             /* next entry to process */
        u32 s_cur;              /* current work queue entry */
        u32 s_acked;            /* last un-ACK'ed entry */
        u32 s_last;             /* last completed entry */
        u32 s_ssn;              /* SSN of tail entry */
        u32 s_lsn;              /* limit sequence number (credit) */
        struct qib_swqe *s_wq;  /* send work queue */
        struct qib_swqe *s_wqe;
        struct qib_rq r_rq;             /* receive work queue */
        struct qib_sge r_sg_list[0];    /* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define QIB_R_WRID_VALID        0
#define QIB_R_REWIND_SGE        1

/*
 * Bit definitions for r_flags.
 */
#define QIB_R_REUSE_SGE 0x01
#define QIB_R_RDMAR_SEQ 0x02
#define QIB_R_RSP_NAK   0x04
#define QIB_R_RSP_SEND  0x08
#define QIB_R_COMM_EST  0x10

/*
 * Bit definitions for s_flags.
521 * 522 * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled 523 * QIB_S_BUSY - send tasklet is processing the QP 524 * QIB_S_TIMER - the RC retry timer is active 525 * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics 526 * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs 527 * before processing the next SWQE 528 * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete 529 * before processing the next SWQE 530 * QIB_S_WAIT_RNR - waiting for RNR timeout 531 * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE 532 * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating 533 * next send completion entry not via send DMA 534 * QIB_S_WAIT_PIO - waiting for a send buffer to be available 535 * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available 536 * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available 537 * QIB_S_WAIT_KMEM - waiting for kernel memory to be available 538 * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue 539 * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests 540 * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK 541 */ 542 #define QIB_S_SIGNAL_REQ_WR 0x0001 543 #define QIB_S_BUSY 0x0002 544 #define QIB_S_TIMER 0x0004 545 #define QIB_S_RESP_PENDING 0x0008 546 #define QIB_S_ACK_PENDING 0x0010 547 #define QIB_S_WAIT_FENCE 0x0020 548 #define QIB_S_WAIT_RDMAR 0x0040 549 #define QIB_S_WAIT_RNR 0x0080 550 #define QIB_S_WAIT_SSN_CREDIT 0x0100 551 #define QIB_S_WAIT_DMA 0x0200 552 #define QIB_S_WAIT_PIO 0x0400 553 #define QIB_S_WAIT_TX 0x0800 554 #define QIB_S_WAIT_DMA_DESC 0x1000 555 #define QIB_S_WAIT_KMEM 0x2000 556 #define QIB_S_WAIT_PSN 0x4000 557 #define QIB_S_WAIT_ACK 0x8000 558 #define QIB_S_SEND_ONE 0x10000 559 #define QIB_S_UNLIMITED_CREDIT 0x20000 560 561 /* 562 * Wait flags that would prevent any packet type from being sent. 563 */ 564 #define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \ 565 QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM) 566 567 /* 568 * Wait flags that would prevent send work requests from making progress. 569 */ 570 #define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \ 571 QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \ 572 QIB_S_WAIT_PSN | QIB_S_WAIT_ACK) 573 574 #define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND) 575 576 #define QIB_PSN_CREDIT 16 577 578 /* 579 * Since struct qib_swqe is not a fixed size, we can't simply index into 580 * struct qib_qp.s_wq. This function does the array index computation. 581 */ 582 static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp, 583 unsigned n) 584 { 585 return (struct qib_swqe *)((char *)qp->s_wq + 586 (sizeof(struct qib_swqe) + 587 qp->s_max_sge * 588 sizeof(struct qib_sge)) * n); 589 } 590 591 /* 592 * Since struct qib_rwqe is not a fixed size, we can't simply index into 593 * struct qib_rwq.wq. This function does the array index computation. 594 */ 595 static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n) 596 { 597 return (struct qib_rwqe *) 598 ((char *) rq->wq->wq + 599 (sizeof(struct qib_rwqe) + 600 rq->max_sge * sizeof(struct ib_sge)) * n); 601 } 602 603 /* 604 * QPN-map pages start out as NULL, they get allocated upon 605 * first use and are never deallocated. This way, 606 * large bitmaps are not allocated unless large numbers of QPs are used. 
 */
struct qpn_map {
        void *page;
};

struct qib_qpn_table {
        spinlock_t lock;        /* protect changes in this struct */
        unsigned flags;         /* flags for QP0/1 allocated for each port */
        u32 last;               /* last QP number allocated */
        u32 nmaps;              /* size of the map table */
        u16 limit;
        u16 mask;
        /* bit map of free QP numbers other than 0/1 */
        struct qpn_map map[QPNMAP_ENTRIES];
};

struct qib_lkey_table {
        spinlock_t lock;        /* protect changes in this struct */
        u32 next;               /* next unused index (speeds search) */
        u32 gen;                /* generation count */
        u32 max;                /* size of the table */
        struct qib_mregion **table;
};

struct qib_opcode_stats {
        u64 n_packets;          /* number of packets */
        u64 n_bytes;            /* total number of bytes */
};

struct qib_ibport {
        struct qib_qp *qp0;
        struct qib_qp *qp1;
        struct ib_mad_agent *send_agent;        /* agent for SMI (traps) */
        struct qib_ah *sm_ah;
        struct qib_ah *smi_ah;
        struct rb_root mcast_tree;
        spinlock_t lock;        /* protect changes in this struct */

        /* non-zero when timer is set */
        unsigned long mkey_lease_timeout;
        unsigned long trap_timeout;
        __be64 gid_prefix;      /* in network order */
        __be64 mkey;
        __be64 guids[QIB_GUIDS_PER_PORT - 1];   /* writable GUIDs */
        u64 tid;                /* TID for traps */
        u64 n_unicast_xmit;     /* total unicast packets sent */
        u64 n_unicast_rcv;      /* total unicast packets received */
        u64 n_multicast_xmit;   /* total multicast packets sent */
        u64 n_multicast_rcv;    /* total multicast packets received */
        u64 z_symbol_error_counter;             /* starting count for PMA */
        u64 z_link_error_recovery_counter;      /* starting count for PMA */
        u64 z_link_downed_counter;              /* starting count for PMA */
        u64 z_port_rcv_errors;                  /* starting count for PMA */
        u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
        u64 z_port_xmit_discards;               /* starting count for PMA */
        u64 z_port_xmit_data;                   /* starting count for PMA */
        u64 z_port_rcv_data;                    /* starting count for PMA */
        u64 z_port_xmit_packets;                /* starting count for PMA */
        u64 z_port_rcv_packets;                 /* starting count for PMA */
        u32 z_local_link_integrity_errors;      /* starting count for PMA */
        u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
        u32 z_vl15_dropped;                     /* starting count for PMA */
        u32 n_rc_resends;
        u32 n_rc_acks;
        u32 n_rc_qacks;
        u32 n_rc_delayed_comp;
        u32 n_seq_naks;
        u32 n_rdma_seq;
        u32 n_rnr_naks;
        u32 n_other_naks;
        u32 n_loop_pkts;
        u32 n_pkt_drops;
        u32 n_vl15_dropped;
        u32 n_rc_timeouts;
        u32 n_dmawait;
        u32 n_unaligned;
        u32 n_rc_dupreq;
        u32 n_rc_seqnak;
        u32 port_cap_flags;
        u32 pma_sample_start;
        u32 pma_sample_interval;
        __be16 pma_counter_select[5];
        u16 pma_tag;
        u16 pkey_violations;
        u16 qkey_violations;
        u16 mkey_violations;
        u16 mkey_lease_period;
        u16 sm_lid;
        u16 repress_traps;
        u8 sm_sl;
        u8 mkeyprot;
        u8 subnet_timeout;
        u8 vl_high_limit;
        u8 sl_to_vl[16];

        struct qib_opcode_stats opstats[128];
};

struct qib_ibdev {
        struct ib_device ibdev;
        struct list_head pending_mmaps;
        spinlock_t mmap_offset_lock;    /* protect mmap_offset */
        u32 mmap_offset;
        struct qib_mregion *dma_mr;

        /* QP numbers are shared by all IB ports */
        struct qib_qpn_table qpn_table;
        struct qib_lkey_table lk_table;
        struct list_head piowait;       /* list for wait PIO buf */
        struct list_head dmawait;       /* list for wait DMA */
        struct list_head txwait;        /* list for wait qib_verbs_txreq */
        struct list_head memwait;       /* list for wait kernel memory */
        struct list_head txreq_free;
        struct timer_list mem_timer;
        struct qib_qp **qp_table;
        struct qib_pio_header *pio_hdrs;
        dma_addr_t pio_hdrs_phys;
        /* list of QPs waiting for RNR timer */
        spinlock_t pending_lock;        /* protect wait lists, PMA counters, etc. */
        unsigned qp_table_size;         /* size of the hash table */
        spinlock_t qpt_lock;

        u32 n_piowait;
        u32 n_txwait;

        u32 n_pds_allocated;    /* number of PDs allocated for device */
        spinlock_t n_pds_lock;
        u32 n_ahs_allocated;    /* number of AHs allocated for device */
        spinlock_t n_ahs_lock;
        u32 n_cqs_allocated;    /* number of CQs allocated for device */
        spinlock_t n_cqs_lock;
        u32 n_qps_allocated;    /* number of QPs allocated for device */
        spinlock_t n_qps_lock;
        u32 n_srqs_allocated;   /* number of SRQs allocated for device */
        spinlock_t n_srqs_lock;
        u32 n_mcast_grps_allocated;     /* number of mcast groups allocated */
        spinlock_t n_mcast_grps_lock;
};

struct qib_verbs_counters {
        u64 symbol_error_counter;
        u64 link_error_recovery_counter;
        u64 link_downed_counter;
        u64 port_rcv_errors;
        u64 port_rcv_remphys_errors;
        u64 port_xmit_discards;
        u64 port_xmit_data;
        u64 port_rcv_data;
        u64 port_xmit_packets;
        u64 port_rcv_packets;
        u32 local_link_integrity_errors;
        u32 excessive_buffer_overrun_errors;
        u32 vl15_dropped;
};

static inline struct qib_mr *to_imr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qib_mr, ibmr);
}

static inline struct qib_pd *to_ipd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qib_pd, ibpd);
}

static inline struct qib_ah *to_iah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qib_ah, ibah);
}

static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qib_cq, ibcq);
}

static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qib_srq, ibsrq);
}

static inline struct qib_qp *to_iqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qib_qp, ibqp);
}

static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qib_ibdev, ibdev);
}

/*
 * Send if not busy or waiting for I/O and either
 * an RC response is pending or we can process send work requests.
 */
static inline int qib_send_ok(struct qib_qp *qp)
{
        return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) &&
                (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) ||
                 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
}

extern struct workqueue_struct *qib_wq;
extern struct workqueue_struct *qib_cq_wq;

/*
 * This must be called with s_lock held.
 */
static inline void qib_schedule_send(struct qib_qp *qp)
{
        if (qib_send_ok(qp)) {
                if (qp->processor_id == smp_processor_id())
                        queue_work(qib_wq, &qp->s_work);
                else
                        queue_work_on(qp->processor_id,
                                      qib_wq, &qp->s_work);
        }
}

static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
        u16 p1 = pkey1 & 0x7FFF;
        u16 p2 = pkey2 & 0x7FFF;

        /*
         * Low 15 bits must be non-zero and match, and
         * one of the two must be a full member.
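         * For example, 0xFFFF (a full member) paired with 0x7FFF (a
         * limited member) passes, while two limited members (both
         * 0x7FFF) do not.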
833 */ 834 return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0); 835 } 836 837 void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, 838 u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); 839 void qib_cap_mask_chg(struct qib_ibport *ibp); 840 void qib_sys_guid_chg(struct qib_ibport *ibp); 841 void qib_node_desc_chg(struct qib_ibport *ibp); 842 int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 843 struct ib_wc *in_wc, struct ib_grh *in_grh, 844 struct ib_mad *in_mad, struct ib_mad *out_mad); 845 int qib_create_agents(struct qib_ibdev *dev); 846 void qib_free_agents(struct qib_ibdev *dev); 847 848 /* 849 * Compare the lower 24 bits of the two values. 850 * Returns an integer <, ==, or > than zero. 851 */ 852 static inline int qib_cmp24(u32 a, u32 b) 853 { 854 return (((int) a) - ((int) b)) << 8; 855 } 856 857 struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid); 858 859 int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, 860 u64 *rwords, u64 *spkts, u64 *rpkts, 861 u64 *xmit_wait); 862 863 int qib_get_counters(struct qib_pportdata *ppd, 864 struct qib_verbs_counters *cntrs); 865 866 int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 867 868 int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 869 870 int qib_mcast_tree_empty(struct qib_ibport *ibp); 871 872 __be32 qib_compute_aeth(struct qib_qp *qp); 873 874 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn); 875 876 struct ib_qp *qib_create_qp(struct ib_pd *ibpd, 877 struct ib_qp_init_attr *init_attr, 878 struct ib_udata *udata); 879 880 int qib_destroy_qp(struct ib_qp *ibqp); 881 882 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err); 883 884 int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 885 int attr_mask, struct ib_udata *udata); 886 887 int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 888 int attr_mask, struct ib_qp_init_attr *init_attr); 889 890 unsigned qib_free_all_qps(struct qib_devdata *dd); 891 892 void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt); 893 894 void qib_free_qpn_table(struct qib_qpn_table *qpt); 895 896 void qib_get_credit(struct qib_qp *qp, u32 aeth); 897 898 unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult); 899 900 void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail); 901 902 void qib_put_txreq(struct qib_verbs_txreq *tx); 903 904 int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, 905 u32 hdrwords, struct qib_sge_state *ss, u32 len); 906 907 void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, 908 int release); 909 910 void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release); 911 912 void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, 913 int has_grh, void *data, u32 tlen, struct qib_qp *qp); 914 915 void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, 916 int has_grh, void *data, u32 tlen, struct qib_qp *qp); 917 918 int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); 919 920 void qib_rc_rnr_retry(unsigned long arg); 921 922 void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); 923 924 void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err); 925 926 int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); 927 928 void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, 929 int has_grh, void *data, u32 tlen, struct qib_qp *qp); 930 931 int 
int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);

int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);

int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
                struct qib_sge *isge, struct ib_sge *sge, int acc);

int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
                u32 len, u64 vaddr, u32 rkey, int acc);

int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                         struct ib_recv_wr **bad_wr);

struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
                              struct ib_srq_init_attr *srq_init_attr,
                              struct ib_udata *udata);

int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                   enum ib_srq_attr_mask attr_mask,
                   struct ib_udata *udata);

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int qib_destroy_srq(struct ib_srq *ibsrq);

void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);

int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
                            int comp_vector, struct ib_ucontext *context,
                            struct ib_udata *udata);

int qib_destroy_cq(struct ib_cq *ibcq);

int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
                              struct ib_phys_buf *buffer_list,
                              int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                              u64 virt_addr, int mr_access_flags,
                              struct ib_udata *udata);

int qib_dereg_mr(struct ib_mr *ibmr);

struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len);

struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
                                struct ib_device *ibdev, int page_list_len);

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);

int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);

struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                             struct ib_fmr_attr *fmr_attr);

int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                     int list_len, u64 iova);

int qib_unmap_fmr(struct list_head *fmr_list);

int qib_dealloc_fmr(struct ib_fmr *ibfmr);

void qib_release_mmap_info(struct kref *ref);

struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);

void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip,
                          u32 size, void *obj);

int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

int qib_get_rwqe(struct qib_qp *qp, int wr_id_only);

void qib_migrate_qp(struct qib_qp *qp);

int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0);

u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords);

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2);

void qib_do_send(struct work_struct *work);

void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status);
void qib_send_rc_ack(struct qib_qp *qp);

int qib_make_rc_req(struct qib_qp *qp);

int qib_make_uc_req(struct qib_qp *qp);

int qib_make_ud_req(struct qib_qp *qp);

int qib_register_ib_device(struct qib_devdata *);

void qib_unregister_ib_device(struct qib_devdata *);

void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);

void qib_ib_piobufavail(struct qib_devdata *);

unsigned qib_get_npkeys(struct qib_devdata *);

unsigned qib_get_pkey(struct qib_ibport *, unsigned);

extern const enum ib_wc_opcode ib_qib_wc_opcode[];

/*
 * HCA-independent IB PhysPortState values, returned by the
 * f_ibphys_portstate() routine.
 */
#define IB_PHYSPORTSTATE_SLEEP                  1
#define IB_PHYSPORTSTATE_POLL                   2
#define IB_PHYSPORTSTATE_DISABLED               3
#define IB_PHYSPORTSTATE_CFG_TRAIN              4
#define IB_PHYSPORTSTATE_LINKUP                 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER       6
#define IB_PHYSPORTSTATE_CFG_DEBOUNCE           8
#define IB_PHYSPORTSTATE_CFG_IDLE               0xB
#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN       0xC
#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT       0xE
#define IB_PHYSPORTSTATE_RECOVERY_IDLE          0xF
#define IB_PHYSPORTSTATE_CFG_ENH                0x10
#define IB_PHYSPORTSTATE_CFG_WAIT_ENH           0x13

extern const int ib_qib_state_ops[];

extern __be64 ib_qib_sys_image_guid;    /* in network order */

extern unsigned int ib_qib_lkey_table_size;

extern unsigned int ib_qib_max_cqes;

extern unsigned int ib_qib_max_cqs;

extern unsigned int ib_qib_max_qp_wrs;

extern unsigned int ib_qib_max_qps;

extern unsigned int ib_qib_max_sges;

extern unsigned int ib_qib_max_mcast_grps;

extern unsigned int ib_qib_max_mcast_qp_attached;

extern unsigned int ib_qib_max_srqs;

extern unsigned int ib_qib_max_srq_sges;

extern unsigned int ib_qib_max_srq_wrs;

extern const u32 ib_qib_rnr_table[];

extern struct ib_dma_mapping_ops qib_dma_mapping_ops;

#endif          /* QIB_VERBS_H */