/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <sys/vmem.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <netinet/toecore.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/uverbs_ioctl.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "t4_l2t.h"

#define DRV_NAME "iw_cxgbe"
#define MOD DRV_NAME ":"
#define KTR_IW_CXGBE KTR_SPARE3

extern int c4iw_debug;
extern int use_dsgl;
extern int inline_threshold;

#define PDBG(fmt, args...) \
do { \
	if (c4iw_debug) \
		printf(MOD fmt, ## args); \
} while (0)

#include "t4.h"

static inline void *cplhdr(struct mbuf *m)
{
	return mtod(m, void *);
}

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)

#define C4IW_ID_TABLE_F_RANDOM 1 /* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2 /* Table is initially empty */
#define C4IW_MAX_PAGE_SIZE 0x8000000

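/*
 * Resource ID allocator.  As a rough sketch of its contract (the actual
 * implementation lives in the driver's id table code, not in this header):
 * 'table' is a bitmap covering IDs in [start, start + max), allocation
 * scans forward from the 'last' hint, and C4IW_ID_TABLE_F_RANDOM makes the
 * returned IDs less predictable.
 */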
struct c4iw_id_table {
	u32 flags;
	u32 start;	/* logical minimal id */
	u32 last;	/* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	struct mutex lock;
};

enum c4iw_rdev_flags {
	T4_IW_STOPPED = (1 << 0),
	T4_STATUS_PAGE_DISABLED = (1 << 1),
};

struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
};

struct c4iw_hw_queue {
	int t4_eq_status_entries;
	int t4_max_eq_size;
	int t4_max_iq_size;
	int t4_max_rq_size;
	int t4_max_sq_size;
	int t4_max_qp_depth;
	int t4_max_cq_depth;
	int t4_stat_len;
};

struct c4iw_rdev {
	struct adapter *adap;
	struct c4iw_resource resource;
	unsigned long qpshift;
	u32 qpmask;
	unsigned long cqshift;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	vmem_t *rqt_arena;
	vmem_t *pbl_arena;
	u32 flags;
	struct c4iw_stats stats;
	struct c4iw_hw_queue hw_queue;
	struct t4_dev_status_page *status_page;
	unsigned long bar2_pa;
	void __iomem *bar2_kva;
	unsigned int bar2_len;
	struct workqueue_struct *free_workq;
};

static inline int c4iw_stopped(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_IW_STOPPED;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return (int)(rdev->adap->vres.stag.size >> 5);
}

static inline int t4_max_fr_depth(struct c4iw_rdev *rdev, bool use_dsgl)
{
	if (rdev->adap->params.ulptx_memwrite_dsgl && use_dsgl)
		return rdev->adap->params.dev_512sgl_mr ?
		    T4_MAX_FR_FW_DSGL_DEPTH : T4_MAX_FR_DSGL_DEPTH;
	else
		return T4_MAX_FR_IMMD_DEPTH;
}

#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
	int ret;
	struct completion completion;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
}

static inline int
c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
		u32 hwtid, u32 qpid, struct socket *so, const char *func)
{
	struct adapter *sc = rdev->adap;
	unsigned to = C4IW_WR_TO;
	int ret;
	int timedout = 0;
	struct timeval t1, t2;

	if (c4iw_stopped(rdev)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	getmicrotime(&t1);
	do {
		/*
		 * If waiting for reply in rdma_init()/rdma_fini() threads,
		 * then check if there are any connection errors.
		 */
		if (so && so->so_error) {
			wr_waitp->ret = -ECONNRESET;
			CTR5(KTR_IW_CXGBE, "%s - Connection ERROR %u for sock %p "
			    "tid %u qpid %u", func,
			    so->so_error, so, hwtid, qpid);
			break;
		}

		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
		if (!ret) {
			getmicrotime(&t2);
			timevalsub(&t2, &t1);
			printf("%s - Device %s not responding after %ld.%06ld "
			    "seconds - tid %u qpid %u\n", func,
			    device_get_nameunit(sc->dev), t2.tv_sec, t2.tv_usec,
			    hwtid, qpid);
			if (c4iw_stopped(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			to = to << 2;
			timedout = 1;
		}
	} while (!ret);

out:
	if (timedout) {
		getmicrotime(&t2);
		timevalsub(&t2, &t1);
		printf("%s - Device %s reply after %ld.%06ld seconds - "
		    "tid %u qpid %u\n", func, device_get_nameunit(sc->dev),
		    t2.tv_sec, t2.tv_usec, hwtid, qpid);
	}
	if (wr_waitp->ret)
		CTR4(KTR_IW_CXGBE, "%p: FW reply %d tid %u qpid %u", sc,
		    wr_waitp->ret, hwtid, qpid);
	return (wr_waitp->ret);
}
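
/*
 * Typical wr_wait usage (a hypothetical sketch, not a specific caller in
 * this file): the posting thread initializes the wait object, sends a FW
 * work request whose reply will signal the completion, and then blocks in
 * c4iw_wait_for_reply().  Note that every timeout quadruples the next wait
 * interval (to <<= 2) and logs a warning.
 *
 *	struct c4iw_wr_wait wr_wait;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	... post a FW work request that carries &wr_wait as its cookie ...
 *	ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, so, __func__);
 *
 * The reply handler finishes the exchange by calling
 * c4iw_wake_up(&wr_wait, status).
 */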

struct c4iw_dev {
	struct ib_device ibdev;
	struct pci_dev pdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	struct idr cqidr;
	struct idr qpidr;
	struct idr mmidr;
	spinlock_t lock;
	struct dentry *debugfs_root;
	u32 avail_ird;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;
	int newid;

	do {
		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
			return -ENOMEM;
		if (lock)
			spin_lock_irq(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		BUG_ON(!ret && newid != id);
		if (lock)
			spin_unlock_irq(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}
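
/*
 * Example pairing (a hypothetical sketch): a QP is published in qpidr
 * under its SQ qid at create time and removed at destroy time, so that
 * get_qhp() can resolve the qid carried in a CQE or async error event:
 *
 *	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *	...
 *	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 *
 * The _nolock variants are for callers that already hold rhp->lock.
 */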

extern int c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
	return min(dev->rdev.adap->params.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_addr;
	u32 pbl_size;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invaliate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
	u64 *mpl;
	dma_addr_t mpl_addr;
	u32 max_mpl_len;
	u32 mpl_len;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};

struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
	u8 send_term;
};

struct c4iw_ib_srq {
	struct ib_srq ibsrq;
};

struct c4iw_ib_ah {
	struct ib_ah ibah;
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	struct kref kref;
	wait_queue_head_t wait;
	struct timer_list timer;
	int sq_sig_all;
	struct work_struct free_work;
	struct c4iw_ucontext *ucontext;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {
		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
			    __func__, key, (unsigned long long)mm->addr,
			    mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
	    (unsigned long long)mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}
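
/*
 * Example of the mmap key handshake (a hypothetical sketch): when the
 * kernel needs to expose a doorbell or queue region to userspace, it hands
 * out the next key from ucontext->key, records the physical address under
 * that key, and returns the key to the user as an mmap offset:
 *
 *	mm->key = uctx->key;
 *	uctx->key += PAGE_SIZE;
 *	mm->addr = paddr;
 *	mm->len = PAGE_SIZE;
 *	insert_mmap(uctx, mm);
 *
 * The driver's mmap handler then uses remove_mmap(uctx, key, len) to
 * translate the offset back to the address; each entry is consumed by a
 * single mmap.
 */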

enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1 << 1,
	C4IW_QP_ATTR_RQ_DB = 1 << 2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
		   struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal);

enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};

/*
 * IW_CXGBE event bits.
 * These bits are used for handling all events for a particular 'ep' serially.
 */
#define C4IW_EVENT_SOCKET	0x0001
#define C4IW_EVENT_TIMEOUT	0x0002
#define C4IW_EVENT_TERM		0x0004

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN

static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}
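
/*
 * Worked example: a memory registration requesting
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE maps to a TPT entry with
 * FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_REM_WRITE |
 * FW_RI_MEM_ACCESS_LOCAL_READ; local read access is always granted.
 */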

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}

enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA	256
#define MPA_ENHANCED_RDMA_CONN	0x10
#define MPA_REJECT		0x20
#define MPA_CRC			0x40
#define MPA_MARKERS		0x80
#define MPA_FLAGS_MASK		0xE0

#define MPA_V2_PEER2PEER_MODEL		0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR	0x4000
#define MPA_V2_RDMA_WRITE_RTR		0x8000
#define MPA_V2_RDMA_READ_RTR		0x4000
#define MPA_V2_IRD_ORD_MASK		0x3FFF
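
/*
 * Note: the MPA v2 flag values overlap (0x8000 and 0x4000 each appear
 * twice) because they are defined for different 16-bit fields of struct
 * mpa_v2_conn_params below: as this driver's MPA v2 handling is understood,
 * the peer-to-peer model and zero-length FPDU RTR bits travel in 'ird',
 * while the RDMA write/read RTR bits travel in 'ord'.  The low 14 bits of
 * each field (MPA_V2_IRD_ORD_MASK) carry the actual IRD/ORD values.
 */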

#define c4iw_put_ep(ep) { \
	CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
	    __func__, __LINE__, ep, kref_read(&(ep)->kref)); \
	WARN_ON(kref_read(&(ep)->kref) < 1); \
	kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
	CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
	    __func__, __LINE__, ep, kref_read(&(ep)->kref)); \
	kref_get(&((ep)->kref)); \
}

void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[0];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
	LAYER_RDMAP = 0x00,
	LAYER_DDP = 0x10,
	LAYER_MPA = 0x20,
	RDMAP_LOCAL_CATA = 0x00,
	RDMAP_REMOTE_PROT = 0x01,
	RDMAP_REMOTE_OP = 0x02,
	DDP_LOCAL_CATA = 0x00,
	DDP_TAGGED_ERR = 0x01,
	DDP_UNTAGGED_ERR = 0x02,
	DDP_LLP = 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG = 0x00,
	RDMAP_BASE_BOUNDS = 0x01,
	RDMAP_ACC_VIOL = 0x02,
	RDMAP_STAG_NOT_ASSOC = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_VERS = 0x05,
	RDMAP_INV_OPCODE = 0x06,
	RDMAP_STREAM_CATA = 0x07,
	RDMAP_GLOBAL_CATA = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG = 0x00,
	DDPT_BASE_BOUNDS = 0x01,
	DDPT_STAG_NOT_ASSOC = 0x02,
	DDPT_TO_WRAP = 0x03,
	DDPT_INV_VERS = 0x04,
	DDPU_INV_QN = 0x01,
	DDPU_INV_MSN_NOBUF = 0x02,
	DDPU_INV_MSN_RANGE = 0x03,
	DDPU_INV_MO = 0x04,
	DDPU_MSG_TOOBIG = 0x05,
	DDPU_INV_VERS = 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR = 0x02,
	MPA_MARKER_ERR = 0x03,
	MPA_LOCAL_CATA = 0x05,
	MPA_INSUFF_IRD = 0x06,
	MPA_NOMATCH_RTR = 0x07,
};

enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS = 0,
	ABORT_REQ_IN_PROGRESS = 1,
	RELEASE_RESOURCES = 2,
	CLOSE_SENT = 3,
	TIMEOUT = 4,
	QP_REFERENCED = 5,
	STOP_MPA_TIMER = 7,
};

enum c4iw_ep_history {
	ACT_OPEN_REQ = 0,
	ACT_OFLD_CONN = 1,
	ACT_OPEN_RPL = 2,
	ACT_ESTAB = 3,
	PASS_ACCEPT_REQ = 4,
	PASS_ESTAB = 5,
	ABORT_UPCALL = 6,
	ESTAB_UPCALL = 7,
	CLOSE_UPCALL = 8,
	ULP_ACCEPT = 9,
	ULP_REJECT = 10,
	TIMEDOUT = 11,
	PEER_ABORT = 12,
	PEER_CLOSE = 13,
	CONNREQ_UPCALL = 14,
	ABORT_CONN = 15,
	DISCONN_UPCALL = 16,
	EP_DISC_CLOSE = 17,
	EP_DISC_ABORT = 18,
	CONN_RPL_UPCALL = 19,
	ACT_RETRY_NOMEM = 20,
	ACT_RETRY_INUSE = 21,
	CLOSE_CON_RPL = 22,
	EP_DISC_FAIL = 24,
	QP_REFED = 25,
	QP_DEREFED = 26,
	CM_ID_REFED = 27,
	CM_ID_DEREFED = 28
};

struct c4iw_ep_common {
	TAILQ_ENTRY(c4iw_ep_common) entry;	/* Work queue attachment */
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
	unsigned long history;
	int rpl_err;
	int rpl_done;
	struct thread *thread;
	struct socket *so;
	int ep_events;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
	struct list_head listen_ep_list;	/* list of all listener eps
						   bound to one port address */
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_listen_ep *parent_ep;
	struct timer_list timer;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
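
/*
 * Worked example: for a 1 MiB receive window (win = 1048576),
 * 65535 << 4 = 1048560 is still smaller than win, so the loop settles on
 * wscale = 5; the shift count is capped at 14, the maximum window scale
 * permitted by RFC 7323.
 */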

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg, struct ib_udata *udata);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		   int sg_nents, unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int c4iw_create_cq(struct ib_cq *ibcq,
		   const struct ib_cq_init_attr *attr,
		   struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
void c4iw_flush_hw_cq(struct c4iw_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
void t4_dump_stag(struct adapter *sc, const u32 stag);
void t4_dump_all_stag(struct adapter *sc);
#endif /* __IW_CXGB4_H__ */