/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/hardirq.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

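/*
 * CQs no larger than MTHCA_MAX_DIRECT_CQ_SIZE bytes are allocated as
 * one physically contiguous buffer; bigger CQs use a list of pages
 * (see get_cqe_from_buf() below).
 */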
enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

enum {
	MTHCA_ATOMIC_BYTE_LEN = 8
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 error_eqn;	/* Tavor only */
	__be32 comp_eqn;
	__be32 pd;
	__be32 lkey;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	__be32 cqn;
	__be32 ci_db;		/* Arbel only */
	__be32 state_db;	/* Arbel only */
	u32    reserved;
} __attribute__((packed));

#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)

enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};

enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};

struct mthca_cqe {
	__be32 my_qpn;
	__be32 my_ee;
	__be32 rqpn;
	u8     sl_ipok;
	u8     g_mlpath;
	__be16 rlid;
	__be32 imm_etype_pkey_eec;
	__be32 byte_cnt;
	__be32 wqe;
	u8     opcode;
	u8     is_send;
	u8     reserved;
	u8     owner;
};

struct mthca_err_cqe {
	__be32 my_qpn;
	u32    reserved1[3];
	u8     syndrome;
	u8     vendor_err;
	__be16 db_cnt;
	u32    reserved2;
	__be32 wqe;
	u8     opcode;
	u8     reserved3[2];
	u8     owner;
};

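/*
 * The high bit of the owner byte tracks CQE ownership: hardware owns
 * an entry until it completes it, and software hands polled entries
 * back to hardware with set_cqe_hw().
 */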
#define MTHCA_CQ_ENTRY_OWNER_SW        (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW        (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)

static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
						 int entry)
{
	if (buf->is_direct)
		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}

static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	return get_cqe_from_buf(&cq->buf, entry);
}

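/*
 * Return the CQE if software owns it, or NULL if hardware still does.
 * Note that cq->ibcq.cqe is the CQ size minus 1, so it doubles as the
 * index wrap mask used by next_cqe_sw() (CQ sizes are powers of two).
 */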
static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}

static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}

static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}

static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}

/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of CQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}
}

void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
		    enum ib_event_type event_type)
{
	struct mthca_cq *cq;
	struct ib_event event;

	spin_lock(&dev->cq_table.lock);

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		++cq->refcount;

	spin_unlock(&dev->cq_table.lock);

	if (!cq) {
		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.cq = &cq->ibcq;
	if (cq->ibcq.event_handler)
		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

	spin_lock(&dev->cq_table.lock);
	if (!--cq->refcount)
		wake_up(&cq->wait);
	spin_unlock(&dev->cq_table.lock);
}

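/*
 * Error CQEs encode the send/receive distinction in bit 0 of the
 * opcode field; successful CQEs use the high bit of is_send.
 */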
static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
	    MTHCA_ERROR_CQE_OPCODE_MASK)
		return !(cqe->opcode & 0x01);
	else
		return !(cqe->is_send & 0x80);
}

void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
		    struct mthca_srq *srq)
{
	struct mthca_cqe *cqe;
	u32 prod_index;
	int i, nfreed = 0;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cq->cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn)) {
			if (srq && is_recv_cqe(cqe))
				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
			++nfreed;
		} else if (nfreed)
			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
			       cqe, MTHCA_CQ_ENTRY_SIZE);
	}

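	/*
	 * The surviving entries were copied nfreed slots toward the
	 * top of the CQ, so the nfreed slots at the consumer index
	 * are now stale: hand them back to hardware and advance the
	 * consumer index past them.
	 */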
	if (nfreed) {
		for (i = 0; i < nfreed; ++i)
			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
}

void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
{
	int i;

	/*
	 * In Tavor mode, the hardware keeps the consumer and producer
	 * indices mod the CQ size.  Since we might be making the CQ
	 * bigger, we need to deal with the case where the producer
	 * index wrapped around before the CQ was resized.
	 */
	if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
	    cq->ibcq.cqe < cq->resize_buf->cqe) {
		cq->cons_index &= cq->ibcq.cqe;
		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
			cq->cons_index -= cq->ibcq.cqe + 1;
	}

	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
					i & cq->resize_buf->cqe),
		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
}

int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
{
	int ret;
	int i;

	ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
			      MTHCA_MAX_DIRECT_CQ_SIZE,
			      &buf->queue, &buf->is_direct,
			      &dev->driver_pd, 1, &buf->mr);
	if (ret)
		return ret;

	for (i = 0; i < nent; ++i)
		set_cqe_hw(get_cqe_from_buf(buf, i));

	return 0;
}

void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
{
	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
		       buf->is_direct, &buf->mr);
}

static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			     struct mthca_qp *qp, int wqe_index, int is_send,
			     struct mthca_err_cqe *cqe,
			     struct ib_wc *entry, int *free_cqe)
{
	int dbd;
	__be32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status, vendor error
	 * (and freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	entry->vendor_err = cqe->vendor_err;

	/*
	 * Mem-free HCAs always generate one CQE per WQE, even in the
	 * error case, so we don't have to check the doorbell count, etc.
	 */
	if (mthca_is_memfree(dev))
		return;

	mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return;

	be16_add_cpu(&cqe->db_cnt, -dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;
}

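/*
 * Poll one CQE from cq and fill in entry.  *cur_qp caches the QP of
 * the previous completion so the QP table lookup can be skipped when
 * consecutive CQEs belong to the same QP.  Returns -EAGAIN when the
 * CQ is empty.
 */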
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;
	u16 checksum;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else if ((*cur_qp)->ibqp.srq) {
		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
		u32 wqe = be32_to_cpu(cqe->wqe);
		wq = NULL;
		wqe_index = wqe >> srq->wqe_shift;
		entry->wr_id = srq->wrid[wqe_index];
		mthca_free_srq_wqe(srq, wqe);
	} else {
		s32 wqe;
		wq = &(*cur_qp)->rq;
		wqe = be32_to_cpu(cqe->wqe);
		wqe_index = wqe >> wq->wqe_shift;
		/*
		 * WQE addr == base - 1 might be reported in receive completion
		 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
		 * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
		 */
		if (unlikely(wqe_index < 0))
			wqe_index = wq->max - 1;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq) {
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;
		else
			wq->tail += wqe_index + wq->max - wq->last_comp;

		wq->last_comp = wqe_index;
	}

	if (is_error) {
		handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				 (struct mthca_err_cqe *) cqe,
				 entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			entry->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode = IB_WC_COMP_SWAP;
			entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode = IB_WC_FETCH_ADD;
			entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
			break;
		default:
			entry->opcode = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid = be16_to_cpu(cqe->rlid);
		entry->sl = cqe->sl_ipok >> 4;
		entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
		entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
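		/*
		 * The received-packet checksum is split across the
		 * top bytes of rqpn and my_ee; a value of 0xffff
		 * together with the IP-OK bit of sl_ipok means the
		 * hardware verified the IP checksum.
		 */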
		checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
			   ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
		entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ?
				   IB_WC_IP_CSUM_OK : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}

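/*
 * Poll for up to num_entries completions; entry must have room for
 * num_entries work completions.  Returns the number of entries
 * polled, or a negative error code.  A typical consumer loop, as a
 * sketch (handle_completion is a hypothetical handler):
 *
 *	struct ib_wc wc[8];
 *	int i, n = ib_poll_cq(cq, 8, wc);
 *
 *	for (i = 0; i < n; ++i)
 *		handle_completion(&wc[i]);
 */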
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	npolled = 0;
repoll:
	while (npolled < num_entries) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
		++npolled;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	/*
	 * If a CQ resize is in progress and we discovered that the
	 * old buffer is empty, then peek in the new buffer, and if
	 * it's not empty, switch to the new buffer and continue
	 * polling there.
	 */
	if (unlikely(err == -EAGAIN && cq->resize_buf &&
		     cq->resize_buf->state == CQ_RESIZE_READY)) {
		/*
		 * In Tavor mode, the hardware keeps the producer
		 * index modulo the CQ size.  Since we might be making
		 * the CQ bigger, we need to mask our consumer index
		 * using the size of the old CQ buffer before looking
		 * in the new CQ buffer.
		 */
		if (!mthca_is_memfree(dev))
			cq->cons_index &= cq->ibcq.cqe;

		if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
					    cq->cons_index & cq->resize_buf->cqe))) {
			struct mthca_cq_buf tbuf;
			int tcqe;

			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			cq->resize_buf->buf   = tbuf;
			cq->resize_buf->cqe   = tcqe;
			cq->resize_buf->state = CQ_RESIZE_SWAPPED;

			goto repoll;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}

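/*
 * Request a completion notification.  Tavor is armed with a doorbell
 * write alone; Arbel (mem-free) mode below also updates the arm
 * doorbell record with the consumer index and a sequence number
 * (arm_sn) so that rearming can't race with event generation.
 */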
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
{
	u32 dbhi = ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
		    MTHCA_TAVOR_CQ_DB_REQ_NOT) |
		to_mcq(cq)->cqn;

	mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}

int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 db_rec[2];
	u32 dbhi;
	u32 sn = cq->arm_sn & 3;

	db_rec[0] = cpu_to_be32(cq->cons_index);
	db_rec[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				((flags & IB_CQ_SOLICITED_MASK) ==
				 IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(db_rec, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	dbhi = (sn << 28) |
		((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		 MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
		 MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn;

	mthca_write64(dbhi, cq->cons_index,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}

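/*
 * nent must be a power of two: the log size programmed into the CQ
 * context is ffs(nent) - 1, and cq->ibcq.cqe is stored as nent - 1
 * and used as an index mask.
 */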
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
		if (err)
			goto err_out_mailbox;
	}

	spin_lock_init(&cq->lock);
	cq->refcount = 1;
	init_waitqueue_head(&cq->wait);
	mutex_init(&cq->mutex);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->buf.mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel)
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}

static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int c;

	spin_lock_irq(&dev->cq_table.lock);
	c = cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	return c;
}

void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);

	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	--cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

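	/*
	 * Make sure any interrupt handler that might still be
	 * referencing the CQ has finished before we free it.
	 */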
	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	wait_event(cq->wait, !get_cq_refcount(dev, cq));

	if (cq->is_kernel) {
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}