Lines matching full:cqe (identifier references in drivers/infiniband/hw/mthca/mthca_cq.c)

174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)  in cqe_sw()  argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw()
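These three helpers implement the ownership handshake at the heart of the CQ: a CQE belongs to the hardware until it clears MTHCA_CQ_ENTRY_OWNER_HW in the owner byte, software consumes the entry, and set_cqe_hw() hands the slot back. The consumer index runs freely and is reduced with the mask cq->ibcq.cqe (the ring size minus one). Below is a minimal userspace sketch of the same discipline; cqe_model, poll_ring, and recycle_cqe are hypothetical names, and the 0x80 owner bit and 32-byte entry size are assumptions taken from the driver's constants.

#include <stdint.h>

#define OWNER_HW 0x80  /* stand-in for MTHCA_CQ_ENTRY_OWNER_HW (assumed 0x80) */

struct cqe_model {
    uint8_t payload[31];  /* assumes 32-byte entries, per MTHCA_CQ_ENTRY_SIZE */
    uint8_t owner;        /* hardware clears OWNER_HW when the entry is valid */
};

struct cqe_model *poll_ring(struct cqe_model *ring,
                            uint32_t *cons_index, uint32_t cqe_mask)
{
    struct cqe_model *cqe = &ring[*cons_index & cqe_mask];

    if (cqe->owner & OWNER_HW)
        return NULL;      /* still hardware-owned: nothing to poll */

    ++(*cons_index);      /* free-running index, masked on every access */
    return cqe;
}

void recycle_cqe(struct cqe_model *cqe)
{
    cqe->owner = OWNER_HW;  /* like set_cqe_hw(): hand the slot back */
}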
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe()
194 mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n", in dump_cqe()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
264 static inline int is_recv_cqe(struct mthca_cqe *cqe) in is_recv_cqe() argument
266 if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == in is_recv_cqe()
268 return !(cqe->opcode & 0x01); in is_recv_cqe()
270 return !(cqe->is_send & 0x80); in is_recv_cqe()
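is_recv_cqe() copes with two encodings of the send/receive direction: error CQEs (those whose opcode masked with MTHCA_ERROR_CQE_OPCODE_MASK equals the mask itself) carry it in opcode bit 0, while ordinary CQEs carry it in bit 7 of is_send. The same test reappears in mthca_poll_one() at line 512 below. A hedged standalone restatement, assuming the mask value is 0xfe:

#include <stdbool.h>
#include <stdint.h>

#define ERROR_OPCODE_MASK 0xfe  /* assumed value of MTHCA_ERROR_CQE_OPCODE_MASK */

bool model_is_recv(uint8_t opcode, uint8_t is_send_byte)
{
    if ((opcode & ERROR_OPCODE_MASK) == ERROR_OPCODE_MASK)
        return !(opcode & 0x01);    /* error CQE: opcode bit 0 set on sends */
    return !(is_send_byte & 0x80);  /* normal CQE: is_send bit 7 set on sends */
}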
276 struct mthca_cqe *cqe; in mthca_cq_clean() local
290 cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe)); in mthca_cq_clean()
292 if (prod_index == cq->cons_index + cq->ibcq.cqe) in mthca_cq_clean()
304 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in mthca_cq_clean()
305 if (cqe->my_qpn == cpu_to_be32(qpn)) { in mthca_cq_clean()
306 if (srq && is_recv_cqe(cqe)) in mthca_cq_clean()
307 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); in mthca_cq_clean()
310 memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), in mthca_cq_clean()
311 cqe, MTHCA_CQ_ENTRY_SIZE); in mthca_cq_clean()
316 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); in mthca_cq_clean()
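mthca_cq_clean() is the teardown path: when a QP is destroyed, every pending CQE for its QPN must disappear without disturbing completions for other QPs. The loop above walks backwards from the producer side, counts doomed entries in nfreed, and slides each survivor nfreed slots toward the producer, leaving the freed slots contiguous at the consumer side where set_cqe_hw() recycles them. A compact model of that squash-and-shift on a plain ring, hypothetical names throughout:

#include <stdint.h>

struct entry {
    uint32_t qpn;      /* stand-in for cqe->my_qpn */
    uint32_t payload;
};

unsigned int squash(struct entry *ring, uint32_t mask,
                    uint32_t cons, uint32_t prod, uint32_t dead_qpn)
{
    unsigned int nfreed = 0;

    /* Newest entry first, mirroring the --prod_index loop above. */
    while ((int)--prod - (int)cons >= 0) {
        struct entry *e = &ring[prod & mask];

        if (e->qpn == dead_qpn)
            ++nfreed;                           /* drop this entry */
        else if (nfreed)
            ring[(prod + nfreed) & mask] = *e;  /* slide survivor up */
    }
    return nfreed;  /* caller recycles slots [cons, cons + nfreed) */
}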
336 cq->ibcq.cqe < cq->resize_buf->cqe) { in mthca_cq_resize_copy_cqes()
337 cq->cons_index &= cq->ibcq.cqe; in mthca_cq_resize_copy_cqes()
338 if (cqe_sw(get_cqe(cq, cq->ibcq.cqe))) in mthca_cq_resize_copy_cqes()
339 cq->cons_index -= cq->ibcq.cqe + 1; in mthca_cq_resize_copy_cqes()
342 for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i) in mthca_cq_resize_copy_cqes()
344 i & cq->resize_buf->cqe), in mthca_cq_resize_copy_cqes()
345 get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE); in mthca_cq_resize_copy_cqes()
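The resize copy drains every still-software-owned CQE from the old buffer into the new one. The trick is a single free-running index masked by each buffer's own size mask, so each entry keeps its logical position in the completion stream across the resize; the block just before the loop handles Tavor-mode wraparound, where the hardware keeps its indices modulo the old CQ size. A sketch of the copy loop, reusing the cqe_model types from the first example:

/* Reuses struct cqe_model and OWNER_HW from the sketch above. */
void resize_copy(struct cqe_model *old_ring, uint32_t old_mask,
                 struct cqe_model *new_ring, uint32_t new_mask,
                 uint32_t cons_index)
{
    uint32_t i;

    /*
     * One free-running index, two masks: logical slot i lives at
     * i & old_mask in the old buffer and at i & new_mask in the new
     * one, so entries keep their stream position across the resize.
     */
    for (i = cons_index; !(old_ring[i & old_mask].owner & OWNER_HW); ++i)
        new_ring[i & new_mask] = old_ring[i & old_mask];
}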
366 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe) in mthca_free_cq_buf() argument
368 mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue, in mthca_free_cq_buf()
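One convention worth flagging: in this file a cqe count is stored as nent - 1 (see cq->ibcq.cqe = nent - 1 at line 776 below), which lets it double as the ring mask; that is why mthca_free_cq_buf() multiplies cqe + 1 by the entry size. A trivial restatement, assuming 32-byte entries per MTHCA_CQ_ENTRY_SIZE:

#include <stddef.h>

enum { CQ_ENTRY_SIZE = 32 };  /* assumed value of MTHCA_CQ_ENTRY_SIZE */

/* cqe is stored as nent - 1, so the buffer really holds cqe + 1 entries. */
size_t cq_buf_bytes(unsigned int cqe)
{
    return ((size_t)cqe + 1) * CQ_ENTRY_SIZE;
}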
374 struct mthca_err_cqe *cqe, in handle_error_cqe() argument
380 if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { in handle_error_cqe()
383 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), in handle_error_cqe()
385 dump_cqe(dev, cqe); in handle_error_cqe()
392 switch (cqe->syndrome) { in handle_error_cqe()
452 entry->vendor_err = cqe->vendor_err; in handle_error_cqe()
455 * Mem-free HCAs always generate one CQE per WQE, even in the in handle_error_cqe()
465 * doorbell count, free the CQE. Otherwise just update it for in handle_error_cqe()
468 if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd)) in handle_error_cqe()
471 be16_add_cpu(&cqe->db_cnt, -dbd); in handle_error_cqe()
472 cqe->wqe = new_wqe; in handle_error_cqe()
473 cqe->syndrome = SYNDROME_WR_FLUSH_ERR; in handle_error_cqe()
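handle_error_cqe() does not emit one completion and move on: it rewrites the error CQE in place so that the same slot, polled repeatedly, flushes each remaining WQE of the chain with SYNDROME_WR_FLUSH_ERR, decrementing the doorbell count as it goes (the driver keeps db_cnt big-endian, hence be16_add_cpu). A host-order model of that recycle-in-place decision; the field names and the flush constant are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>

struct err_cqe_model {
    uint32_t wqe;       /* address of the WQE this entry reports on */
    int16_t  db_cnt;    /* doorbells left to account for */
    uint8_t  syndrome;
};

enum { SYNDROME_WR_FLUSH = 0x10 };  /* illustrative constant only */

/*
 * Returns true when the chain is fully accounted and the CQE can be
 * handed back to hardware; otherwise the slot is left rewritten so
 * the next poll reports the next WQE as flushed.
 */
bool consume_error_cqe(struct err_cqe_model *cqe, uint32_t next_wqe,
                       int dbd /* doorbells consumed by this WQE */)
{
    if (cqe->db_cnt - dbd <= 0)
        return true;

    cqe->db_cnt  -= dbd;
    cqe->wqe      = next_wqe;
    cqe->syndrome = SYNDROME_WR_FLUSH;
    return false;
}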
485 struct mthca_cqe *cqe; in mthca_poll_one() local
493 cqe = next_cqe_sw(cq); in mthca_poll_one()
494 if (!cqe) in mthca_poll_one()
504 mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n", in mthca_poll_one()
505 cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn), in mthca_poll_one()
506 be32_to_cpu(cqe->wqe)); in mthca_poll_one()
507 dump_cqe(dev, cqe); in mthca_poll_one()
510 is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == in mthca_poll_one()
512 is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; in mthca_poll_one()
514 if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) { in mthca_poll_one()
521 be32_to_cpu(cqe->my_qpn) & in mthca_poll_one()
525 be32_to_cpu(cqe->my_qpn) & 0xffffff); in mthca_poll_one()
535 wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset) in mthca_poll_one()
541 u32 wqe = be32_to_cpu(cqe->wqe); in mthca_poll_one()
549 wqe = be32_to_cpu(cqe->wqe); in mthca_poll_one()
572 (struct mthca_err_cqe *) cqe, in mthca_poll_one()
579 switch (cqe->opcode) { in mthca_poll_one()
596 entry->byte_len = be32_to_cpu(cqe->byte_cnt); in mthca_poll_one()
611 entry->byte_len = be32_to_cpu(cqe->byte_cnt); in mthca_poll_one()
612 switch (cqe->opcode & 0x1f) { in mthca_poll_one()
616 entry->ex.imm_data = cqe->imm_etype_pkey_eec; in mthca_poll_one()
622 entry->ex.imm_data = cqe->imm_etype_pkey_eec; in mthca_poll_one()
630 entry->slid = be16_to_cpu(cqe->rlid); in mthca_poll_one()
631 entry->sl = cqe->sl_ipok >> 4; in mthca_poll_one()
632 entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff; in mthca_poll_one()
633 entry->dlid_path_bits = cqe->g_mlpath & 0x7f; in mthca_poll_one()
634 entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16; in mthca_poll_one()
635 entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0; in mthca_poll_one()
636 checksum = (be32_to_cpu(cqe->rqpn) >> 24) | in mthca_poll_one()
637 ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00); in mthca_poll_one()
638 entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ? in mthca_poll_one()
646 set_cqe_hw(cqe); in mthca_poll_one()
698 cq->cons_index &= cq->ibcq.cqe; in mthca_poll_cq()
701 cq->cons_index & cq->resize_buf->cqe))) { in mthca_poll_cq()
706 tcqe = cq->ibcq.cqe; in mthca_poll_cq()
708 cq->ibcq.cqe = cq->resize_buf->cqe; in mthca_poll_cq()
711 cq->resize_buf->cqe = tcqe; in mthca_poll_cq()
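mthca_poll_cq() is the backend of ib_poll_cq() for this driver; the block above performs the deferred buffer swap once polling has drained into the resize buffer. From the consumer's side the contract is the ordinary verbs one. A minimal kernel-style drain loop against the standard ib_poll_cq() interface (drain_cq is a hypothetical caller):

#include <rdma/ib_verbs.h>

static void drain_cq(struct ib_cq *cq)
{
    struct ib_wc wc[16];
    int n, i;

    /* ib_poll_cq() returns the number of completions reaped, 0 if empty. */
    while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
        for (i = 0; i < n; ++i) {
            if (wc[i].status != IB_WC_SUCCESS)
                pr_warn("wr_id %llu failed with status %d\n",
                        (unsigned long long)wc[i].wr_id, wc[i].status);
            /* ... release resources tied to wc[i].wr_id ... */
        }
    }
}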
776 cq->ibcq.cqe = nent - 1; in mthca_init_cq()
867 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mthca_init_cq()
941 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mthca_free_cq()