Lines Matching +full:supports +full:- +full:cqe

2  * Copyright (c) 2006-2016 Chelsio, Inc. All rights reserved.
14 * - Redistributions of source code must retain the above
17 * - Redistributions in binary form must reproduce the above
54 #define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))
55 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
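A minimal standalone sketch of how these two helpers are typically used when sizing a work request: convert a byte count into 16-byte units (len16) and then into equeue slots. The 64-byte slot size and the 180-byte payload are assumptions for illustration, not taken from the lines above.

#include <assert.h>
#include <stdio.h>

#define ROUND_UP(x, n)     (((x) + (n) - 1u) & ~((n) - 1u))   /* n must be a power of two */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned eq_entry_size = 64;                   /* assumed slot size (T4_EQ_ENTRY_SIZE) */
	unsigned wr_bytes = 180;                       /* hypothetical WR payload              */
	unsigned len16 = DIV_ROUND_UP(wr_bytes, 16);   /* length in 16-byte units              */
	unsigned slots = DIV_ROUND_UP(len16 * 16, eq_entry_size); /* slots the WR consumes     */

	assert(ROUND_UP(len16 * 16, eq_entry_size) == slots * eq_entry_size);
	printf("%u bytes -> len16=%u -> %u slot(s)\n", wr_bytes, len16, slots);
	return 0;
}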
82 #define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
83 #define T4_MAX_IQ_SIZE (65520 - 1)
84 #define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
85 #define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
86 #define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
87 #define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
89 #define T4_MAX_MR_SIZE (~0ULL - 1)
90 #define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
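Worked out with a hypothetical T4_EQ_STATUS_ENTRIES of 1 (the real value is derived from the cache-line size elsewhere in this header), the limits above chain together like this:

#include <stdio.h>

int main(void)
{
	unsigned eq_status_entries = 1;                 /* assumption, for illustration only */
	unsigned max_eq_size  = 65520 - eq_status_entries;
	unsigned max_iq_size  = 65520 - 1;
	unsigned max_rq_size  = 8192 - eq_status_entries;
	unsigned max_sq_size  = max_eq_size - 1;
	unsigned max_qp_depth = max_rq_size - 1;
	unsigned max_cq_depth = max_iq_size - 1;

	printf("SQ<=%u RQ<=%u QP depth<=%u CQ depth<=%u\n",
	       max_sq_size, max_rq_size, max_qp_depth, max_cq_depth);
	return 0;
}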
95 __be32 rsvd1; /* flit 0 - hw owns */
100 u8 qp_err; /* flit 1 - sw owns */
112 #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - sizeof(struct fw_ri_isgl…
113 #define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - sizeof(struct fw_ri_i…
114 #define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_rdma_write_wr) - sizeof(struct …
115 #define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_rdma_write_wr) - sizeof(struct fw_…
116 #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - sizeof(struct fw_ri_im…
145 wqe->send.opcode = (u8)opcode; in init_wr_hdr()
146 wqe->send.flags = flags; in init_wr_hdr()
147 wqe->send.wrid = wrid; in init_wr_hdr()
148 wqe->send.r1[0] = 0; in init_wr_hdr()
149 wqe->send.r1[1] = 0; in init_wr_hdr()
150 wqe->send.r1[2] = 0; in init_wr_hdr()
151 wqe->send.len16 = len16; in init_wr_hdr()
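A self-contained sketch of the same header-fill pattern used by init_wr_hdr(): set opcode, flags and wrid, zero the reserved bytes, and record the WR length in 16-byte units. The struct below is a stand-in invented for illustration, not the real union t4_wr from this header.

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the send-WR header fields touched above. */
struct demo_wr_hdr {
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t wrid;
	uint8_t  r1[3];     /* reserved, must be zero */
	uint8_t  len16;     /* WR length in 16-byte units */
};

static inline void demo_init_wr_hdr(struct demo_wr_hdr *h, uint8_t opcode,
				    uint8_t flags, uint16_t wrid, uint8_t len16)
{
	h->opcode = opcode;
	h->flags  = flags;
	h->wrid   = wrid;
	memset(h->r1, 0, sizeof(h->r1));   /* same effect as the three r1[i] = 0 stores */
	h->len16  = len16;
}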
154 /* CQE/AE status codes */
195 * CQE defs
219 /* macros for flit 0 of the cqe */
246 #define SW_CQE(x) (G_CQE_SWCQE(be32toh((x)->header)))
247 #define CQE_QPID(x) (G_CQE_QPID(be32toh((x)->header)))
248 #define CQE_TYPE(x) (G_CQE_TYPE(be32toh((x)->header)))
251 #define CQE_STATUS(x) (G_CQE_STATUS(be32toh((x)->header)))
252 #define CQE_OPCODE(x) (G_CQE_OPCODE(be32toh((x)->header)))
255 (G_CQE_OPCODE(be32toh((x)->header)) == FW_RI_SEND) || \
256 (G_CQE_OPCODE(be32toh((x)->header)) == FW_RI_SEND_WITH_SE) || \
257 (G_CQE_OPCODE(be32toh((x)->header)) == FW_RI_SEND_WITH_INV) || \
258 (G_CQE_OPCODE(be32toh((x)->header)) == FW_RI_SEND_WITH_SE_INV))
260 #define CQE_LEN(x) (be32toh((x)->len))
263 #define CQE_WRID_STAG(x) (be32toh((x)->u.rcqe.stag))
264 #define CQE_WRID_MSN(x) (be32toh((x)->u.rcqe.msn))
267 #define CQE_WRID_SQ_IDX(x) (x)->u.scqe.cidx
270 #define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
271 #define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
273 /* macros for flit 3 of the cqe */
290 #define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64toh((x)->bits_type_ts)))
291 #define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64toh((x)->bits_type_ts)))
292 #define CQE_TS(x) (G_CQE_TS(be64toh((x)->bits_type_ts)))
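The G_CQE_* accessors above follow the usual Chelsio shift/mask pattern: byte-swap the big-endian flit first, then shift and mask out the field. A self-contained sketch of that pattern, with made-up field positions (the real S_/M_ values live elsewhere in this header):

#include <stdint.h>
#include <endian.h>   /* be32toh(); on FreeBSD use <sys/endian.h> */

/* Illustrative field layout only; the real CQE uses different positions. */
#define S_DEMO_STATUS  5
#define M_DEMO_STATUS  0x1F
#define G_DEMO_STATUS(x) (((x) >> S_DEMO_STATUS) & M_DEMO_STATUS)

struct demo_cqe {
	uint32_t header;   /* big-endian on the wire, like t4_cqe.header */
};

static inline unsigned demo_cqe_status(const struct demo_cqe *cqe)
{
	/* Byte-swap first, then extract, exactly as CQE_STATUS() does. */
	return G_DEMO_STATUS(be32toh(cqe->header));
}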
296 struct t4_cqe cqe; member
365 return wq->rq.in_use; in t4_rqes_posted()
370 return wq->rq.in_use == 0; in t4_rq_empty()
375 return wq->rq.in_use == (wq->rq.size - 1); in t4_rq_full()
380 return wq->rq.size - 1 - wq->rq.in_use; in t4_rq_avail()
385 wq->rq.in_use++; in t4_rq_produce()
386 if (++wq->rq.pidx == wq->rq.size) in t4_rq_produce()
387 wq->rq.pidx = 0; in t4_rq_produce()
388 wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); in t4_rq_produce()
389 if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) in t4_rq_produce()
390 wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS; in t4_rq_produce()
391 if (!wq->error) in t4_rq_produce()
392 wq->rq.queue[wq->rq.size].status.host_pidx = wq->rq.pidx; in t4_rq_produce()
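A standalone sketch of the producer-side bookkeeping seen in t4_rq_produce(): bump in_use, advance pidx with wrap, convert len16 into 64-byte slots, and mirror the new pidx into the shadow status slot that sits just past the ring. The struct, the slot size, and the slots-per-entry value are assumptions for illustration.

#include <stdint.h>

#define DEMO_EQ_ENTRY_SIZE 64               /* assumed slot size      */
#define DEMO_RQ_NUM_SLOTS  2                /* assumed slots per RQE  */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct demo_rq {
	uint16_t size, in_use, pidx, wq_pidx;
	uint16_t host_pidx;                  /* stands in for the shadow status page */
	int      error;
};

static inline void demo_rq_produce(struct demo_rq *rq, uint8_t len16)
{
	rq->in_use++;
	if (++rq->pidx == rq->size)
		rq->pidx = 0;
	rq->wq_pidx += DIV_ROUND_UP(len16 * 16, DEMO_EQ_ENTRY_SIZE);
	if (rq->wq_pidx >= rq->size * DEMO_RQ_NUM_SLOTS)
		rq->wq_pidx %= rq->size * DEMO_RQ_NUM_SLOTS;
	if (!rq->error)
		rq->host_pidx = rq->pidx;    /* publish the new producer index */
}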
397 wq->rq.in_use--; in t4_rq_consume()
398 wq->rq.msn++; in t4_rq_consume()
399 if (++wq->rq.cidx == wq->rq.size) in t4_rq_consume()
400 wq->rq.cidx = 0; in t4_rq_consume()
401 assert((wq->rq.cidx != wq->rq.pidx) || wq->rq.in_use == 0); in t4_rq_consume()
402 if (!wq->error) in t4_rq_consume()
403 wq->rq.queue[wq->rq.size].status.host_cidx = wq->rq.cidx; in t4_rq_consume()
408 return wq->sq.in_use == 0; in t4_sq_empty()
413 return wq->sq.in_use == (wq->sq.size - 1); in t4_sq_full()
418 return wq->sq.size - 1 - wq->sq.in_use; in t4_sq_avail()
423 return wq->sq.flags & T4_SQ_ONCHIP; in t4_sq_onchip()
428 wq->sq.in_use++; in t4_sq_produce()
429 if (++wq->sq.pidx == wq->sq.size) in t4_sq_produce()
430 wq->sq.pidx = 0; in t4_sq_produce()
431 wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); in t4_sq_produce()
432 if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS) in t4_sq_produce()
433 wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS; in t4_sq_produce()
434 if (!wq->error) { in t4_sq_produce()
437 wq->sq.queue[wq->sq.size].status.host_pidx = (wq->sq.pidx); in t4_sq_produce()
443 assert(wq->sq.in_use >= 1); in t4_sq_consume()
444 if (wq->sq.cidx == wq->sq.flush_cidx) in t4_sq_consume()
445 wq->sq.flush_cidx = -1; in t4_sq_consume()
446 wq->sq.in_use--; in t4_sq_consume()
447 if (++wq->sq.cidx == wq->sq.size) in t4_sq_consume()
448 wq->sq.cidx = 0; in t4_sq_consume()
449 assert((wq->sq.cidx != wq->sq.pidx) || wq->sq.in_use == 0); in t4_sq_consume()
450 if (!wq->error) { in t4_sq_consume()
453 wq->sq.queue[wq->sq.size].status.host_cidx = wq->sq.cidx; in t4_sq_consume()
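The empty/full/avail tests and the consume paths above all share one invariant: in_use counts posted-but-uncompleted entries and one slot is always kept unusable (hence the size - 1 tests). A minimal self-contained sketch of those invariants:

#include <assert.h>
#include <stdint.h>

struct demo_ring {
	uint16_t size, in_use, pidx, cidx;
};

static inline int demo_empty(const struct demo_ring *r) { return r->in_use == 0; }
static inline int demo_full(const struct demo_ring *r)  { return r->in_use == r->size - 1; }

static inline unsigned demo_avail(const struct demo_ring *r)
{
	return r->size - 1 - r->in_use;      /* one slot is never handed out */
}

static inline void demo_consume(struct demo_ring *r)
{
	assert(r->in_use >= 1);
	r->in_use--;
	if (++r->cidx == r->size)
		r->cidx = 0;
	assert(r->cidx != r->pidx || r->in_use == 0);
}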
469 len16--; in copy_wqe_to_udb()
481 if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) { in t4_ring_sq_db()
482 PDBG("%s: WC wq->sq.pidx = %d; len16=%d\n", in t4_ring_sq_db()
483 __func__, wq->sq.pidx, len16); in t4_ring_sq_db()
484 copy_wqe_to_udb(wq->sq.udb + 14, wqe); in t4_ring_sq_db()
486 PDBG("%s: DB wq->sq.pidx = %d; len16=%d\n", in t4_ring_sq_db()
487 __func__, wq->sq.pidx, len16); in t4_ring_sq_db()
488 writel(QID_V(wq->sq.bar2_qid) | PIDX_T5_V(inc), in t4_ring_sq_db()
489 wq->sq.udb); in t4_ring_sq_db()
503 *(volatile u32 *)&wq->sq.queue[wq->sq.size].flits[2+i] = i; in t4_ring_sq_db()
517 *(u32 *)&wq->sq.queue[wq->sq.size].flits[2] = i; in t4_ring_sq_db()
522 writel(QID_V(wq->sq.qid & wq->qid_mask) | PIDX_V(inc), wq->sq.udb); in t4_ring_sq_db()
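A hedged sketch of the decision t4_ring_sq_db() makes on T5 and later parts: if a write-combined doorbell window is available and exactly one WQE was posted, copy the whole 64-byte WQE through that window; otherwise just write a PIDX increment to the doorbell register. mmio_write32()/mmio_copy64() are stand-ins invented here, not the driver's writel()/copy_wqe_to_udb(), and the register encoding is only illustrative.

#include <stdint.h>

/* Stand-ins for MMIO accessors, for illustration only. */
static inline void mmio_write32(volatile uint32_t *reg, uint32_t val) { *reg = val; }
static inline void mmio_copy64(volatile uint64_t *dst, const uint64_t *src)
{
	for (int i = 0; i < 8; i++)          /* 64 bytes = 8 flits */
		dst[i] = src[i];
}

struct demo_sq_db {
	volatile uint32_t *udb;              /* doorbell register           */
	volatile uint64_t *wc_window;        /* write-combined WQE window   */
	int wc_available;
	uint32_t bar2_qid;
};

static void demo_ring_sq_db(struct demo_sq_db *db, unsigned inc,
			    const uint64_t *wqe /* 64-byte WQE, 8 flits */)
{
	if (db->wc_available && inc == 1 && wqe)
		mmio_copy64(db->wc_window, wqe);              /* push the WQE itself       */
	else
		mmio_write32(db->udb, (db->bar2_qid << 15) | inc); /* QID | PIDX, assumed layout */
}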
530 if (t5_en_wc && inc == 1 && wq->sq.wc_reg_available) { in t4_ring_rq_db()
531 PDBG("%s: WC wq->rq.pidx = %d; len16=%d\n", in t4_ring_rq_db()
532 __func__, wq->rq.pidx, len16); in t4_ring_rq_db()
533 copy_wqe_to_udb(wq->rq.udb + 14, wqe); in t4_ring_rq_db()
535 PDBG("%s: DB wq->rq.pidx = %d; len16=%d\n", in t4_ring_rq_db()
536 __func__, wq->rq.pidx, len16); in t4_ring_rq_db()
537 writel(QID_V(wq->rq.bar2_qid) | PIDX_T5_V(inc), in t4_ring_rq_db()
538 wq->rq.udb); in t4_ring_rq_db()
546 writel(QID_V(wq->rq.qid & wq->qid_mask) | PIDX_V(inc), wq->rq.udb); in t4_ring_rq_db()
551 return wq->error || wq->rq.queue[wq->rq.size].status.qp_err; in t4_wq_in_error()
556 wq->rq.queue[wq->rq.size].status.qp_err = 1; in t4_set_wq_in_error()
564 * If iw_cxgb4 driver supports door bell drop recovery then its in t4_wq_db_enabled()
570 return ! *wq->db_offp; in t4_wq_db_enabled()
598 while (cq->cidx_inc > CIDXINC_M) { in t4_arm_cq()
600 INGRESSQID_V(cq->cqid & cq->qid_mask); in t4_arm_cq()
601 writel(val, cq->ugts); in t4_arm_cq()
602 cq->cidx_inc -= CIDXINC_M; in t4_arm_cq()
604 val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | in t4_arm_cq()
605 INGRESSQID_V(cq->cqid & cq->qid_mask); in t4_arm_cq()
606 writel(val, cq->ugts); in t4_arm_cq()
607 cq->cidx_inc = 0; in t4_arm_cq()
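The CIDXINC field of the GTS register can only return a bounded number of credits per write, so t4_arm_cq() drains any larger backlog in chunks before the final write that (re)arms the CQ. A standalone sketch of that chunking, with a stubbed register write and an assumed field limit:

#include <stdio.h>

#define DEMO_CIDXINC_M 2047u                    /* assumed field maximum, for illustration */

static void demo_gts_write(unsigned cidxinc, int arm)
{
	printf("GTS write: cidxinc=%u arm=%d\n", cidxinc, arm);  /* stands in for writel() */
}

static void demo_arm_cq(unsigned *cidx_inc, int se)
{
	while (*cidx_inc > DEMO_CIDXINC_M) {    /* return credits in register-sized chunks */
		demo_gts_write(DEMO_CIDXINC_M, 0);
		*cidx_inc -= DEMO_CIDXINC_M;
	}
	demo_gts_write(*cidx_inc, se);          /* final write also arms the CQ if se is set */
	*cidx_inc = 0;
}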
613 cq->sw_in_use++; in t4_swcq_produce()
614 if (cq->sw_in_use == cq->size) { in t4_swcq_produce()
615 syslog(LOG_NOTICE, "cxgb4 sw cq overflow cqid %u\n", cq->cqid); in t4_swcq_produce()
616 cq->error = 1; in t4_swcq_produce()
619 if (++cq->sw_pidx == cq->size) in t4_swcq_produce()
620 cq->sw_pidx = 0; in t4_swcq_produce()
625 assert(cq->sw_in_use >= 1); in t4_swcq_consume()
626 cq->sw_in_use--; in t4_swcq_consume()
627 if (++cq->sw_cidx == cq->size) in t4_swcq_consume()
628 cq->sw_cidx = 0; in t4_swcq_consume()
633 cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; in t4_hwcq_consume()
634 if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) { in t4_hwcq_consume()
637 val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | in t4_hwcq_consume()
638 INGRESSQID_V(cq->cqid & cq->qid_mask); in t4_hwcq_consume()
639 writel(val, cq->ugts); in t4_hwcq_consume()
640 cq->cidx_inc = 0; in t4_hwcq_consume()
642 if (++cq->cidx == cq->size) { in t4_hwcq_consume()
643 cq->cidx = 0; in t4_hwcq_consume()
644 cq->gen ^= 1; in t4_hwcq_consume()
646 ((struct t4_status_page *)&cq->queue[cq->size])->host_cidx = cq->cidx; in t4_hwcq_consume()
649 static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) in t4_valid_cqe() argument
651 return (CQE_GENBIT(cqe) == cq->gen); in t4_valid_cqe()
654 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_hw_cqe() argument
659 if (cq->cidx == 0) in t4_next_hw_cqe()
660 prev_cidx = cq->size - 1; in t4_next_hw_cqe()
662 prev_cidx = cq->cidx - 1; in t4_next_hw_cqe()
664 if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) { in t4_next_hw_cqe()
665 ret = -EOVERFLOW; in t4_next_hw_cqe()
666 syslog(LOG_NOTICE, "cxgb4 cq overflow cqid %u\n", cq->cqid); in t4_next_hw_cqe()
667 cq->error = 1; in t4_next_hw_cqe()
669 } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { in t4_next_hw_cqe()
671 *cqe = &cq->queue[cq->cidx]; in t4_next_hw_cqe()
674 ret = -ENODATA; in t4_next_hw_cqe()
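The validity test above relies on a generation bit: hardware writes each CQE tagged with the current generation, and the consumer flips its expected value every time cidx wraps (as t4_hwcq_consume() does above). A self-contained sketch of the scheme, simplified so that finding and consuming a CQE happen in one step:

#include <stdint.h>
#include <stddef.h>

struct demo_cqe {
	uint32_t payload;
	uint8_t  gen;                        /* written last by the producer */
};

struct demo_cq {
	struct demo_cqe *queue;
	unsigned size, cidx;
	uint8_t gen;                         /* generation the consumer expects */
};

/* A CQE is valid only if its generation matches the consumer's expectation. */
static inline int demo_valid_cqe(const struct demo_cq *cq, const struct demo_cqe *cqe)
{
	return cqe->gen == cq->gen;
}

static inline const struct demo_cqe *demo_next_cqe(struct demo_cq *cq)
{
	if (!demo_valid_cqe(cq, &cq->queue[cq->cidx]))
		return NULL;                 /* nothing new: -ENODATA in the real code */
	const struct demo_cqe *cqe = &cq->queue[cq->cidx];
	if (++cq->cidx == cq->size) {        /* wrap: later entries carry the other gen */
		cq->cidx = 0;
		cq->gen ^= 1;
	}
	return cqe;
}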
680 if (cq->sw_in_use == cq->size) { in t4_next_sw_cqe()
681 syslog(LOG_NOTICE, "cxgb4 sw cq overflow cqid %u\n", cq->cqid); in t4_next_sw_cqe()
682 cq->error = 1; in t4_next_sw_cqe()
686 if (cq->sw_in_use) in t4_next_sw_cqe()
687 return &cq->sw_queue[cq->sw_cidx]; in t4_next_sw_cqe()
693 return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]); in t4_cq_notempty()
696 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_cqe() argument
700 if (cq->error) in t4_next_cqe()
701 ret = -ENODATA; in t4_next_cqe()
702 else if (cq->sw_in_use) in t4_next_cqe()
703 *cqe = &cq->sw_queue[cq->sw_cidx]; in t4_next_cqe()
704 else ret = t4_next_hw_cqe(cq, cqe); in t4_next_cqe()
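A hedged usage sketch of how these helpers chain together when draining a CQ. It assumes the rest of this t4.h and an initialized struct t4_cq, and it consumes software and hardware CQEs with the matching helper, mirroring the preference order inside t4_next_cqe(); error handling and CQE translation are elided.

/* Drain everything currently visible on a CQ (illustrative only). */
static void demo_drain_cq(struct t4_cq *cq)
{
	struct t4_cqe *cqe;

	while (t4_next_cqe(cq, &cqe) == 0) {
		/* ... inspect the CQE via CQE_OPCODE()/CQE_STATUS()/CQE_LEN() ... */
		if (cq->sw_in_use)
			t4_swcq_consume(cq);   /* CQE came from the software queue */
		else
			t4_hwcq_consume(cq);   /* CQE came from the hardware ring  */
	}
	t4_arm_cq(cq, 0);                      /* return credits and re-arm        */
}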
710 return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err; in t4_cq_in_error()
715 ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1; in t4_set_cq_in_error()
720 ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 0; in t4_reset_cq_in_error()