1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <linux/slab.h>

#include "iw_cxgbe.h"

/*
 * Report an async error event (AE) to the verbs consumer for the given
 * QP/CQ pair: if the QP is still in RTS, move it to TERMINATE; deliver
 * the ib_event through the QP's event handler; then invoke the CQ's
 * completion handler so the consumer polls and sees the error CQE.
 * Does nothing if the QP is already in ERROR or TERMINATE.
 */
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
			  struct c4iw_qp *qhp,
			  struct t4_cqe *err_cqe,
			  enum ib_event_type ib_event)
{
	struct ib_event event;
	struct c4iw_qp_attributes attrs;
	unsigned long flag;

	/* QP already in a terminal state: nothing further to report. */
	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
		CTR4(KTR_IW_CXGBE, "%s AE received after RTS - "
		     "qp state %d qpid 0x%x status 0x%x", __func__,
		     qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
		return;
	}

	printf("AE qpid 0x%x opcode %d status 0x%x "
	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
	       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
	       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

	/* Push an RTS QP into TERMINATE so no further work is accepted. */
	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
			       &attrs, 0);
	}

	/* CQ errors attach the CQ to the event; all others attach the QP. */
	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;
	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	/* Kick the CQ consumer so it polls and observes the error CQE. */
	spin_lock_irqsave(&chp->comp_handler_lock, flag);
	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}

/*
 * Dispatch a hardware async error CQE: look up the QP and the owning CQ
 * (SQ errors go to the send CQ, RQ errors to the receive CQ), take
 * references on both so they survive dropping dev->lock, then map the
 * T4 error status onto the appropriate ib_event_type and post it.
 */
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
{
	struct c4iw_cq *chp;
	struct c4iw_qp *qhp;
	u32 cqid;

	spin_lock_irq(&dev->lock);
	qhp = get_qhp(dev, CQE_QPID(err_cqe));
	if (!qhp) {
		/* AE for a QPID we no longer track; log and ignore. */
		printf("BAD AE qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock_irq(&dev->lock);
		goto out;
	}

	/* SQ-type errors belong to the send CQ, RQ-type to the receive CQ. */
	if (SQ_TYPE(err_cqe))
		cqid = qhp->attr.scq;
	else
		cqid = qhp->attr.rcq;
	chp = get_chp(dev, cqid);
	if (!chp) {
		printf("BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
		       cqid, CQE_QPID(err_cqe),
		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
		       CQE_WRID_LOW(err_cqe));
		spin_unlock_irq(&dev->lock);
		goto out;
	}

	/*
	 * Pin both objects before releasing dev->lock so they cannot be
	 * freed while we dispatch the event below.
	 */
	c4iw_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock_irq(&dev->lock);

	/* Bad incoming write */
	if (RQ_TYPE(err_cqe) &&
	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
		goto done;
	}

	switch (CQE_STATUS(err_cqe)) {

	/* Completion Events */
	case T4_ERR_SUCCESS:
		/* An AE should never carry a success status. */
		printf(KERN_ERR MOD "AE with status 0!\n");
		break;

	case T4_ERR_STAG:
	case T4_ERR_PDID:
	case T4_ERR_QPID:
	case T4_ERR_ACCESS:
	case T4_ERR_WRAP:
	case T4_ERR_BOUND:
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
		break;

	/* Device Fatal Errors */
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
		break;

	/* QP Fatal Errors */
	case T4_ERR_OUT_OF_RQE:
	case T4_ERR_PBL_ADDR_BOUND:
	case T4_ERR_CRC:
	case T4_ERR_MARKER:
	case T4_ERR_PDU_LEN_ERR:
	case T4_ERR_DDP_VERSION:
	case T4_ERR_RDMA_VERSION:
	case T4_ERR_OPCODE:
	case T4_ERR_DDP_QUEUE_NUM:
	case T4_ERR_MSN:
	case T4_ERR_TBIT:
	case T4_ERR_MO:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_RQE_ADDR_BOUND:
	case T4_ERR_IRD_OVERFLOW:
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;

	default:
		/* Unrecognized status: treat conservatively as QP fatal. */
		printf("Unknown T4 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
		break;
	}
done:
	/*
	 * Drop the references taken above.  The wake_up on the last CQ
	 * reference presumably unblocks a waiter on chp->wait (likely CQ
	 * teardown) — NOTE(review): confirm against the CQ destroy path.
	 */
	if (atomic_dec_and_test(&chp->refcnt))
		wake_up(&chp->wait);
	c4iw_qp_rem_ref(&qhp->ibqp);
out:
	return;
}

/*
 * CQ interrupt/response handler: resolve the CQID carried in the
 * response control word and, if the CQ is still registered, invoke its
 * completion handler under comp_handler_lock.  Unknown CQIDs are traced
 * and ignored.  Always returns 0.
 */
int c4iw_ev_handler(struct sge_iq *iq, const struct rsp_ctrl *rc)
{
	struct c4iw_dev *dev = iq->adapter->iwarp_softc;
	u32 qid = be32_to_cpu(rc->pldbuflen_qid);
	struct c4iw_cq *chp;
	unsigned long flag;

	spin_lock_irqsave(&dev->lock, flag);
	chp = get_chp(dev, qid);
	if (chp) {
		/* Pin the CQ before dropping dev->lock, as in dispatch. */
		atomic_inc(&chp->refcnt);
		spin_unlock_irqrestore(&dev->lock, flag);

		/* 'flag' is reused; the first lock is already released. */
		spin_lock_irqsave(&chp->comp_handler_lock, flag);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
		if (atomic_dec_and_test(&chp->refcnt))
			wake_up(&chp->wait);
	} else {
		CTR2(KTR_IW_CXGBE, "%s unknown cqid 0x%x", __func__, qid);
		spin_unlock_irqrestore(&dev->lock, flag);
	}

	return 0;
}
#endif