/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */

/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#ifndef __ERDMA_H__
#define __ERDMA_H__

#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/xarray.h>
#include <rdma/ib_verbs.h>

#include "erdma_hw.h"

#define DRV_MODULE_NAME "erdma"
#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"

/*
 * Event queue (EQ). One DMA-coherent ring buffer shared with the device,
 * consumed by the driver (ci advances), with an MMIO doorbell and a
 * DMA-visible doorbell record.
 */
struct erdma_eq {
	void *qbuf;		/* CPU address of the queue ring buffer */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf */

	spinlock_t lock;

	u32 depth;		/* number of entries in the ring */

	u16 ci;			/* consumer index */
	u16 rsvd;		/* padding/reserved */

	atomic64_t event_num;	/* total events processed (stats) */
	atomic64_t notify_num;	/* total notifications issued (stats) */

	void __iomem *db;	/* MMIO doorbell register */
	u64 *dbrec;		/* doorbell record, visible to the device */
	dma_addr_t dbrec_dma;	/* DMA address of dbrec */
};

/* Command queue submission queue (SQ): driver posts commands here. */
struct erdma_cmdq_sq {
	void *qbuf;		/* CPU address of the SQ ring buffer */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf */

	spinlock_t lock;

	u32 depth;		/* number of WQEBBs in the ring */
	u16 ci;			/* consumer index */
	u16 pi;			/* producer index */

	u16 wqebb_cnt;		/* WQE building blocks per command entry */

	u64 *dbrec;		/* doorbell record, visible to the device */
	dma_addr_t dbrec_dma;	/* DMA address of dbrec */
};

/* Command queue completion queue (CQ): device posts command results here. */
struct erdma_cmdq_cq {
	void *qbuf;		/* CPU address of the CQ ring buffer */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf */

	spinlock_t lock;

	u32 depth;		/* number of entries in the ring */
	u32 ci;			/* consumer index */
	u32 cmdsn;		/* command sequence number */

	u64 *dbrec;		/* doorbell record, visible to the device */
	dma_addr_t dbrec_dma;	/* DMA address of dbrec */

	atomic64_t armed_num;	/* times the CQ was armed (stats) */
};

/* Lifecycle states of an individual command (erdma_comp_wait.cmd_status). */
enum {
	ERDMA_CMD_STATUS_INIT,
	ERDMA_CMD_STATUS_ISSUED,
	ERDMA_CMD_STATUS_FINISHED,
	ERDMA_CMD_STATUS_TIMEOUT
};

/*
 * Per-command wait context: the issuing thread sleeps on wait_event until
 * the completion handler fills in the result fields below.
 */
struct erdma_comp_wait {
	struct completion wait_event;
	u32 cmd_status;		/* ERDMA_CMD_STATUS_* above */
	u32 ctx_id;		/* index identifying this context in wait_pool */
	u16 sq_pi;		/* SQ producer index the command was posted at */
	u8 comp_status;		/* completion status reported by the device */
	u8 rsvd;		/* padding/reserved */
	u32 comp_data[4];	/* raw completion payload from the device */
};

/* Bit positions for erdma_cmdq.state. */
enum {
	ERDMA_CMDQ_STATE_OK_BIT = 0,
	ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
	ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
};

#define ERDMA_CMDQ_TIMEOUT_MS 15000	/* per-command completion timeout */
#define ERDMA_REG_ACCESS_WAIT_MS 20	/* delay between register polls */
#define ERDMA_WAIT_DEV_DONE_CNT 500	/* max register-poll attempts */

/*
 * Command queue: SQ/CQ pair plus a dedicated EQ, with a pool of wait
 * contexts for event-driven completion and a semaphore bounding the
 * number of outstanding commands.
 */
struct erdma_cmdq {
	unsigned long *comp_wait_bitmap;	/* allocation map for wait_pool */
	struct erdma_comp_wait *wait_pool;
	spinlock_t lock;

	bool use_event;		/* true: interrupt-driven; false: polling */

	struct erdma_cmdq_sq sq;
	struct erdma_cmdq_cq cq;
	struct erdma_eq eq;

	unsigned long state;	/* ERDMA_CMDQ_STATE_*_BIT flags */

	struct semaphore credits;	/* limits in-flight commands */
	u16 max_outstandings;
};

/* Fallback congestion-control algorithm when negotiation fails. */
#define COMPROMISE_CC ERDMA_CC_CUBIC
/* Congestion-control algorithms supported by the device. */
enum erdma_cc_alg {
	ERDMA_CC_NEWRENO = 0,
	ERDMA_CC_CUBIC,
	ERDMA_CC_HPCC_RTT,
	ERDMA_CC_HPCC_ECN,
	ERDMA_CC_HPCC_INT,
	ERDMA_CC_METHODS_NUM
};

/* Device attributes and capability limits, mostly queried from firmware. */
struct erdma_devattr {
	u32 fw_version;

	unsigned char peer_addr[ETH_ALEN];
	unsigned long cap_flags;

	int numa_node;
	enum erdma_cc_alg cc;	/* active congestion-control algorithm */
	u32 irq_num;

	u32 max_qp;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_ord;		/* max outbound RDMA-read depth */
	u32 max_ird;		/* max inbound RDMA-read depth */

	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u64 max_mr_size;
	u32 max_mr;
	u32 max_pd;
	u32 max_mw;
	u32 local_dma_key;
};

#define ERDMA_IRQNAME_SIZE 50

/* One MSI-X interrupt: name, vector number, and its CPU affinity hint. */
struct erdma_irq {
	char name[ERDMA_IRQNAME_SIZE];
	u32 msix_vector;
	cpumask_t affinity_hint_mask;
};

/* Completion EQ bundle: the EQ itself, its IRQ, and its bottom half. */
struct erdma_eq_cb {
	bool ready;
	void *dev; /* All EQs use this field to get the erdma_dev struct */
	struct erdma_irq irq;
	struct erdma_eq eq;
	struct tasklet_struct tasklet;
};

/* Bitmap-based allocator for a class of device resources (PDs, STags). */
struct erdma_resource_cb {
	unsigned long *bitmap;
	spinlock_t lock;
	u32 next_alloc_idx;	/* rotating hint for the next allocation */
	u32 max_cap;		/* total number of allocatable entries */
};

/* Indices into erdma_dev.res_cb[]. */
enum {
	ERDMA_RES_TYPE_PD = 0,
	ERDMA_RES_TYPE_STAG_IDX = 1,
	ERDMA_RES_CNT = 2,
};

/* Per-device state: one erdma_dev per PCI function. */
struct erdma_dev {
	struct ib_device ibdev;
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct notifier_block netdev_nb;
	struct workqueue_struct *reflush_wq;

	resource_size_t func_bar_addr;	/* physical BAR address */
	resource_size_t func_bar_len;
	u8 __iomem *func_bar;		/* mapped device register BAR */

	struct erdma_devattr attrs;
	/* physical port state (only one port per device) */
	enum ib_port_state state;
	u32 mtu;

	/* cmdq and aeq use the same msix vector */
	struct erdma_irq comm_irq;
	struct erdma_cmdq cmdq;
	struct erdma_eq aeq;
	struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];

	spinlock_t lock;
	struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
	struct xarray qp_xa;	/* QPN -> QP lookup */
	struct xarray cq_xa;	/* CQN -> CQ lookup */

	u32 next_alloc_qpn;
	u32 next_alloc_cqn;

	atomic_t num_ctx;	/* live user contexts */
	struct list_head cep_list;	/* connection endpoints (CM) */

	struct dma_pool *db_pool;
	struct dma_pool *resp_pool;
};

/*
 * Return the address of entry @idx in a ring of @depth entries of size
 * (1 << @shift) bytes. The mask wraps the index, so @depth must be a
 * power of two.
 */
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
{
	idx &= (depth - 1);

	return qbuf + (idx << shift);
}

/* Convert an ib_device pointer back to its containing erdma_dev. */
static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct erdma_dev, ibdev);
}

/* MMIO accessors for device registers at offset @reg within the BAR. */
static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
{
	return readl(dev->func_bar + reg);
}

static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
{
	return readq(dev->func_bar + reg);
}

static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
{
	writel(value, dev->func_bar + reg);
}

static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
{
	writeq(value, dev->func_bar + reg);
}

/*
 * Read a register and extract the field selected by @filed_mask.
 * NOTE(review): "filed" is a typo for "field", kept because the name is
 * used by callers throughout the driver.
 */
static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
					 u32 filed_mask)
{
	u32 val = erdma_reg_read32(dev, reg);

	return FIELD_GET(filed_mask, val);
}

/* Extract field 'name' from a command header/response word @val. */
#define ERDMA_GET(val, name) FIELD_GET(ERDMA_CMD_##name##_MASK, val)

/* Command queue setup/teardown and submission. */
int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);

void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
			u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);

/* Event queue management and event dispatch. */
int erdma_ceqs_init(struct erdma_dev *dev);
void erdma_ceqs_uninit(struct erdma_dev *dev);
void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);

int erdma_aeq_init(struct erdma_dev *dev);
int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth);
void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq);

void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);

#endif