xref: /linux/drivers/infiniband/hw/erdma/erdma.h (revision 90e0d94d369d342e735a75174439482119b6c393)
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 
3 /* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
4 /*          Kai Shen <kaishen@linux.alibaba.com> */
5 /* Copyright (c) 2020-2022, Alibaba Group. */
6 
7 #ifndef __ERDMA_H__
8 #define __ERDMA_H__
9 
10 #include <linux/bitfield.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/xarray.h>
14 #include <rdma/ib_verbs.h>
15 
16 #include "erdma_hw.h"
17 
18 #define DRV_MODULE_NAME "erdma"
19 #define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
20 
/* Generic event queue, used for both the AEQ and the completion EQs (CEQs). */
struct erdma_eq {
	void *qbuf;			/* EQ entry buffer (DMA-able) */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf for the device */

	spinlock_t lock;

	u32 depth;			/* number of entries in the queue */

	u16 ci;				/* consumer index (software side) */
	u16 rsvd;

	atomic64_t event_num;		/* stat: presumably events handled — confirm in EQ handlers */
	atomic64_t notify_num;		/* stat: presumably notifications armed — confirm in notify_eq() */

	u64 __iomem *db_addr;		/* doorbell register, mapped from BAR space */
	u64 *db_record;			/* host-memory doorbell record shadow */
};
38 
/* Command queue submission queue: commands posted by the driver to hardware. */
struct erdma_cmdq_sq {
	void *qbuf;			/* SQ entry buffer (DMA-able) */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf for the device */

	spinlock_t lock;

	u32 depth;			/* number of WQE building blocks in the queue */
	u16 ci;				/* consumer index */
	u16 pi;				/* producer index */

	u16 wqebb_cnt;			/* WQE building blocks per command request */

	u64 *db_record;			/* host-memory doorbell record shadow */
};
53 
/* Command queue completion queue: command completions written by hardware. */
struct erdma_cmdq_cq {
	void *qbuf;			/* CQ entry buffer (DMA-able) */
	dma_addr_t qbuf_dma_addr;	/* DMA address of qbuf for the device */

	spinlock_t lock;

	u32 depth;			/* number of entries in the queue */
	u32 ci;				/* consumer index */
	u32 cmdsn;			/* command sequence number */

	u64 *db_record;			/* host-memory doorbell record shadow */

	atomic64_t armed_num;		/* stat: presumably CQ arm count — confirm in cmdq code */
};
68 
/* Lifecycle states of a posted command (erdma_comp_wait::cmd_status). */
enum {
	ERDMA_CMD_STATUS_INIT,		/* slot allocated, not yet posted */
	ERDMA_CMD_STATUS_ISSUED,	/* posted to hardware, awaiting completion */
	ERDMA_CMD_STATUS_FINISHED,	/* completion received */
	ERDMA_CMD_STATUS_TIMEOUT	/* no completion within the timeout */
};
75 
/* Per-command wait context for event-driven (sleeping) command execution. */
struct erdma_comp_wait {
	struct completion wait_event;	/* completed when the command finishes */
	u32 cmd_status;			/* ERDMA_CMD_STATUS_* state */
	u32 ctx_id;			/* index of this context in the wait pool */
	u16 sq_pi;			/* SQ producer index of the posted command */
	u8 comp_status;			/* completion status reported by hardware */
	u8 rsvd;
	u32 comp_data[4];		/* response payload returned with the completion */
};
85 
/* Bit positions for erdma_cmdq::state. */
enum {
	ERDMA_CMDQ_STATE_OK_BIT = 0,		/* cmdq operational, accepting commands */
	ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,	/* a command timed out */
	ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,	/* context error detected */
};

#define ERDMA_CMDQ_TIMEOUT_MS 15000	/* max wait for a single command completion */
#define ERDMA_REG_ACCESS_WAIT_MS 20	/* delay between device-register polls */
#define ERDMA_WAIT_DEV_DONE_CNT 500	/* max polls while waiting for device ready */
95 
/*
 * Command queue: the driver<->firmware command channel, composed of a
 * submission queue, a completion queue, and a dedicated event queue.
 */
struct erdma_cmdq {
	unsigned long *comp_wait_bitmap;	/* tracks in-use entries of wait_pool */
	struct erdma_comp_wait *wait_pool;	/* pool of per-command wait contexts */
	spinlock_t lock;

	bool use_event;		/* true: sleep on completions; false: poll mode (presumed; confirm in cmdq code) */

	struct erdma_cmdq_sq sq;
	struct erdma_cmdq_cq cq;
	struct erdma_eq eq;

	unsigned long state;	/* ERDMA_CMDQ_STATE_* bits */

	struct semaphore credits;	/* limits outstanding commands to max_outstandings */
	u16 max_outstandings;
};
112 
/* Fallback congestion-control algorithm when peers disagree. */
#define COMPROMISE_CC ERDMA_CC_CUBIC

/* Congestion-control algorithms supported by the device (values are ABI). */
enum erdma_cc_alg {
	ERDMA_CC_NEWRENO = 0,
	ERDMA_CC_CUBIC,
	ERDMA_CC_HPCC_RTT,
	ERDMA_CC_HPCC_ECN,
	ERDMA_CC_HPCC_INT,
	ERDMA_CC_METHODS_NUM	/* count sentinel, not a real algorithm */
};
122 
/* Device attributes and capability limits (presumably firmware-reported — confirm against query path). */
struct erdma_devattr {
	u32 fw_version;

	unsigned char peer_addr[ETH_ALEN];	/* MAC address */

	int numa_node;
	enum erdma_cc_alg cc;		/* active congestion-control algorithm */
	u32 grp_num;
	u32 irq_num;

	bool disable_dwqe;		/* direct WQE (write-combining doorbell) disabled */
	u16 dwqe_pages;
	u16 dwqe_entries;

	/* verbs resource limits */
	u32 max_qp;
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_ord;			/* max outbound read depth */
	u32 max_ird;			/* max inbound read depth */

	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_sge_rd;
	u32 max_cq;
	u32 max_cqe;
	u64 max_mr_size;
	u32 max_mr;
	u32 max_pd;
	u32 max_mw;
	u32 local_dma_key;
};
154 
#define ERDMA_IRQNAME_SIZE 50

/* Bookkeeping for one MSI-X interrupt vector. */
struct erdma_irq {
	char name[ERDMA_IRQNAME_SIZE];	/* name shown in /proc/interrupts */
	u32 msix_vector;
	cpumask_t affinity_hint_mask;	/* CPUs hinted for IRQ affinity */
};
162 
/* Per-CEQ control block: the EQ itself plus its interrupt and bottom half. */
struct erdma_eq_cb {
	bool ready;
	void *dev; /* All EQs use this fields to get erdma_dev struct */
	struct erdma_irq irq;
	struct erdma_eq eq;
	struct tasklet_struct tasklet;	/* bottom-half for EQ processing */
};
170 
/* Simple bitmap-based index allocator for device resources (PDs, STags). */
struct erdma_resource_cb {
	unsigned long *bitmap;		/* one bit per allocatable index */
	spinlock_t lock;
	u32 next_alloc_idx;		/* hint for the next allocation search */
	u32 max_cap;			/* total number of indices */
};

/* Indices into erdma_dev::res_cb. */
enum {
	ERDMA_RES_TYPE_PD = 0,
	ERDMA_RES_TYPE_STAG_IDX = 1,
	ERDMA_RES_CNT = 2,
};
183 
/* Extra space reserved at the end of queue buffers (presumably for the doorbell record — confirm at allocation sites). */
#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
/* NOTE(review): "WARPPED" misspells "WRAPPED"; name kept since other files use it. */
#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
186 
/* Main per-device state for one erdma PCI function. */
struct erdma_dev {
	struct ib_device ibdev;		/* embedded RDMA core device; must allow container_of */
	struct net_device *netdev;	/* underlying Ethernet device */
	struct pci_dev *pdev;
	struct notifier_block netdev_nb;	/* netdev event notifier */

	/* device register BAR mapping */
	resource_size_t func_bar_addr;
	resource_size_t func_bar_len;
	u8 __iomem *func_bar;

	struct erdma_devattr attrs;
	/* physical port state (only one port per device) */
	enum ib_port_state state;
	u32 mtu;

	/* cmdq and aeq use the same msix vector */
	struct erdma_irq comm_irq;
	struct erdma_cmdq cmdq;
	struct erdma_eq aeq;
	struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1];	/* one CEQ per remaining vector */

	spinlock_t lock;
	struct erdma_resource_cb res_cb[ERDMA_RES_CNT];	/* PD / STag index allocators */
	struct xarray qp_xa;		/* QPN -> QP lookup */
	struct xarray cq_xa;		/* CQN -> CQ lookup */

	u32 next_alloc_qpn;
	u32 next_alloc_cqn;

	spinlock_t db_bitmap_lock;
	/* We provide max 64 uContexts that each has one SQ doorbell Page. */
	DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
	/*
	 * We provide max 496 uContexts that each has one SQ normal Db,
	 * and one directWQE db.
	 */
	DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);

	atomic_t num_ctx;		/* number of allocated user contexts */
	struct list_head cep_list;	/* connection endpoints (iWARP CM) */
};
228 
229 static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
230 {
231 	idx &= (depth - 1);
232 
233 	return qbuf + (idx << shift);
234 }
235 
/* Convert an RDMA core ib_device back to its containing erdma_dev. */
static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct erdma_dev, ibdev);
}
240 
/* Read a 32-bit device register at byte offset @reg in the function BAR. */
static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
{
	return readl(dev->func_bar + reg);
}
245 
/* Read a 64-bit device register at byte offset @reg in the function BAR. */
static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
{
	return readq(dev->func_bar + reg);
}
250 
/* Write a 32-bit device register at byte offset @reg in the function BAR. */
static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
{
	writel(value, dev->func_bar + reg);
}
255 
/* Write a 64-bit device register at byte offset @reg in the function BAR. */
static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
{
	writeq(value, dev->func_bar + reg);
}
260 
261 static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
262 					 u32 filed_mask)
263 {
264 	u32 val = erdma_reg_read32(dev, reg);
265 
266 	return FIELD_GET(filed_mask, val);
267 }
268 
/* Command queue setup/teardown (erdma_cmdq.c). */
int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);

/* Command build/post and completion handling. */
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
			u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);

/* Event queue helpers (erdma_eq.c). */
int erdma_ceqs_init(struct erdma_dev *dev);
void erdma_ceqs_uninit(struct erdma_dev *dev);
void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);

/* Async event queue setup/teardown. */
int erdma_aeq_init(struct erdma_dev *dev);
void erdma_aeq_destroy(struct erdma_dev *dev);

/* Interrupt-driven event handlers. */
void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
288 
289 #endif
290