xref: /linux/drivers/net/ethernet/qlogic/qed/qed_cxt.c (revision 0883c2c06fb5bcf5b9e008270827e63c09a88c1e)
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/types.h>
10 #include <linux/bitops.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/list.h>
15 #include <linux/log2.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/string.h>
20 #include "qed.h"
21 #include "qed_cxt.h"
22 #include "qed_dev_api.h"
23 #include "qed_hsi.h"
24 #include "qed_hw.h"
25 #include "qed_init_ops.h"
26 #include "qed_reg_addr.h"
27 #include "qed_sriov.h"
28 
29 /* Max number of connection types in HW (DQ/CDU etc.) */
30 #define MAX_CONN_TYPES		PROTOCOLID_COMMON
31 #define NUM_TASK_TYPES		2
32 #define NUM_TASK_PF_SEGMENTS	4
33 #define NUM_TASK_VF_SEGMENTS	1
34 
35 /* QM constants */
36 #define QM_PQ_ELEMENT_SIZE	4 /* in bytes */
37 
38 /* Doorbell-Queue constants */
39 #define DQ_RANGE_SHIFT		4
40 #define DQ_RANGE_ALIGN		BIT(DQ_RANGE_SHIFT)
41 
42 /* ILT constants */
43 #define ILT_DEFAULT_HW_P_SIZE		3
44 #define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
45 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
46 
47 /* ILT entry structure */
48 #define ILT_ENTRY_PHY_ADDR_MASK		0x000FFFFFFFFFFFULL
49 #define ILT_ENTRY_PHY_ADDR_SHIFT	0
50 #define ILT_ENTRY_VALID_MASK		0x1ULL
51 #define ILT_ENTRY_VALID_SHIFT		52
52 #define ILT_ENTRY_IN_REGS		2
53 #define ILT_REG_SIZE_IN_BYTES		4
54 
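/* Illustrative note: with the default hw_p_size of 3,
 * ILT_PAGE_IN_BYTES(3) = 1 << 15 = 32K. An ILT entry is a 64-bit value
 * written as ILT_ENTRY_IN_REGS (2) runtime registers of
 * ILT_REG_SIZE_IN_BYTES (4) bytes each: the PHY_ADDR field carries the
 * page address shifted right by 12 and the VALID flag sits at bit 52.
 * For an assumed page address of 0x1234567000,
 *
 *	SET_FIELD(entry, ILT_ENTRY_PHY_ADDR, 0x1234567000ULL >> 12);
 *	SET_FIELD(entry, ILT_ENTRY_VALID, 1ULL);
 *
 * yields entry == 0x0010000001234567.
 */
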
55 /* connection context union */
56 union conn_context {
57 	struct core_conn_context core_ctx;
58 	struct eth_conn_context eth_ctx;
59 };
60 
61 #define CONN_CXT_SIZE(p_hwfn) \
62 	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
63 
64 /* PF per protocol configuration object */
65 struct qed_conn_type_cfg {
66 	u32 cid_count;
67 	u32 cid_start;
68 	u32 cids_per_vf;
69 };
70 
71 /* ILT Client configuration, Per connection type (protocol) resources. */
72 #define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
73 #define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
74 #define CDUC_BLK		(0)
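
/* Illustrative note: ILT_CLI_PF_BLOCKS = 1 + NUM_TASK_PF_SEGMENTS * 2 = 9
 * and ILT_CLI_VF_BLOCKS = 1 + NUM_TASK_VF_SEGMENTS * 2 = 3, i.e. one
 * connection-context block plus two blocks per task segment. In this file
 * only the CDUC connection blocks (CDUC_BLK, for the PF and its VFs) and
 * the QM client's first PF block are populated; the task blocks are left
 * for protocols that are not configured here.
 */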
75 
76 enum ilt_clients {
77 	ILT_CLI_CDUC,
78 	ILT_CLI_QM,
79 	ILT_CLI_MAX
80 };
81 
82 struct ilt_cfg_pair {
83 	u32 reg;
84 	u32 val;
85 };
86 
87 struct qed_ilt_cli_blk {
88 	u32 total_size; /* 0 means not active */
89 	u32 real_size_in_page;
90 	u32 start_line;
91 };
92 
93 struct qed_ilt_client_cfg {
94 	bool active;
95 
96 	/* ILT boundaries */
97 	struct ilt_cfg_pair first;
98 	struct ilt_cfg_pair last;
99 	struct ilt_cfg_pair p_size;
100 
101 	/* ILT client blocks for PF */
102 	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
103 	u32 pf_total_lines;
104 
105 	/* ILT client blocks for VFs */
106 	struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
107 	u32 vf_total_lines;
108 };
109 
110 /* Per Path -
111  *      ILT shadow table
112  *      Protocol acquired CID lists
113  *      PF start line in ILT
114  */
115 struct qed_dma_mem {
116 	dma_addr_t p_phys;
117 	void *p_virt;
118 	size_t size;
119 };
120 
121 struct qed_cid_acquired_map {
122 	u32		start_cid;
123 	u32		max_count;
124 	unsigned long	*cid_map;
125 };
126 
127 struct qed_cxt_mngr {
128 	/* Per protocol configuration */
129 	struct qed_conn_type_cfg	conn_cfg[MAX_CONN_TYPES];
130 
131 	/* computed ILT structure */
132 	struct qed_ilt_client_cfg	clients[ILT_CLI_MAX];
133 
134 	/* total number of VFs for this hwfn -
135 	 * ALL VFs are symmetric in terms of HW resources
136 	 */
137 	u32				vf_count;
138 
139 	/* Acquired CIDs */
140 	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];
141 
142 	/* ILT shadow table */
143 	struct qed_dma_mem		*ilt_shadow;
144 	u32				pf_start_line;
145 };
146 
147 /* counts the iids for the CDU/CDUC ILT client configuration */
148 struct qed_cdu_iids {
149 	u32 pf_cids;
150 	u32 per_vf_cids;
151 };
152 
153 static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
154 			     struct qed_cdu_iids *iids)
155 {
156 	u32 type;
157 
158 	for (type = 0; type < MAX_CONN_TYPES; type++) {
159 		iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
160 		iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
161 	}
162 }
163 
164 static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
165 			    struct qed_qm_iids *iids)
166 {
167 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
168 	u32 vf_cids = 0, type;
169 
170 	for (type = 0; type < MAX_CONN_TYPES; type++) {
171 		iids->cids += p_mngr->conn_cfg[type].cid_count;
172 		vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
173 	}
174 
175 	iids->vf_cids += vf_cids * p_mngr->vf_count;
176 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
177 		   "iids: CIDS %08x vf_cids %08x\n",
178 		   iids->cids, iids->vf_cids);
179 }
180 
181 /* set the iids count per protocol */
182 static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
183 					enum protocol_type type,
184 					u32 cid_count, u32 vf_cid_cnt)
185 {
186 	struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
187 	struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
188 
189 	p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
190 	p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
191 }
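
/* Illustrative note: cid counts are rounded up to DQ_RANGE_ALIGN (16) so
 * that the Doorbell Queue can describe each protocol's range in units of
 * 1 << DQ_RANGE_SHIFT. For an assumed request of 70 connections,
 * roundup(70, DQ_RANGE_ALIGN) == 80.
 */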
192 
193 u32 qed_cxt_get_proto_cid_count(struct qed_hwfn		*p_hwfn,
194 				enum protocol_type	type,
195 				u32			*vf_cid)
196 {
197 	if (vf_cid)
198 		*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
199 
200 	return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
201 }
202 
203 static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
204 				 struct qed_ilt_cli_blk *p_blk,
205 				 u32 start_line, u32 total_size,
206 				 u32 elem_size)
207 {
208 	u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
209 
210 	/* verify that it's called only once for each block */
211 	if (p_blk->total_size)
212 		return;
213 
214 	p_blk->total_size = total_size;
215 	p_blk->real_size_in_page = 0;
216 	if (elem_size)
217 		p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
218 	p_blk->start_line = start_line;
219 }
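
/* Illustrative note: real_size_in_page is the ILT page size truncated to a
 * whole number of elements, so no element straddles a page boundary.
 * Assuming a 32K ILT page and a hypothetical 320-byte element,
 *
 *	(32768 / 320) * 320 == 32640
 *
 * leaving 128 bytes of waste per page; DIV_ROUND_UP(total_size, 32640)
 * then gives the number of ILT lines the block occupies.
 */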
220 
221 static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
222 				 struct qed_ilt_client_cfg *p_cli,
223 				 struct qed_ilt_cli_blk *p_blk,
224 				 u32 *p_line, enum ilt_clients client_id)
225 {
226 	if (!p_blk->total_size)
227 		return;
228 
229 	if (!p_cli->active)
230 		p_cli->first.val = *p_line;
231 
232 	p_cli->active = true;
233 	*p_line += DIV_ROUND_UP(p_blk->total_size,
234 				p_blk->real_size_in_page);
235 	p_cli->last.val = *p_line - 1;
236 
237 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
238 		   "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
239 		   client_id, p_cli->first.val,
240 		   p_cli->last.val, p_blk->total_size,
241 		   p_blk->real_size_in_page, p_blk->start_line);
242 }
243 
244 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
245 {
246 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
247 	struct qed_ilt_client_cfg *p_cli;
248 	struct qed_ilt_cli_blk *p_blk;
249 	struct qed_cdu_iids cdu_iids;
250 	struct qed_qm_iids qm_iids;
251 	u32 curr_line, total, i;
252 
253 	memset(&qm_iids, 0, sizeof(qm_iids));
254 	memset(&cdu_iids, 0, sizeof(cdu_iids));
255 
256 	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
257 
258 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
259 		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
260 		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
261 
262 	/* CDUC */
263 	p_cli = &p_mngr->clients[ILT_CLI_CDUC];
264 	curr_line = p_mngr->pf_start_line;
265 
266 	/* CDUC PF */
267 	p_cli->pf_total_lines = 0;
268 
269 	/* get the counters for the CDUC and QM clients */
270 	qed_cxt_cdu_iids(p_mngr, &cdu_iids);
271 
272 	p_blk = &p_cli->pf_blks[CDUC_BLK];
273 
274 	total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
275 
276 	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
277 			     total, CONN_CXT_SIZE(p_hwfn));
278 
279 	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
280 	p_cli->pf_total_lines = curr_line - p_blk->start_line;
281 
282 	/* CDUC VF */
283 	p_blk = &p_cli->vf_blks[CDUC_BLK];
284 	total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
285 
286 	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
287 			     total, CONN_CXT_SIZE(p_hwfn));
288 
289 	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
290 	p_cli->vf_total_lines = curr_line - p_blk->start_line;
291 
292 	for (i = 1; i < p_mngr->vf_count; i++)
293 		qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
294 				     ILT_CLI_CDUC);
295 
296 	/* QM */
297 	p_cli = &p_mngr->clients[ILT_CLI_QM];
298 	p_blk = &p_cli->pf_blks[0];
299 
300 	qed_cxt_qm_iids(p_hwfn, &qm_iids);
301 	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
302 				   qm_iids.vf_cids, 0,
303 				   p_hwfn->qm_info.num_pqs,
304 				   p_hwfn->qm_info.num_vf_pqs);
305 
306 	DP_VERBOSE(p_hwfn,
307 		   QED_MSG_ILT,
308 		   "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
309 		   qm_iids.cids,
310 		   qm_iids.vf_cids,
311 		   p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
312 
313 	qed_ilt_cli_blk_fill(p_cli, p_blk,
314 			     curr_line, total * 0x1000,
315 			     QM_PQ_ELEMENT_SIZE);
316 
317 	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
318 	p_cli->pf_total_lines = curr_line - p_blk->start_line;
319 
320 	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
321 	    RESC_NUM(p_hwfn, QED_ILT)) {
322 		DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
323 		       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
324 		return -EINVAL;
325 	}
326 
327 	return 0;
328 }
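
/* Illustrative note: the resulting ILT layout, starting at pf_start_line,
 * is
 *
 *	[ CDUC PF contexts ][ CDUC VF0 ][ CDUC VF1 ] ... [ QM PF queues ]
 *
 * where every VF gets an identical vf_total_lines-sized slice (the per-VF
 * loop above advances the same block once per additional VF), and the
 * whole range must fit within the RESC_NUM(p_hwfn, QED_ILT) lines granted
 * to this PF.
 */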
329 
330 #define for_each_ilt_valid_client(pos, clients)	\
331 		for (pos = 0; pos < ILT_CLI_MAX; pos++)
332 
333 /* Total number of ILT lines used by this PF */
334 static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
335 {
336 	u32 size = 0;
337 	u32 i;
338 
339 	for_each_ilt_valid_client(i, ilt_clients) {
340 		if (!ilt_clients[i].active)
341 			continue;
342 		size += (ilt_clients[i].last.val -
343 			 ilt_clients[i].first.val + 1);
344 	}
345 
346 	return size;
347 }
348 
349 static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
350 {
351 	struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
352 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
353 	u32 ilt_size, i;
354 
355 	ilt_size = qed_cxt_ilt_shadow_size(p_cli);
356 
357 	for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
358 		struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
359 
360 		if (p_dma->p_virt)
361 			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
362 					  p_dma->size, p_dma->p_virt,
363 					  p_dma->p_phys);
364 		p_dma->p_virt = NULL;
365 	}
366 	kfree(p_mngr->ilt_shadow);
367 }
368 
369 static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
370 			     struct qed_ilt_cli_blk *p_blk,
371 			     enum ilt_clients ilt_client,
372 			     u32 start_line_offset)
373 {
374 	struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
375 	u32 lines, line, sz_left;
376 
377 	if (!p_blk->total_size)
378 		return 0;
379 
380 	sz_left = p_blk->total_size;
381 	lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
382 	line = p_blk->start_line + start_line_offset -
383 	       p_hwfn->p_cxt_mngr->pf_start_line;
384 
385 	for (; lines; lines--) {
386 		dma_addr_t p_phys;
387 		void *p_virt;
388 		u32 size;
389 
390 		size = min_t(u32, sz_left,
391 			     p_blk->real_size_in_page);
392 		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
393 					    size,
394 					    &p_phys,
395 					    GFP_KERNEL);
396 		if (!p_virt)
397 			return -ENOMEM;
398 		memset(p_virt, 0, size);
399 
400 		ilt_shadow[line].p_phys = p_phys;
401 		ilt_shadow[line].p_virt = p_virt;
402 		ilt_shadow[line].size = size;
403 
404 		DP_VERBOSE(p_hwfn, QED_MSG_ILT,
405 			   "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
406 			    line, (u64)p_phys, p_virt, size);
407 
408 		sz_left -= size;
409 		line++;
410 	}
411 
412 	return 0;
413 }
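
/* Illustrative note: every ILT line of a block gets its own coherent DMA
 * buffer of at most real_size_in_page bytes; only the final line may be
 * shorter (the min_t() above). A hypothetical 100000-byte block with
 * 32640 usable bytes per page would allocate three full lines and one
 * 2080-byte tail.
 */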
414 
415 static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
416 {
417 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
418 	struct qed_ilt_client_cfg *clients = p_mngr->clients;
419 	struct qed_ilt_cli_blk *p_blk;
420 	u32 size, i, j, k;
421 	int rc;
422 
423 	size = qed_cxt_ilt_shadow_size(clients);
424 	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
425 				     GFP_KERNEL);
426 	if (!p_mngr->ilt_shadow) {
427 		DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
428 		rc = -ENOMEM;
429 		goto ilt_shadow_fail;
430 	}
431 
432 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
433 		   "Allocated 0x%x bytes for ilt shadow\n",
434 		   (u32)(size * sizeof(struct qed_dma_mem)));
435 
436 	for_each_ilt_valid_client(i, clients) {
437 		if (!clients[i].active)
438 			continue;
439 		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
440 			p_blk = &clients[i].pf_blks[j];
441 			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
442 			if (rc != 0)
443 				goto ilt_shadow_fail;
444 		}
445 		for (k = 0; k < p_mngr->vf_count; k++) {
446 			for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
447 				u32 lines = clients[i].vf_total_lines * k;
448 
449 				p_blk = &clients[i].vf_blks[j];
450 				rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
451 				if (rc != 0)
452 					goto ilt_shadow_fail;
453 			}
454 		}
455 	}
456 
457 	return 0;
458 
459 ilt_shadow_fail:
460 	qed_ilt_shadow_free(p_hwfn);
461 	return rc;
462 }
463 
464 static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
465 {
466 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
467 	u32 type;
468 
469 	for (type = 0; type < MAX_CONN_TYPES; type++) {
470 		kfree(p_mngr->acquired[type].cid_map);
471 		p_mngr->acquired[type].max_count = 0;
472 		p_mngr->acquired[type].start_cid = 0;
473 	}
474 }
475 
476 static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
477 {
478 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
479 	u32 start_cid = 0;
480 	u32 type;
481 
482 	for (type = 0; type < MAX_CONN_TYPES; type++) {
483 		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
484 		u32 size;
485 
486 		if (cid_cnt == 0)
487 			continue;
488 
489 		size = DIV_ROUND_UP(cid_cnt,
490 				    sizeof(unsigned long) * BITS_PER_BYTE) *
491 		       sizeof(unsigned long);
492 		p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
493 		if (!p_mngr->acquired[type].cid_map)
494 			goto cid_map_fail;
495 
496 		p_mngr->acquired[type].max_count = cid_cnt;
497 		p_mngr->acquired[type].start_cid = start_cid;
498 
499 		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
500 
501 		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
502 			   "Type %08x start: %08x count %08x\n",
503 			   type, p_mngr->acquired[type].start_cid,
504 			   p_mngr->acquired[type].max_count);
505 		start_cid += cid_cnt;
506 	}
507 
508 	return 0;
509 
510 cid_map_fail:
511 	qed_cid_map_free(p_hwfn);
512 	return -ENOMEM;
513 }
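
/* Illustrative note: each protocol's cid_map is a plain bitmap with one bit
 * per CID, sized in whole unsigned longs. On a 64-bit build an assumed
 * cid_cnt of 70 gives DIV_ROUND_UP(70, 64) * sizeof(unsigned long) == 16
 * bytes, and the accumulating start_cid keeps consecutive protocols
 * contiguous in the global CID space.
 */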
514 
515 int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
516 {
517 	struct qed_cxt_mngr *p_mngr;
518 	u32 i;
519 
520 	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
521 	if (!p_mngr) {
522 		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
523 		return -ENOMEM;
524 	}
525 
526 	/* Initialize ILT client registers */
527 	p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
528 	p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
529 	p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
530 
531 	p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
532 	p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
533 	p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
534 
535 	/* default ILT page size for all clients is 32K */
536 	for (i = 0; i < ILT_CLI_MAX; i++)
537 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
538 
539 	if (p_hwfn->cdev->p_iov_info)
540 		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
541 
542 	/* Set the cxt manager pointer prior to further allocations */
543 	p_hwfn->p_cxt_mngr = p_mngr;
544 
545 	return 0;
546 }
547 
548 int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
549 {
550 	int rc;
551 
552 	/* Allocate the ILT shadow table */
553 	rc = qed_ilt_shadow_alloc(p_hwfn);
554 	if (rc) {
555 		DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
556 		goto tables_alloc_fail;
557 	}
558 
559 	/* Allocate and initialize the acquired cids bitmaps */
560 	rc = qed_cid_map_alloc(p_hwfn);
561 	if (rc) {
562 		DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
563 		goto tables_alloc_fail;
564 	}
565 
566 	return 0;
567 
568 tables_alloc_fail:
569 	qed_cxt_mngr_free(p_hwfn);
570 	return rc;
571 }
572 
573 void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
574 {
575 	if (!p_hwfn->p_cxt_mngr)
576 		return;
577 
578 	qed_cid_map_free(p_hwfn);
579 	qed_ilt_shadow_free(p_hwfn);
580 	kfree(p_hwfn->p_cxt_mngr);
581 
582 	p_hwfn->p_cxt_mngr = NULL;
583 }
584 
585 void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
586 {
587 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
588 	int type;
589 
590 	/* Reset acquired cids */
591 	for (type = 0; type < MAX_CONN_TYPES; type++) {
592 		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
593 
594 		if (cid_cnt == 0)
595 			continue;
596 
597 		memset(p_mngr->acquired[type].cid_map, 0,
598 		       DIV_ROUND_UP(cid_cnt,
599 				    sizeof(unsigned long) * BITS_PER_BYTE) *
600 		       sizeof(unsigned long));
601 	}
602 }
603 
604 /* CDU Common */
605 #define CDUC_CXT_SIZE_SHIFT \
606 	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
607 
608 #define CDUC_CXT_SIZE_MASK \
609 	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
610 
611 #define CDUC_BLOCK_WASTE_SHIFT \
612 	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
613 
614 #define CDUC_BLOCK_WASTE_MASK \
615 	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
616 
617 #define CDUC_NCIB_SHIFT	\
618 	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
619 
620 #define CDUC_NCIB_MASK \
621 	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
622 
623 static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
624 {
625 	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
626 
627 	/* CDUC - connection configuration */
628 	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
629 	cxt_size = CONN_CXT_SIZE(p_hwfn);
630 	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
631 	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
632 
633 	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
634 	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
635 	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
636 	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
637 }
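
/* Illustrative note: with the default 32K ILT page and a hypothetical
 * 320-byte connection context, the values programmed above would be
 * cxt_size = 320, elems_per_page = 32768 / 320 = 102 and
 * block_waste = 32768 - 102 * 320 = 128, packed into the CXT_SIZE,
 * NCIB and BLOCK_WASTE fields of CDU_REG_CID_ADDR_PARAMS.
 */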
638 
639 void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
640 {
641 	struct qed_qm_pf_rt_init_params params;
642 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
643 	struct qed_qm_iids iids;
644 
645 	memset(&iids, 0, sizeof(iids));
646 	qed_cxt_qm_iids(p_hwfn, &iids);
647 
648 	memset(&params, 0, sizeof(params));
649 	params.port_id = p_hwfn->port_id;
650 	params.pf_id = p_hwfn->rel_pf_id;
651 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
652 	params.is_first_pf = p_hwfn->first_on_engine;
653 	params.num_pf_cids = iids.cids;
654 	params.num_vf_cids = iids.vf_cids;
655 	params.start_pq = qm_info->start_pq;
656 	params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
657 	params.num_vf_pqs = qm_info->num_vf_pqs;
658 	params.start_vport = qm_info->start_vport;
659 	params.num_vports = qm_info->num_vports;
660 	params.pf_wfq = qm_info->pf_wfq;
661 	params.pf_rl = qm_info->pf_rl;
662 	params.pq_params = qm_info->qm_pq_params;
663 	params.vport_params = qm_info->qm_vport_params;
664 
665 	qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
666 }
667 
668 /* CM PF */
669 static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
670 {
671 	union qed_qm_pq_params pq_params;
672 	u16 pq;
673 
674 	/* XCM pure-LB queue */
675 	memset(&pq_params, 0, sizeof(pq_params));
676 	pq_params.core.tc = LB_TC;
677 	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
678 	STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
679 
680 	return 0;
681 }
682 
683 /* DQ PF */
684 static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
685 {
686 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
687 	u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
688 
689 	dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
690 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
691 
692 	dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
693 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
694 
695 	dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
696 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
697 
698 	dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
699 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
700 
701 	dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
702 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
703 
704 	dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
705 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
706 
707 	dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
708 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
709 
710 	dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
711 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
712 
713 	dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
714 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
715 
716 	dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
717 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
718 
719 	dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
720 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
721 
722 	dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
723 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
724 
725 	/* Connection types 6 & 7 are not in use, yet they must be configured
726 	 * as the highest possible connection. Not configuring them means the
727 	 * defaults will be used, and with a large number of cids a bug may
728 	 * occur if the defaults are smaller than dq_pf_max_cid /
729 	 * dq_vf_max_cid.
730 	 */
731 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
732 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
733 
734 	STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
735 	STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
736 }
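
/* Illustrative note: each DORQ_REG_*_MAX_ICID_N register above receives the
 * running sum of the cid counts of connection types 0..N, expressed in
 * units of 1 << DQ_RANGE_SHIFT (16 CIDs), which is consistent with the
 * counts being rounded up to DQ_RANGE_ALIGN. Assuming 32 PF cids for
 * type 0 and 64 for type 1, ICID_0 would be 2 and ICID_1 would be 6.
 */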
737 
738 static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
739 {
740 	struct qed_ilt_client_cfg *ilt_clients;
741 	int i;
742 
743 	ilt_clients = p_hwfn->p_cxt_mngr->clients;
744 	for_each_ilt_valid_client(i, ilt_clients) {
745 		if (!ilt_clients[i].active)
746 			continue;
747 		STORE_RT_REG(p_hwfn,
748 			     ilt_clients[i].first.reg,
749 			     ilt_clients[i].first.val);
750 		STORE_RT_REG(p_hwfn,
751 			     ilt_clients[i].last.reg,
752 			     ilt_clients[i].last.val);
753 		STORE_RT_REG(p_hwfn,
754 			     ilt_clients[i].p_size.reg,
755 			     ilt_clients[i].p_size.val);
756 	}
757 }
758 
759 static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
760 {
761 	struct qed_ilt_client_cfg *p_cli;
762 	u32 blk_factor;
763 
764 	/* For simplicity we set the 'block' to be an ILT page */
765 	if (p_hwfn->cdev->p_iov_info) {
766 		struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
767 
768 		STORE_RT_REG(p_hwfn,
769 			     PSWRQ2_REG_VF_BASE_RT_OFFSET,
770 			     p_iov->first_vf_in_pf);
771 		STORE_RT_REG(p_hwfn,
772 			     PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
773 			     p_iov->first_vf_in_pf + p_iov->total_vfs);
774 	}
775 
776 	p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
777 	blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
778 	if (p_cli->active) {
779 		STORE_RT_REG(p_hwfn,
780 			     PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
781 			     blk_factor);
782 		STORE_RT_REG(p_hwfn,
783 			     PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
784 			     p_cli->pf_total_lines);
785 		STORE_RT_REG(p_hwfn,
786 			     PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
787 			     p_cli->vf_total_lines);
788 	}
789 }
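
/* Illustrative note: blk_factor expresses the per-VF 'block' (one ILT page)
 * in units of 1K as a power of two; for the default 32K page this is
 * ilog2(32768 >> 10) == 5.
 */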
790 
791 /* ILT (PSWRQ2) PF */
792 static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
793 {
794 	struct qed_ilt_client_cfg *clients;
795 	struct qed_cxt_mngr *p_mngr;
796 	struct qed_dma_mem *p_shdw;
797 	u32 line, rt_offst, i;
798 
799 	qed_ilt_bounds_init(p_hwfn);
800 	qed_ilt_vf_bounds_init(p_hwfn);
801 
802 	p_mngr = p_hwfn->p_cxt_mngr;
803 	p_shdw = p_mngr->ilt_shadow;
804 	clients = p_hwfn->p_cxt_mngr->clients;
805 
806 	for_each_ilt_valid_client(i, clients) {
807 		if (!clients[i].active)
808 			continue;
809 
810 		/* Client's 1st val and RT array are absolute, ILT shadows'
811 		 * lines are relative.
812 		 */
813 		line = clients[i].first.val - p_mngr->pf_start_line;
814 		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
815 			   clients[i].first.val * ILT_ENTRY_IN_REGS;
816 
817 		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
818 		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
819 			u64 ilt_hw_entry = 0;
820 
821 			/* p_virt could be NULL in case of dynamic
822 			 * allocation
823 			 */
824 			if (p_shdw[line].p_virt) {
825 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
826 				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
827 					  (p_shdw[line].p_phys >> 12));
828 
829 				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
830 					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
831 					   rt_offst, line, i,
832 					   (u64)(p_shdw[line].p_phys >> 12));
833 			}
834 
835 			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
836 		}
837 	}
838 }
839 
840 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
841 {
842 	qed_cdu_init_common(p_hwfn);
843 }
844 
845 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
846 {
847 	qed_qm_init_pf(p_hwfn);
848 	qed_cm_init_pf(p_hwfn);
849 	qed_dq_init_pf(p_hwfn);
850 	qed_ilt_init_pf(p_hwfn);
851 }
852 
853 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
854 			enum protocol_type type,
855 			u32 *p_cid)
856 {
857 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
858 	u32 rel_cid;
859 
860 	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
861 		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
862 		return -EINVAL;
863 	}
864 
865 	rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
866 				      p_mngr->acquired[type].max_count);
867 
868 	if (rel_cid >= p_mngr->acquired[type].max_count) {
869 		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
870 			  type);
871 		return -EINVAL;
872 	}
873 
874 	__set_bit(rel_cid, p_mngr->acquired[type].cid_map);
875 
876 	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
877 
878 	return 0;
879 }
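
/* Minimal usage sketch (illustrative only, error handling trimmed):
 *
 *	struct qed_cxt_info cxt_info;
 *	u32 cid;
 *
 *	if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
 *		cxt_info.iid = cid;
 *		qed_cxt_get_cid_info(p_hwfn, &cxt_info);
 *		...
 *		qed_cxt_release_cid(p_hwfn, cid);
 *	}
 */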
880 
881 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
882 				      u32 cid,
883 				      enum protocol_type *p_type)
884 {
885 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
886 	struct qed_cid_acquired_map *p_map;
887 	enum protocol_type p;
888 	u32 rel_cid;
889 
890 	/* Iterate over protocols and find matching cid range */
891 	for (p = 0; p < MAX_CONN_TYPES; p++) {
892 		p_map = &p_mngr->acquired[p];
893 
894 		if (!p_map->cid_map)
895 			continue;
896 		if (cid >= p_map->start_cid &&
897 		    cid < p_map->start_cid + p_map->max_count)
898 			break;
899 	}
900 	*p_type = p;
901 
902 	if (p == MAX_CONN_TYPES) {
903 		DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
904 		return false;
905 	}
906 
907 	rel_cid = cid - p_map->start_cid;
908 	if (!test_bit(rel_cid, p_map->cid_map)) {
909 		DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
910 		return false;
911 	}
912 	return true;
913 }
914 
915 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
916 			 u32 cid)
917 {
918 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
919 	enum protocol_type type;
920 	bool b_acquired;
921 	u32 rel_cid;
922 
923 	/* Test acquired and find matching per-protocol map */
924 	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
925 
926 	if (!b_acquired)
927 		return;
928 
929 	rel_cid = cid - p_mngr->acquired[type].start_cid;
930 	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
931 }
932 
933 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
934 			 struct qed_cxt_info *p_info)
935 {
936 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
937 	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
938 	enum protocol_type type;
939 	bool b_acquired;
940 
941 	/* Test acquired and find matching per-protocol map */
942 	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
943 
944 	if (!b_acquired)
945 		return -EINVAL;
946 
947 	/* set the protocol type */
948 	p_info->type = type;
949 
950 	/* compute context virtual pointer */
951 	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
952 
953 	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
954 	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
955 	line = p_info->iid / cxts_per_p;
956 
957 	/* Make sure context is allocated (dynamic allocation) */
958 	if (!p_mngr->ilt_shadow[line].p_virt)
959 		return -EINVAL;
960 
961 	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
962 			p_info->iid % cxts_per_p * conn_cxt_size;
963 
964 	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
965 		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
966 		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
967 
968 	return 0;
969 }
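
/* Illustrative note: assuming a 32K ILT page and a hypothetical 320-byte
 * context, cxts_per_p = 102; for iid 250 the context lives in shadow
 * line 250 / 102 = 2, at byte offset (250 % 102) * 320 = 14720 within
 * that page.
 */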
970 
971 int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
972 {
973 	struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
974 
975 	/* Set the number of required CORE connections */
976 	u32 core_cids = 1; /* SPQ */
977 
978 	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
979 
980 	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
981 				    p_params->num_cons, 1);
982 
983 	return 0;
984 }
985