/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2003-2005 Chelsio Communications.  All rights reserved.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"	/* mc5.c */

#include "common.h"
#include "regs.h"
#include "mc5.h"

/* DBGI command mode */
enum {
	DBGI_MODE_MBUS,
	DBGI_MODE_LARA_7000,
	DBGI_MODE_LARA_8000,
	DBGI_MODE_NETL_4000,
	DBGI_MODE_NETL_5000,
	DBGI_MODE_IDT_52100
};

/* Lara command register address and values (low 32 bits) */
#define MC5_LRA_CMDREG_ADR0		0x00180038
#define MC5_LRA_CMDREG_72KEY_DATA0	0x00000182
#define MC5_LRA_CMDREG_144KEY_DATA0	0x00AAAB82

/* Lara config register address and values (low 32 bits) */
#define MC5_LRA_CFGREG_ADR0		0x0018003D
#define MC5_LRA_CFGREG_72KEY_DATA0	0x00000000
#define MC5_LRA_CFGREG_144KEY_DATA0	0x55555555

/* Lara GMR base addresses (low 32 bits) */
#define MC5_LRA_GMRREG_BASE_ADR0_1	0x00180020
#define MC5_LRA_GMRREG_BASE_ADR0_2	0x00180060

/* Lara 7000 data and mask array base addresses (low 32 bits) */
#define MC5_LRA_DATARY_BASE_ADR0	0x00000000
#define MC5_LRA_MSKARY_BASE_ADR0	0x00080000

/* Lara commands */
#define MC5_LRA_CMD_READ		0x00000000
#define MC5_LRA_CMD_WRITE		0x00010001
#define MC5_LRA_CMD_SEARCH		0x00020002
#define MC5_LRA_CMD_LEARN		0x00030003

/* IDT 75P52100 commands */
#define MC5_IDT_CMD_READ		0x0
#define MC5_IDT_CMD_WRITE		0x1
#define MC5_IDT_CMD_SEARCH		0x2
#define MC5_IDT_CMD_LEARN		0x3
#define MC5_IDT_CMD_NFA_SEARCH		0x4

/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
#define MC5_IDT_LAR_ADR0		0x180006
#define MC5_IDT_LAR_MODE144		0xffff0000

/* IDT SCR and SSR addresses (low 32 bits) */
#define MC5_IDT_SCR_ADR0		0x180000
#define MC5_IDT_SSR0_ADR0		0x180002
#define MC5_IDT_SSR1_ADR0		0x180004

/* IDT GMR base address (low 32 bits) */
#define MC5_IDT_GMR_BASE_ADR0		0x180020

/* IDT data and mask array base addresses (low 32 bits) */
#define MC5_IDT_DATARY_BASE_ADR0	0x00000000
#define MC5_IDT_MSKARY_BASE_ADR0	0x00080000

#define IDT_ELOOKUP_2Mb			0x7000
#define IDT_ELOOKUP_9Mb			0x16000

enum {
	LARA_7000,
	LARA_8000,
	NETLOGIC_4000,
	NETLOGIC_5000,
	IDT75P52100
};

static unsigned int tcam_part_size[] = {
	4718592, /* 4.5Mb */
	9437184, /* 9Mb */
	18874368 /* 18Mb */
};

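/*
 * Note: tcam_part_size[] above lists total TCAM capacity in bits, indexed by
 * G_TCAM_PART_SIZE() from the hardware config register.  The usable entry
 * count is that bit capacity divided by the entry width chosen at create
 * time (see t1_mc5_create() below).  For example, a 4.5Mb part yields
 * 4718592 / 144 = 32768 entries in 144-bit mode and 4718592 / 72 = 65536
 * entries in 72-bit mode.
 */
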
struct pemc5 {
	adapter_t *adapter;
	unsigned int tcam_size;
	unsigned int part_size;
	unsigned char part_type;
	unsigned char parity_enabled;
	unsigned char issue_syn;
	unsigned char mode;
	struct pemc5_intr_counts intr_counts;
#ifdef SUPPORT_MODE72
	u32 lip[MC5_LIP_NUM_OF_ENTRIES];
	unsigned int lip_index;
#endif
};

#define MAX_WRITE_ATTEMPTS 5

/*
 * Issue a command to the TCAM and wait for its completion.  The address and
 * any data required by the command must have been set up by the caller.
 */
static int mc5_cmd_write(adapter_t *adapter, u32 cmd)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_CMD, cmd);
	return t1_wait_op_done(adapter, A_MC5_DBGI_RSP_STATUS,
		F_DBGI_RSP_VALID, 1, MAX_WRITE_ATTEMPTS, 1);
}


unsigned int t1_mc5_get_tcam_size(struct pemc5 *mc5)
{
	return mc5->tcam_size;
}

static int set_tcam_rtbl_base(struct pemc5 *mc5, unsigned int rtbl_base)
{
	if (rtbl_base >= t1_mc5_get_tcam_size(mc5)) return -1;
	t1_write_reg_4(mc5->adapter, A_MC5_ROUTING_TABLE_INDEX, rtbl_base);
	return 0;
}

unsigned int t1_mc5_get_tcam_rtbl_base(struct pemc5 *mc5)
{
	return t1_read_reg_4(mc5->adapter, A_MC5_ROUTING_TABLE_INDEX);
}

unsigned int t1_mc5_get_tcam_rtbl_size(struct pemc5 *mc5)
{
	unsigned int tcam_size = t1_mc5_get_tcam_size(mc5);
	unsigned int tcam_rtable_base = t1_mc5_get_tcam_rtbl_base(mc5);

	return tcam_size - tcam_rtable_base;
}

static int set_tcam_server_base(struct pemc5 *mc5, unsigned int server_base)
{
	if (server_base >= t1_mc5_get_tcam_size(mc5)) return -1;
	t1_write_reg_4(mc5->adapter, A_MC5_SERVER_INDEX, server_base);
	return 0;
}

unsigned int t1_mc5_get_tcam_server_base(struct pemc5 *mc5)
{
	return t1_read_reg_4(mc5->adapter, A_MC5_SERVER_INDEX);
}

unsigned int t1_mc5_get_tcam_server_size(struct pemc5 *mc5)
{
	unsigned int tcam_rtable_base = t1_mc5_get_tcam_rtbl_base(mc5);
	unsigned int tcam_server_base = t1_mc5_get_tcam_server_base(mc5);

	return tcam_rtable_base - tcam_server_base;
}

static inline void dbgi_wr_addr3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR0, v1);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR1, v2);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR2, v3);
}

static inline void dbgi_wr_data3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_DATA0, v1);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_DATA1, v2);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_DATA2, v3);
}

static inline void dbgi_rd_rsp3(adapter_t *adapter, u32 *v1, u32 *v2, u32 *v3)
{
	*v1 = t1_read_reg_4(adapter, A_MC5_DBGI_RSP_DATA0);
	*v2 = t1_read_reg_4(adapter, A_MC5_DBGI_RSP_DATA1);
	*v3 = t1_read_reg_4(adapter, A_MC5_DBGI_RSP_DATA2);
}

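/*
 * Summary of the DBGI access pattern used throughout this file (derived
 * from the helpers above): load A_MC5_DBGI_REQ_ADDR0-2 and, if needed,
 * A_MC5_DBGI_REQ_DATA0-2, write the TCAM command to A_MC5_DBGI_REQ_CMD,
 * poll A_MC5_DBGI_RSP_STATUS for F_DBGI_RSP_VALID, and for reads collect
 * the result from A_MC5_DBGI_RSP_DATA0-2.
 */
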
/*
 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
 * command cmd.  The data to be written must have been set up by the caller.
 * Returns -1 on failure, 0 on success.
 */
static int mc5_write(adapter_t *adapter, u32 addr_lo, u32 cmd)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR0, addr_lo);
	if (mc5_cmd_write(adapter, cmd) == 0)
		return 0;
	CH_ERR("%s: MC5 timeout writing to TCAM address 0x%x\n",
	       adapter_name(adapter), addr_lo);
	return -1;
}

static int init_mask_data_array(struct pemc5 *mc5, u32 mask_array_base,
				u32 data_array_base, u32 write_cmd)
{
	unsigned int i;
	adapter_t *adap = mc5->adapter;

	/*
	 * We need the size of the TCAM data and mask arrays in terms of
	 * 72-bit entries.
	 */
	unsigned int size72 = tcam_part_size[mc5->part_size] / 72;
	unsigned int server_base = t1_mc5_get_tcam_server_base(mc5);
	if (mc5->mode == MC5_MODE_144_BIT)
		server_base *= 2;  /* 1 144-bit entry is 2 72-bit entries */

	/* Clear the data array */
	dbgi_wr_data3(adap, 0, 0, 0);
	for (i = 0; i < size72; i++)
		if (mc5_write(adap, data_array_base + i, write_cmd))
			return -1;

	/* Initialize the mask array. */
	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
	for (i = 0; i < size72; i++) {
		if (i == server_base)   /* entering server or routing region */
			t1_write_reg_4(adap, A_MC5_DBGI_REQ_DATA0,
				       mc5->mode == MC5_MODE_144_BIT ?
				       0xfffffff9 : 0xfffffffd);
		if (mc5_write(adap, mask_array_base + i, write_cmd))
			return -1;
	}
	return 0;
}

static int init_lara7000(struct pemc5 *mc5)
{
	int i;
	adapter_t *adap = mc5->adapter;

	t1_write_reg_4(adap, A_MC5_RSP_LATENCY,
		       t1_is_asic(adap) ? 0x0a0a0a0a : 0x09090909);

	if (mc5->parity_enabled) {
		t1_write_reg_4(adap, A_MC5_AOPEN_SRCH_CMD, 0x20022);
		t1_write_reg_4(adap, A_MC5_SYN_SRCH_CMD, 0x20022);
		t1_write_reg_4(adap, A_MC5_ACK_SRCH_CMD, 0x20022);
	}

	/* Set DBGI command mode for Lara TCAM. */
	t1_write_reg_4(adap, A_MC5_DBGI_CONFIG, DBGI_MODE_LARA_7000);

	dbgi_wr_data3(adap, mc5->mode == MC5_MODE_144_BIT ?
		      MC5_LRA_CMDREG_144KEY_DATA0 : MC5_LRA_CMDREG_72KEY_DATA0,
		      0, 0);
	if (mc5_write(adap, MC5_LRA_CMDREG_ADR0, MC5_LRA_CMD_WRITE))
		goto err;

	dbgi_wr_data3(adap, mc5->mode == MC5_MODE_144_BIT ?
		      MC5_LRA_CFGREG_144KEY_DATA0 : MC5_LRA_CFGREG_72KEY_DATA0,
		      0, 0);
	if (mc5_write(adap, MC5_LRA_CFGREG_ADR0, MC5_LRA_CMD_WRITE))
		goto err;

	/* Global Mask Registers (GMR) 0-15 */
	for (i = 0; i < 16; i++) {
		if (i == 8 || i == 9)
			dbgi_wr_data3(adap, mc5->mode == MC5_MODE_72_BIT ?
				      0xfffffffd : 0xfffffff9, 0xffffffff,
				      0xff);
		else
			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);

		if (mc5_write(adap, MC5_LRA_GMRREG_BASE_ADR0_1 + i,
			      MC5_LRA_CMD_WRITE))
			goto err;
	}

	/* Global Mask Registers (GMR) 16-31 */
	for (i = 0; i < 16; i++) {
		if (i <= 1 && mc5->mode == MC5_MODE_72_BIT)
			dbgi_wr_data3(adap, 0xfffffffd, 0xffffc003, 0xff);
		else if (i == 0)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
		else if (i == 1)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
		else
			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);

		if (mc5_write(adap, MC5_LRA_GMRREG_BASE_ADR0_2 + i,
			      MC5_LRA_CMD_WRITE))
			goto err;
	}
	return init_mask_data_array(mc5, MC5_LRA_MSKARY_BASE_ADR0,
				    MC5_LRA_DATARY_BASE_ADR0,
				    MC5_LRA_CMD_WRITE);
 err:
	return -EIO;
}

static int init_idt52100(struct pemc5 *mc5)
{
	int i;
	adapter_t *adap = mc5->adapter;

	t1_write_reg_4(adap, A_MC5_RSP_LATENCY, 0x151515);
	t1_write_reg_4(adap, A_MC5_PART_ID_INDEX, 2);

	/*
	 * Use GMRs 8-9 for ACK and AOPEN searches, GMRs 12-13 for SYN search,
	 * and GMRs 14-15 for ELOOKUP.
	 */
	t1_write_reg_4(adap, A_MC5_POPEN_DATA_WR_CMD, MC5_IDT_CMD_WRITE);
	t1_write_reg_4(adap, A_MC5_POPEN_MASK_WR_CMD, MC5_IDT_CMD_WRITE);
	t1_write_reg_4(adap, A_MC5_AOPEN_SRCH_CMD, MC5_IDT_CMD_SEARCH);
	t1_write_reg_4(adap, A_MC5_AOPEN_LRN_CMD, MC5_IDT_CMD_LEARN);
	t1_write_reg_4(adap, A_MC5_SYN_SRCH_CMD, MC5_IDT_CMD_SEARCH | 0x6000);
	t1_write_reg_4(adap, A_MC5_SYN_LRN_CMD, MC5_IDT_CMD_LEARN);
	t1_write_reg_4(adap, A_MC5_ACK_SRCH_CMD, MC5_IDT_CMD_SEARCH);
	t1_write_reg_4(adap, A_MC5_ACK_LRN_CMD, MC5_IDT_CMD_LEARN);
	t1_write_reg_4(adap, A_MC5_ILOOKUP_CMD, MC5_IDT_CMD_SEARCH);
	t1_write_reg_4(adap, A_MC5_ELOOKUP_CMD, MC5_IDT_CMD_SEARCH | 0x7000);
	t1_write_reg_4(adap, A_MC5_DATA_WRITE_CMD, MC5_IDT_CMD_WRITE);
	t1_write_reg_4(adap, A_MC5_DATA_READ_CMD, MC5_IDT_CMD_READ);

	/* Set DBGI command mode for IDT TCAM. */
	t1_write_reg_4(adap, A_MC5_DBGI_CONFIG, DBGI_MODE_IDT_52100);

	/* Set up LAR */
	dbgi_wr_data3(adap, MC5_IDT_LAR_MODE144, 0, 0);
	if (mc5_write(adap, MC5_IDT_LAR_ADR0, MC5_IDT_CMD_WRITE))
		goto err;

	/* Set up SSRs */
	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
	if (mc5_write(adap, MC5_IDT_SSR0_ADR0, MC5_IDT_CMD_WRITE) ||
	    mc5_write(adap, MC5_IDT_SSR1_ADR0, MC5_IDT_CMD_WRITE))
		goto err;

	/* Set up GMRs */
	for (i = 0; i < 32; ++i) {
		if (i >= 12 && i < 15)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
		else if (i == 15)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
		else
			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);

		if (mc5_write(adap, MC5_IDT_GMR_BASE_ADR0 + i,
			      MC5_IDT_CMD_WRITE))
			goto err;
	}

	/* Set up SCR */
	dbgi_wr_data3(adap, 1, 0, 0);
	if (mc5_write(adap, MC5_IDT_SCR_ADR0, MC5_IDT_CMD_WRITE))
		goto err;

	return init_mask_data_array(mc5, MC5_IDT_MSKARY_BASE_ADR0,
				    MC5_IDT_DATARY_BASE_ADR0,
				    MC5_IDT_CMD_WRITE);
 err:
	return -EIO;
}

/* Put MC5 in DBGI mode. */
static inline void mc5_dbgi_mode_enable(struct pemc5 *mc5)
{
	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG,
		       V_MODE(mc5->mode == MC5_MODE_72_BIT) |
		       F_DBGI_ENABLE | V_NUM_LIP(MC5_LIP_NUM_OF_ENTRIES - 1));
}

/* Put MC5 in M-Bus mode. */
static void mc5_dbgi_mode_disable(struct pemc5 *mc5)
{
	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG,
		       V_MODE(mc5->mode == MC5_MODE_72_BIT) |
		       V_COMPRESSION_ENABLE(mc5->mode == MC5_MODE_72_BIT) |
		       V_PARITY_ENABLE(mc5->parity_enabled) |
		       V_SYN_ISSUE_MODE(mc5->issue_syn) | F_M_BUS_ENABLE |
		       V_NUM_LIP(MC5_LIP_NUM_OF_ENTRIES - 1));
}

/*
 * Initialization that requires the OS and protocol layers to already
 * be initialized goes here.
 */
int t1_mc5_init(struct pemc5 *mc5, unsigned int nservers,
		unsigned int nroutes, int parity, int syn)
{
	u32 cfg;
	int err = 0;
	unsigned int tcam_size = t1_mc5_get_tcam_size(mc5);
	adapter_t *adap = mc5->adapter;

	/* Reset the TCAM */
	cfg = t1_read_reg_4(adap, A_MC5_CONFIG) & ~F_MODE;
	cfg |= V_MODE(mc5->mode == MC5_MODE_72_BIT) | F_TCAM_RESET;
	t1_write_reg_4(adap, A_MC5_CONFIG, cfg);
	if (t1_wait_op_done(adap, A_MC5_CONFIG, F_TCAM_READY, 1, 500, 0)) {
		CH_ERR("%s: TCAM reset timed out\n", adapter_name(adap));
		return -1;
	}

	if (set_tcam_rtbl_base(mc5, tcam_size - nroutes) ||
	    set_tcam_server_base(mc5, tcam_size - nroutes - nservers))
		return -EINVAL;

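	/*
	 * The two calls above partition the TCAM from the top down: the
	 * highest nroutes entries form the routing region (starting at
	 * tcam_size - nroutes), the nservers entries below them form the
	 * server region, and the remaining low entries are left to the
	 * active region.  Illustrative example with hypothetical sizes:
	 * tcam_size = 32768, nroutes = 1024, nservers = 256 puts the
	 * routing region at index 31744 and the server region at 31488.
	 */
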
#ifdef SUPPORT_MODE72
	if (mc5->mode == MC5_MODE_72_BIT)
		t1_mc5_lip_write_entries(mc5);
#endif
	mc5->issue_syn = (unsigned char)syn;
	mc5->parity_enabled = (unsigned char)parity;

	/* All the TCAM addresses we access have only the low 32 bits non-zero */
	t1_write_reg_4(adap, A_MC5_DBGI_REQ_ADDR1, 0);
	t1_write_reg_4(adap, A_MC5_DBGI_REQ_ADDR2, 0);

	mc5_dbgi_mode_enable(mc5);

	switch (mc5->part_type) {
	case LARA_7000:
		err = init_lara7000(mc5);
		break;
	case IDT75P52100:
		err = init_idt52100(mc5);
		break;
	default:
		CH_ERR("%s: unsupported TCAM type\n", adapter_name(adap));
		err = -EINVAL;
		break;
	}

	mc5_dbgi_mode_disable(mc5);
	return err;
}

/*
 *	read_mc5_range - dump a part of the memory managed by MC5
 *	@mc5: the MC5 handle
 *	@start: the start address for the dump
 *	@n: number of 72-bit words to read
 *	@buf: result buffer
 *
 *	Read n 72-bit words from MC5 memory from the given start location.
 */
int t1_read_mc5_range(struct pemc5 *mc5, unsigned int start,
		      unsigned int n, u32 *buf)
{
	u32 read_cmd;
	int err = 0;
	adapter_t *adap = mc5->adapter;

	if (mc5->part_type == LARA_7000)
		read_cmd = MC5_LRA_CMD_READ;
	else if (mc5->part_type == IDT75P52100)
		read_cmd = MC5_IDT_CMD_READ;
	else
		return -EINVAL;

	mc5_dbgi_mode_enable(mc5);

	while (n--) {
		t1_write_reg_4(adap, A_MC5_DBGI_REQ_ADDR0, start++);
		if (mc5_cmd_write(adap, read_cmd)) {
			err = -EIO;
			break;
		}
		dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
		buf += 3;
	}

	mc5_dbgi_mode_disable(mc5);
	return err;
}

#define MC5_INT_MASK (F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR | \
	F_MC5_INT_HIT_IN_RT_REGION_ERR | F_MC5_INT_LIP0_ERR | \
	F_MC5_INT_LIP_MISS_ERR | F_MC5_INT_PARITY_ERR | \
	F_MC5_INT_ACTIVE_REGION_FULL | F_MC5_INT_NFA_SRCH_ERR | \
	F_MC5_INT_UNKNOWN_CMD | F_MC5_INT_DEL_ACT_EMPTY)
#define MC5_INT_FATAL (F_MC5_INT_PARITY_ERR | F_MC5_INT_REQUESTQ_PARITY_ERR | \
	F_MC5_INT_DISPATCHQ_PARITY_ERR)

void t1_mc5_intr_enable(struct pemc5 *mc5)
{
	u32 mask = MC5_INT_MASK;

	if (!mc5->parity_enabled)
		mask &= ~F_MC5_INT_PARITY_ERR;

#ifdef CONFIG_CHELSIO_T1_1G
	if (!t1_is_asic(mc5->adapter)) {
		/*
		 * Enable child block for MC5.
		 *
		 * NOTE: Assumes TP parent interrupt block is enabled.
		 *       MC5 requires TP parent block to be enabled.
		 */
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE, mask);
	} else
#endif
	{
		u32 pl_intr = t1_read_reg_4(mc5->adapter, A_PL_ENABLE);

		t1_write_reg_4(mc5->adapter, A_PL_ENABLE,
			       pl_intr | F_PL_INTR_MC5);
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE,
			       mask | F_MC5_INT_REQUESTQ_PARITY_ERR |
			       F_MC5_INT_DISPATCHQ_PARITY_ERR);
	}
}

void t1_mc5_intr_disable(struct pemc5 *mc5)
{
#ifdef CONFIG_CHELSIO_T1_1G
	if (!t1_is_asic(mc5->adapter))
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE, 0);
	else
#endif
	{
		u32 pl_intr = t1_read_reg_4(mc5->adapter, A_PL_ENABLE);

		t1_write_reg_4(mc5->adapter, A_PL_ENABLE,
			       pl_intr & ~F_PL_INTR_MC5);
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE, 0);
	}
}

void t1_mc5_intr_clear(struct pemc5 *mc5)
{
#ifdef CONFIG_CHELSIO_T1_1G
	if (!t1_is_asic(mc5->adapter)) {
		t1_write_reg_4(mc5->adapter, A_MC5_INT_CAUSE, 0xffffffff);
	} else
#endif
	{
		t1_write_reg_4(mc5->adapter, A_PL_CAUSE, F_PL_INTR_MC5);
		t1_write_reg_4(mc5->adapter, A_MC5_INT_CAUSE, 0xffffffff);
	}
}

/*
 * We don't really do anything with MC5 interrupts, just record them.
 */
void t1_mc5_intr_handler(struct pemc5 *mc5)
{
	adapter_t *adap = mc5->adapter;
	u32 cause = t1_read_reg_4(adap, A_MC5_INT_CAUSE);

	if (cause & F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR)
		mc5->intr_counts.hit_out_active_region_err++;

	if (cause & F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR)
		mc5->intr_counts.hit_in_active_region_err++;

	if (cause & F_MC5_INT_HIT_IN_RT_REGION_ERR)
		mc5->intr_counts.hit_in_routing_region_err++;

	if (cause & F_MC5_INT_MISS_ERR)
		mc5->intr_counts.miss_err++;

	if (cause & F_MC5_INT_LIP0_ERR)
		mc5->intr_counts.lip_equal_zero_err++;

	if (cause & F_MC5_INT_LIP_MISS_ERR)
		mc5->intr_counts.lip_miss_err++;

	if ((cause & F_MC5_INT_PARITY_ERR) && mc5->parity_enabled) {
		CH_ALERT("%s: MC5 parity error\n", adapter_name(adap));
		mc5->intr_counts.parity_err++;
	}

	if (cause & F_MC5_INT_ACTIVE_REGION_FULL)
		mc5->intr_counts.active_region_full_err++;

	if (cause & F_MC5_INT_NFA_SRCH_ERR)
		mc5->intr_counts.next_free_addr_srch_err++;

	if (cause & F_MC5_INT_SYN_COOKIE)
		mc5->intr_counts.syn_cookie++;

	if (cause & F_MC5_INT_SYN_COOKIE_BAD)
		mc5->intr_counts.syn_cookie_bad_message++;

	if (cause & F_MC5_INT_SYN_COOKIE_OFF)
		mc5->intr_counts.syn_cookie_off_message++;

	if (cause & F_MC5_INT_UNKNOWN_CMD)
		mc5->intr_counts.receive_unknown_cmd++;

	if (cause & F_MC5_INT_REQUESTQ_PARITY_ERR) {
		CH_ALERT("%s: MC5 request queue parity error\n",
			 adapter_name(adap));
		mc5->intr_counts.parity_in_request_q_err++;
	}

	if (cause & F_MC5_INT_DISPATCHQ_PARITY_ERR) {
		CH_ALERT("%s: MC5 dispatch queue parity error\n",
			 adapter_name(adap));
		mc5->intr_counts.parity_in_dispatch_q_err++;
	}

	if (cause & F_MC5_INT_DEL_ACT_EMPTY)
		mc5->intr_counts.del_and_act_is_empty++;

	if (cause & MC5_INT_FATAL)
		t1_fatal_err(adap);

	t1_write_reg_4(adap, A_MC5_INT_CAUSE, cause);
}

const struct pemc5_intr_counts *t1_mc5_get_intr_counts(struct pemc5 *mc5)
{
	return &mc5->intr_counts;
}

struct pemc5 * __devinit t1_mc5_create(adapter_t *adapter, int mode)
{
	struct pemc5 *mc5;
	u32 cfg, bits_per_entry;

	if (mode != MC5_MODE_144_BIT && mode != MC5_MODE_72_BIT)
		return NULL;

	mc5 = t1_os_malloc_wait_zero(sizeof(*mc5));
	if (!mc5) return NULL;

	mc5->adapter = adapter;
	mc5->mode = (unsigned char) mode;

	cfg = t1_read_reg_4(adapter, A_MC5_CONFIG);
	mc5->part_size = G_TCAM_PART_SIZE(cfg);
	mc5->part_type = (unsigned char) G_TCAM_PART_TYPE(cfg);
	if (cfg & F_TCAM_PART_TYPE_HI)
		mc5->part_type |= 4;

	/*
	 * Calculate the size of the TCAM based on the total memory, mode, and
	 * count information retrieved from the hardware.
	 */
	bits_per_entry = mode == MC5_MODE_144_BIT ? 144 : 72;
	mc5->tcam_size = tcam_part_size[mc5->part_size] / bits_per_entry;

	return mc5;
}

void t1_mc5_destroy(struct pemc5 *mc5)
{
	t1_os_free((void *)mc5, sizeof(*mc5));
}

#ifdef SUPPORT_MODE72
static int mc5_cmp(const void *pi, const void *pj)
{
	const u32 *pii = (const u32 *)pi;
	const u32 *pjj = (const u32 *)pj;

	if (*pii < *pjj)
		return -1;

	return *pii > *pjj;
}

/*
 * DESC: Write local IP addresses to the TCAM
 *
 * NOTES: IP addresses must be supplied in host byte order, e.g. the address
 *        10.0.0.140 is written as the value 0x0A00008C.
 */
static int mc5_set_lip_entries(struct pemc5 *mc5, u32 *p,
			       int num_of_lip_addresses)
{
	int i;

	/*
	 * Disable compression and M bus mode so that the TP core
	 * doesn't access the TCAM while we are writing.
	 */
	u32 cfg = t1_read_reg_4(mc5->adapter, A_MC5_CONFIG);
	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG,
		       cfg & ~(F_M_BUS_ENABLE | F_COMPRESSION_ENABLE));

	/* MC5 should now be ready to program the LIP addresses. */
	for (i = 0; i < num_of_lip_addresses; i++) {
		t1_write_reg_4(mc5->adapter, A_MC5_LIP_RAM_DATA, p[i]);
		t1_write_reg_4(mc5->adapter, A_MC5_LIP_RAM_ADDR, 0x100 + i);
	}

	/* Restore MC5 mode. */
	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG, cfg | F_COMPRESSION_ENABLE);
	return 0;
}

/*
 * The purpose of this routine is to write all of the local IP addresses
 * into the TCAM in sorted order. This is a requirement of the TCAM.
 */
void t1_mc5_lip_write_entries(struct pemc5 *mc5)
{
	u32 filler = 0;
	int i;

	if (mc5->lip_index) {
		qsort(mc5->lip, mc5->lip_index, sizeof(u32), mc5_cmp);
		filler = mc5->lip[mc5->lip_index - 1];
	}
	for (i = mc5->lip_index; i < MC5_LIP_NUM_OF_ENTRIES; i++)
		mc5->lip[i] = filler;
	mc5_set_lip_entries(mc5, mc5->lip, MC5_LIP_NUM_OF_ENTRIES);
}

void t1_mc5_lip_clear_entries(struct pemc5 *mc5)
{
	mc5->lip_index = 0;
}

/*
 * Add a local IP address to the LIP table.
 */
int t1_mc5_lip_add_entry(struct pemc5 *mc5, u32 lip)
{
	if (mc5->lip_index >= MC5_LIP_NUM_OF_ENTRIES) return 1;
	mc5->lip[mc5->lip_index++] = lip;
	return 0;
}
#endif
744