/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2003-2005 Chelsio Communications. All rights reserved.
 */

#include "common.h"
#include "regs.h"
#include "mc5.h"

/* DBGI command mode */
enum {
	DBGI_MODE_MBUS,
	DBGI_MODE_LARA_7000,
	DBGI_MODE_LARA_8000,
	DBGI_MODE_NETL_4000,
	DBGI_MODE_NETL_5000,
	DBGI_MODE_IDT_52100
};

/* Lara command register address and values (low 32 bits) */
#define	MC5_LRA_CMDREG_ADR0		0x00180038
#define	MC5_LRA_CMDREG_72KEY_DATA0	0x00000182
#define	MC5_LRA_CMDREG_144KEY_DATA0	0x00AAAB82

/* Lara config register address and values (low 32 bits) */
#define	MC5_LRA_CFGREG_ADR0		0x0018003D
#define	MC5_LRA_CFGREG_72KEY_DATA0	0x00000000
#define	MC5_LRA_CFGREG_144KEY_DATA0	0x55555555

/* Lara GMR base addresses (low 32 bits) */
#define	MC5_LRA_GMRREG_BASE_ADR0_1	0x00180020
#define	MC5_LRA_GMRREG_BASE_ADR0_2	0x00180060

/* Lara 7000 data and mask array base addresses (low 32 bits) */
#define	MC5_LRA_DATARY_BASE_ADR0	0x00000000
#define	MC5_LRA_MSKARY_BASE_ADR0	0x00080000

/* Lara commands */
#define	MC5_LRA_CMD_READ	0x00000000
#define	MC5_LRA_CMD_WRITE	0x00010001
#define	MC5_LRA_CMD_SEARCH	0x00020002
#define	MC5_LRA_CMD_LEARN	0x00030003

/* IDT 75P52100 commands */
#define	MC5_IDT_CMD_READ	0x0
#define	MC5_IDT_CMD_WRITE	0x1
#define	MC5_IDT_CMD_SEARCH	0x2
#define	MC5_IDT_CMD_LEARN	0x3
#define	MC5_IDT_CMD_NFA_SEARCH	0x4

/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
#define	MC5_IDT_LAR_ADR0	0x180006
#define	MC5_IDT_LAR_MODE144	0xffff0000

/* IDT SCR and SSR addresses (low 32 bits) */
#define	MC5_IDT_SCR_ADR0	0x180000
#define	MC5_IDT_SSR0_ADR0	0x180002
#define	MC5_IDT_SSR1_ADR0	0x180004

/* IDT GMR base address (low 32 bits) */
#define	MC5_IDT_GMR_BASE_ADR0	0x180020

/* IDT data and mask array base addresses (low 32 bits) */
#define	MC5_IDT_DATARY_BASE_ADR0	0x00000000
#define	MC5_IDT_MSKARY_BASE_ADR0	0x00080000

#define	IDT_ELOOKUP_2Mb		0x7000
#define	IDT_ELOOKUP_9Mb		0x16000

enum {
	LARA_7000,
	LARA_8000,
	NETLOGIC_4000,
	NETLOGIC_5000,
	IDT75P52100
};

static unsigned int tcam_part_size[] = {
	4718592,	/* 4.5Mb */
	9437184,	/* 9Mb */
	18874368	/* 18Mb */
};

struct pemc5 {
	adapter_t *adapter;
	unsigned int tcam_size;
	unsigned int part_size;
	unsigned char part_type;
	unsigned char parity_enabled;
	unsigned char issue_syn;
	unsigned char mode;
	struct pemc5_intr_counts intr_counts;
#ifdef SUPPORT_MODE72
	u32 lip[MC5_LIP_NUM_OF_ENTRIES];
	unsigned int lip_index;
#endif
};

#define	MAX_WRITE_ATTEMPTS 5

/*
 * Issue a command to the TCAM and wait for its completion. The address and
 * any data required by the command must have been set up by the caller.
 */
static int mc5_cmd_write(adapter_t *adapter, u32 cmd)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_CMD, cmd);
	return t1_wait_op_done(adapter, A_MC5_DBGI_RSP_STATUS,
	    F_DBGI_RSP_VALID, 1, MAX_WRITE_ATTEMPTS, 1);
}

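/* Total number of TCAM entries, in the configured key width. */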
unsigned int t1_mc5_get_tcam_size(struct pemc5 *mc5)
{
	return mc5->tcam_size;
}

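/*
 * Set the index of the first routing-table entry; the routing region runs
 * from this index to the end of the TCAM.
 */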
static int set_tcam_rtbl_base(struct pemc5 *mc5, unsigned int rtbl_base)
{
	if (rtbl_base >= t1_mc5_get_tcam_size(mc5))
		return -1;
	t1_write_reg_4(mc5->adapter, A_MC5_ROUTING_TABLE_INDEX, rtbl_base);
	return 0;
}

unsigned int t1_mc5_get_tcam_rtbl_base(struct pemc5 *mc5)
{
	return t1_read_reg_4(mc5->adapter, A_MC5_ROUTING_TABLE_INDEX);
}

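/* Number of TCAM entries in the routing-table region. */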
unsigned int t1_mc5_get_tcam_rtbl_size(struct pemc5 *mc5)
{
	unsigned int tcam_size = t1_mc5_get_tcam_size(mc5);
	unsigned int tcam_rtable_base = t1_mc5_get_tcam_rtbl_base(mc5);

	return tcam_size - tcam_rtable_base;
}

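/*
 * Set the index of the first server entry; the server region runs from this
 * index up to the start of the routing-table region.
 */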
static int set_tcam_server_base(struct pemc5 *mc5, unsigned int server_base)
{
	if (server_base >= t1_mc5_get_tcam_size(mc5))
		return -1;
	t1_write_reg_4(mc5->adapter, A_MC5_SERVER_INDEX, server_base);
	return 0;
}

unsigned int t1_mc5_get_tcam_server_base(struct pemc5 *mc5)
{
	return t1_read_reg_4(mc5->adapter, A_MC5_SERVER_INDEX);
}

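/* Number of TCAM entries in the server region. */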
unsigned int t1_mc5_get_tcam_server_size(struct pemc5 *mc5)
{
	unsigned int tcam_rtable_base = t1_mc5_get_tcam_rtbl_base(mc5);
	unsigned int tcam_server_base = t1_mc5_get_tcam_server_base(mc5);

	return tcam_rtable_base - tcam_server_base;
}

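/*
 * DBGI helpers: load the three request address words, load the three request
 * data words, or read back the three response data words.
 */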
static inline void dbgi_wr_addr3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR0, v1);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR1, v2);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR2, v3);
}

static inline void dbgi_wr_data3(adapter_t *adapter, u32 v1, u32 v2, u32 v3)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_DATA0, v1);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_DATA1, v2);
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_DATA2, v3);
}

static inline void dbgi_rd_rsp3(adapter_t *adapter, u32 *v1, u32 *v2, u32 *v3)
{
	*v1 = t1_read_reg_4(adapter, A_MC5_DBGI_RSP_DATA0);
	*v2 = t1_read_reg_4(adapter, A_MC5_DBGI_RSP_DATA1);
	*v3 = t1_read_reg_4(adapter, A_MC5_DBGI_RSP_DATA2);
}

/*
 * Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
 * command cmd. The data to be written must have been set up by the caller.
 * Returns -1 on failure, 0 on success.
 */
static int mc5_write(adapter_t *adapter, u32 addr_lo, u32 cmd)
{
	t1_write_reg_4(adapter, A_MC5_DBGI_REQ_ADDR0, addr_lo);
	if (mc5_cmd_write(adapter, cmd) == 0)
		return 0;
	CH_ERR("%s: MC5 timeout writing to TCAM address 0x%x\n",
	    adapter_name(adapter), addr_lo);
	return -1;
}

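/*
 * Initialize the TCAM's data and mask arrays: clear every data-array entry,
 * then write the mask array with all-ones entries, switching the low mask
 * word to 0xfffffff9 (144-bit mode) or 0xfffffffd (72-bit mode) once the
 * server/routing regions begin.
 */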
static int init_mask_data_array(struct pemc5 *mc5, u32 mask_array_base,
    u32 data_array_base, u32 write_cmd)
{
	unsigned int i;
	adapter_t *adap = mc5->adapter;

	/*
	 * We need the size of the TCAM data and mask arrays in terms of
	 * 72-bit entries.
	 */
	unsigned int size72 = tcam_part_size[mc5->part_size] / 72;
	unsigned int server_base = t1_mc5_get_tcam_server_base(mc5);

	if (mc5->mode == MC5_MODE_144_BIT)
		server_base *= 2;	/* 1 144-bit entry is 2 72-bit entries */

	/* Clear the data array */
	dbgi_wr_data3(adap, 0, 0, 0);
	for (i = 0; i < size72; i++)
		if (mc5_write(adap, data_array_base + i, write_cmd))
			return -1;

	/* Initialize the mask array. */
	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
	for (i = 0; i < size72; i++) {
		if (i == server_base)	/* entering server or routing region */
			t1_write_reg_4(adap, A_MC5_DBGI_REQ_DATA0,
			    mc5->mode == MC5_MODE_144_BIT ?
			    0xfffffff9 : 0xfffffffd);
		if (mc5_write(adap, mask_array_base + i, write_cmd))
			return -1;
	}
	return 0;
}

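/*
 * Bring up a Lara 7000 TCAM: program the response latency and search
 * commands, write the command and configuration registers for the selected
 * key width, set up the global mask registers (GMRs), and initialize the
 * data and mask arrays.
 */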
static int init_lara7000(struct pemc5 *mc5)
{
	int i;
	adapter_t *adap = mc5->adapter;

	t1_write_reg_4(adap, A_MC5_RSP_LATENCY,
	    t1_is_asic(adap) ? 0x0a0a0a0a : 0x09090909);

	if (mc5->parity_enabled) {
		t1_write_reg_4(adap, A_MC5_AOPEN_SRCH_CMD, 0x20022);
		t1_write_reg_4(adap, A_MC5_SYN_SRCH_CMD, 0x20022);
		t1_write_reg_4(adap, A_MC5_ACK_SRCH_CMD, 0x20022);
	}

	/* Set DBGI command mode for Lara TCAM. */
	t1_write_reg_4(adap, A_MC5_DBGI_CONFIG, DBGI_MODE_LARA_7000);

	dbgi_wr_data3(adap, mc5->mode == MC5_MODE_144_BIT ?
	    MC5_LRA_CMDREG_144KEY_DATA0 : MC5_LRA_CMDREG_72KEY_DATA0,
	    0, 0);
	if (mc5_write(adap, MC5_LRA_CMDREG_ADR0, MC5_LRA_CMD_WRITE))
		goto err;

	dbgi_wr_data3(adap, mc5->mode == MC5_MODE_144_BIT ?
	    MC5_LRA_CFGREG_144KEY_DATA0 : MC5_LRA_CFGREG_72KEY_DATA0,
	    0, 0);
	if (mc5_write(adap, MC5_LRA_CFGREG_ADR0, MC5_LRA_CMD_WRITE))
		goto err;

	/* Global Mask Registers (GMR) 0-15 */
	for (i = 0; i < 16; i++) {
		if (i == 8 || i == 9)
			dbgi_wr_data3(adap, mc5->mode == MC5_MODE_72_BIT ?
			    0xfffffffd : 0xfffffff9, 0xffffffff, 0xff);
		else
			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);

		if (mc5_write(adap, MC5_LRA_GMRREG_BASE_ADR0_1 + i,
		    MC5_LRA_CMD_WRITE))
			goto err;
	}

	/* Global Mask Registers (GMR) 16-31 */
	for (i = 0; i < 16; i++) {
		if (i <= 1 && mc5->mode == MC5_MODE_72_BIT)
			dbgi_wr_data3(adap, 0xfffffffd, 0xffffc003, 0xff);
		else if (i == 0)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
		else if (i == 1)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
		else
			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);

		if (mc5_write(adap, MC5_LRA_GMRREG_BASE_ADR0_2 + i,
		    MC5_LRA_CMD_WRITE))
			goto err;
	}
	return init_mask_data_array(mc5, MC5_LRA_MSKARY_BASE_ADR0,
	    MC5_LRA_DATARY_BASE_ADR0, MC5_LRA_CMD_WRITE);
err:
	return -EIO;
}

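/*
 * Bring up an IDT 75P52100 TCAM: program the response latency and the
 * per-operation command codes, select 144-bit mode via the LAR, set up the
 * SSRs, GMRs, and SCR, and initialize the data and mask arrays.
 */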
static int init_idt52100(struct pemc5 *mc5)
{
	int i;
	adapter_t *adap = mc5->adapter;

	t1_write_reg_4(adap, A_MC5_RSP_LATENCY, 0x151515);
	t1_write_reg_4(adap, A_MC5_PART_ID_INDEX, 2);

	/*
	 * Use GMRs 8-9 for ACK and AOPEN searches, GMRs 12-13 for SYN search,
	 * and GMRs 14-15 for ELOOKUP.
	 */
	t1_write_reg_4(adap, A_MC5_POPEN_DATA_WR_CMD, MC5_IDT_CMD_WRITE);
	t1_write_reg_4(adap, A_MC5_POPEN_MASK_WR_CMD, MC5_IDT_CMD_WRITE);
	t1_write_reg_4(adap, A_MC5_AOPEN_SRCH_CMD, MC5_IDT_CMD_SEARCH);
	t1_write_reg_4(adap, A_MC5_AOPEN_LRN_CMD, MC5_IDT_CMD_LEARN);
	t1_write_reg_4(adap, A_MC5_SYN_SRCH_CMD, MC5_IDT_CMD_SEARCH | 0x6000);
	t1_write_reg_4(adap, A_MC5_SYN_LRN_CMD, MC5_IDT_CMD_LEARN);
	t1_write_reg_4(adap, A_MC5_ACK_SRCH_CMD, MC5_IDT_CMD_SEARCH);
	t1_write_reg_4(adap, A_MC5_ACK_LRN_CMD, MC5_IDT_CMD_LEARN);
	t1_write_reg_4(adap, A_MC5_ILOOKUP_CMD, MC5_IDT_CMD_SEARCH);
	t1_write_reg_4(adap, A_MC5_ELOOKUP_CMD, MC5_IDT_CMD_SEARCH | 0x7000);
	t1_write_reg_4(adap, A_MC5_DATA_WRITE_CMD, MC5_IDT_CMD_WRITE);
	t1_write_reg_4(adap, A_MC5_DATA_READ_CMD, MC5_IDT_CMD_READ);

	/* Set DBGI command mode for IDT TCAM. */
	t1_write_reg_4(adap, A_MC5_DBGI_CONFIG, DBGI_MODE_IDT_52100);

	/* Set up LAR */
	dbgi_wr_data3(adap, MC5_IDT_LAR_MODE144, 0, 0);
	if (mc5_write(adap, MC5_IDT_LAR_ADR0, MC5_IDT_CMD_WRITE))
		goto err;

	/* Set up SSRs */
	dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
	if (mc5_write(adap, MC5_IDT_SSR0_ADR0, MC5_IDT_CMD_WRITE) ||
	    mc5_write(adap, MC5_IDT_SSR1_ADR0, MC5_IDT_CMD_WRITE))
		goto err;

	/* Set up GMRs */
	for (i = 0; i < 32; ++i) {
		if (i >= 12 && i < 15)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
		else if (i == 15)
			dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
		else
			dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);

		if (mc5_write(adap, MC5_IDT_GMR_BASE_ADR0 + i,
		    MC5_IDT_CMD_WRITE))
			goto err;
	}

	/* Set up SCR */
	dbgi_wr_data3(adap, 1, 0, 0);
	if (mc5_write(adap, MC5_IDT_SCR_ADR0, MC5_IDT_CMD_WRITE))
		goto err;

	return init_mask_data_array(mc5, MC5_IDT_MSKARY_BASE_ADR0,
	    MC5_IDT_DATARY_BASE_ADR0, MC5_IDT_CMD_WRITE);
err:
	return -EIO;
}

/* Put MC5 in DBGI mode. */
static inline void mc5_dbgi_mode_enable(struct pemc5 *mc5)
{
	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG,
	    V_MODE(mc5->mode == MC5_MODE_72_BIT) |
	    F_DBGI_ENABLE | V_NUM_LIP(MC5_LIP_NUM_OF_ENTRIES - 1));
}

/* Put MC5 in M-Bus mode. */
static void mc5_dbgi_mode_disable(struct pemc5 *mc5)
{
	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG,
	    V_MODE(mc5->mode == MC5_MODE_72_BIT) |
	    V_COMPRESSION_ENABLE(mc5->mode == MC5_MODE_72_BIT) |
	    V_PARITY_ENABLE(mc5->parity_enabled) |
	    V_SYN_ISSUE_MODE(mc5->issue_syn) | F_M_BUS_ENABLE |
	    V_NUM_LIP(MC5_LIP_NUM_OF_ENTRIES - 1));
}

/*
 * Initialization that requires the OS and protocol layers to already
 * be initialized goes here.
 */
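/*
 * Typical bring-up sequence (illustrative sketch only; the mode, server
 * count, and route count below are placeholders, not values required by
 * this driver):
 *
 *	struct pemc5 *mc5 = t1_mc5_create(adapter, MC5_MODE_144_BIT);
 *
 *	if (mc5 != NULL &&
 *	    t1_mc5_init(mc5, nservers, nroutes, 1, 0) == 0)
 *		t1_mc5_intr_enable(mc5);
 */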
int t1_mc5_init(struct pemc5 *mc5, unsigned int nservers,
    unsigned int nroutes, int parity, int syn)
{
	u32 cfg;
	int err = 0;
	unsigned int tcam_size = t1_mc5_get_tcam_size(mc5);
	adapter_t *adap = mc5->adapter;

	/* Reset the TCAM */
	cfg = t1_read_reg_4(adap, A_MC5_CONFIG) & ~F_MODE;
	cfg |= V_MODE(mc5->mode == MC5_MODE_72_BIT) | F_TCAM_RESET;
	t1_write_reg_4(adap, A_MC5_CONFIG, cfg);
	if (t1_wait_op_done(adap, A_MC5_CONFIG, F_TCAM_READY, 1, 500, 0)) {
		CH_ERR("%s: TCAM reset timed out\n", adapter_name(adap));
		return -1;
	}

	if (set_tcam_rtbl_base(mc5, tcam_size - nroutes) ||
	    set_tcam_server_base(mc5, tcam_size - nroutes - nservers))
		return -EINVAL;

#ifdef SUPPORT_MODE72
	if (mc5->mode == MC5_MODE_72_BIT)
		t1_mc5_lip_write_entries(mc5);
#endif
	mc5->issue_syn = (unsigned char)syn;
	mc5->parity_enabled = (unsigned char)parity;

	/* All the TCAM addresses we access have only the low 32 bits non-zero. */
	t1_write_reg_4(adap, A_MC5_DBGI_REQ_ADDR1, 0);
	t1_write_reg_4(adap, A_MC5_DBGI_REQ_ADDR2, 0);

	mc5_dbgi_mode_enable(mc5);

	switch (mc5->part_type) {
	case LARA_7000:
		err = init_lara7000(mc5);
		break;
	case IDT75P52100:
		err = init_idt52100(mc5);
		break;
	default:
		CH_ERR("%s: unsupported TCAM type\n", adapter_name(adap));
		err = -EINVAL;
		break;
	}

	mc5_dbgi_mode_disable(mc5);
	return err;
}

/*
 * read_mc5_range - dump a part of the memory managed by MC5
 * @mc5: the MC5 handle
 * @start: the start address for the dump
 * @n: number of 72-bit words to read
 * @buf: result buffer
 *
 * Read n 72-bit words from MC5 memory from the given start location.
 */
int t1_read_mc5_range(struct pemc5 *mc5, unsigned int start,
    unsigned int n, u32 *buf)
{
	u32 read_cmd;
	int err = 0;
	adapter_t *adap = mc5->adapter;

	if (mc5->part_type == LARA_7000)
		read_cmd = MC5_LRA_CMD_READ;
	else if (mc5->part_type == IDT75P52100)
		read_cmd = MC5_IDT_CMD_READ;
	else
		return -EINVAL;

	mc5_dbgi_mode_enable(mc5);

	while (n--) {
		t1_write_reg_4(adap, A_MC5_DBGI_REQ_ADDR0, start++);
		if (mc5_cmd_write(adap, read_cmd)) {
			err = -EIO;
			break;
		}
		dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
		buf += 3;
	}

	mc5_dbgi_mode_disable(mc5);
	return err;
}

#define	MC5_INT_MASK (F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR | \
	F_MC5_INT_HIT_IN_RT_REGION_ERR | F_MC5_INT_LIP0_ERR | \
	F_MC5_INT_LIP_MISS_ERR | F_MC5_INT_PARITY_ERR | \
	F_MC5_INT_ACTIVE_REGION_FULL | F_MC5_INT_NFA_SRCH_ERR | \
	F_MC5_INT_UNKNOWN_CMD | F_MC5_INT_DEL_ACT_EMPTY)
#define	MC5_INT_FATAL (F_MC5_INT_PARITY_ERR | F_MC5_INT_REQUESTQ_PARITY_ERR | \
	F_MC5_INT_DISPATCHQ_PARITY_ERR)

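/*
 * Enable MC5 interrupt reporting. Parity errors are enabled only when
 * parity checking is configured. On ASIC parts the MC5 source is also
 * unmasked in the top-level PL interrupt enable register.
 */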
void t1_mc5_intr_enable(struct pemc5 *mc5)
{
	u32 mask = MC5_INT_MASK;

	if (!mc5->parity_enabled)
		mask &= ~F_MC5_INT_PARITY_ERR;

#ifdef CONFIG_CHELSIO_T1_1G
	if (!t1_is_asic(mc5->adapter)) {
		/*
		 * Enable child block for MC5.
		 *
		 * NOTE: Assumes TP parent interrupt block is enabled.
		 * MC5 requires TP parent block to be enabled.
		 */
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE, mask);
	} else
#endif
	{
		u32 pl_intr = t1_read_reg_4(mc5->adapter, A_PL_ENABLE);

		t1_write_reg_4(mc5->adapter, A_PL_ENABLE,
		    pl_intr | F_PL_INTR_MC5);
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE,
		    mask | F_MC5_INT_REQUESTQ_PARITY_ERR |
		    F_MC5_INT_DISPATCHQ_PARITY_ERR);
	}
}

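/* Disable MC5 interrupts; on ASIC parts also mask the MC5 source in PL. */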
void t1_mc5_intr_disable(struct pemc5 *mc5)
{
#ifdef CONFIG_CHELSIO_T1_1G
	if (!t1_is_asic(mc5->adapter))
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE, 0);
	else
#endif
	{
		u32 pl_intr = t1_read_reg_4(mc5->adapter, A_PL_ENABLE);

		t1_write_reg_4(mc5->adapter, A_PL_ENABLE,
		    pl_intr & ~F_PL_INTR_MC5);
		t1_write_reg_4(mc5->adapter, A_MC5_INT_ENABLE, 0);
	}
}

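/* Clear any pending MC5 interrupt cause bits (and the PL MC5 cause on ASICs). */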
void t1_mc5_intr_clear(struct pemc5 *mc5)
{
#ifdef CONFIG_CHELSIO_T1_1G
	if (!t1_is_asic(mc5->adapter)) {
		t1_write_reg_4(mc5->adapter, A_MC5_INT_CAUSE, 0xffffffff);
	} else
#endif
	{
		t1_write_reg_4(mc5->adapter, A_PL_CAUSE, F_PL_INTR_MC5);
		t1_write_reg_4(mc5->adapter, A_MC5_INT_CAUSE, 0xffffffff);
	}
}

/*
 * We don't really do anything with MC5 interrupts, just record them.
 */
void t1_mc5_intr_handler(struct pemc5 *mc5)
{
	adapter_t *adap = mc5->adapter;
	u32 cause = t1_read_reg_4(adap, A_MC5_INT_CAUSE);

	if (cause & F_MC5_INT_HIT_OUT_ACTIVE_REGION_ERR)
		mc5->intr_counts.hit_out_active_region_err++;

	if (cause & F_MC5_INT_HIT_IN_ACTIVE_REGION_ERR)
		mc5->intr_counts.hit_in_active_region_err++;

	if (cause & F_MC5_INT_HIT_IN_RT_REGION_ERR)
		mc5->intr_counts.hit_in_routing_region_err++;

	if (cause & F_MC5_INT_MISS_ERR)
		mc5->intr_counts.miss_err++;

	if (cause & F_MC5_INT_LIP0_ERR)
		mc5->intr_counts.lip_equal_zero_err++;

	if (cause & F_MC5_INT_LIP_MISS_ERR)
		mc5->intr_counts.lip_miss_err++;

	if ((cause & F_MC5_INT_PARITY_ERR) && mc5->parity_enabled) {
		CH_ALERT("%s: MC5 parity error\n", adapter_name(adap));
		mc5->intr_counts.parity_err++;
	}

	if (cause & F_MC5_INT_ACTIVE_REGION_FULL)
		mc5->intr_counts.active_region_full_err++;

	if (cause & F_MC5_INT_NFA_SRCH_ERR)
		mc5->intr_counts.next_free_addr_srch_err++;

	if (cause & F_MC5_INT_SYN_COOKIE)
		mc5->intr_counts.syn_cookie++;

	if (cause & F_MC5_INT_SYN_COOKIE_BAD)
		mc5->intr_counts.syn_cookie_bad_message++;

	if (cause & F_MC5_INT_SYN_COOKIE_OFF)
		mc5->intr_counts.syn_cookie_off_message++;

	if (cause & F_MC5_INT_UNKNOWN_CMD)
		mc5->intr_counts.receive_unknown_cmd++;

	if (cause & F_MC5_INT_REQUESTQ_PARITY_ERR) {
		CH_ALERT("%s: MC5 request queue parity error\n",
		    adapter_name(adap));
		mc5->intr_counts.parity_in_request_q_err++;
	}

	if (cause & F_MC5_INT_DISPATCHQ_PARITY_ERR) {
		CH_ALERT("%s: MC5 dispatch queue parity error\n",
		    adapter_name(adap));
		mc5->intr_counts.parity_in_dispatch_q_err++;
	}

	if (cause & F_MC5_INT_DEL_ACT_EMPTY)
		mc5->intr_counts.del_and_act_is_empty++;

	if (cause & MC5_INT_FATAL)
		t1_fatal_err(adap);

	t1_write_reg_4(adap, A_MC5_INT_CAUSE, cause);
}

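/* Return the accumulated MC5 interrupt statistics. */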
const struct pemc5_intr_counts *t1_mc5_get_intr_counts(struct pemc5 *mc5)
{
	return &mc5->intr_counts;
}

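/*
 * Allocate and initialize the MC5 software state. The TCAM part type and
 * part size are read from hardware; the number of usable entries is derived
 * from the part size and the requested key width (72- or 144-bit mode).
 */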
struct pemc5 * __devinit t1_mc5_create(adapter_t *adapter, int mode)
{
	struct pemc5 *mc5;
	u32 cfg, bits_per_entry;

	if (mode != MC5_MODE_144_BIT && mode != MC5_MODE_72_BIT)
		return NULL;

	mc5 = t1_os_malloc_wait_zero(sizeof(*mc5));
	if (!mc5)
		return NULL;

	mc5->adapter = adapter;
	mc5->mode = (unsigned char)mode;

	cfg = t1_read_reg_4(adapter, A_MC5_CONFIG);
	mc5->part_size = G_TCAM_PART_SIZE(cfg);
	mc5->part_type = (unsigned char)G_TCAM_PART_TYPE(cfg);
	if (cfg & F_TCAM_PART_TYPE_HI)
		mc5->part_type |= 4;

	/*
	 * Calculate the size of the TCAM based on the total memory, mode, and
	 * count information retrieved from the hardware.
	 */
	bits_per_entry = mode == MC5_MODE_144_BIT ? 144 : 72;
	mc5->tcam_size = tcam_part_size[mc5->part_size] / bits_per_entry;

	return mc5;
}

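/* Free the MC5 state allocated by t1_mc5_create(). */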
void t1_mc5_destroy(struct pemc5 *mc5)
{
	t1_os_free((void *)mc5, sizeof(*mc5));
}

#ifdef SUPPORT_MODE72
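/* qsort() comparator: orders u32 local IP addresses in ascending order. */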
static int mc5_cmp(const void *pi, const void *pj)
{
	const u32 *pii = (const u32 *)pi;
	const u32 *pjj = (const u32 *)pj;

	if (*pii < *pjj)
		return -1;

	return *pii > *pjj;
}

/*
 * DESC: Write local IP addresses to the TCAM.
 *
 * NOTES: IP addresses must be in host byte order, so 10.0.0.140 is
 * written as 0x0A00008C.
 */
static int mc5_set_lip_entries(struct pemc5 *mc5, u32 *p,
    int num_of_lip_addresses)
{
	int i;

	/*
	 * Disable compression and M bus mode so that the TP core
	 * doesn't access the TCAM while we are writing.
	 */
	u32 cfg = t1_read_reg_4(mc5->adapter, A_MC5_CONFIG);

	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG,
	    cfg & ~(F_M_BUS_ENABLE | F_COMPRESSION_ENABLE));

	/* MC5 should now be ready to program the LIP addresses. */
	for (i = 0; i < num_of_lip_addresses; i++) {
		t1_write_reg_4(mc5->adapter, A_MC5_LIP_RAM_DATA, p[i]);
		t1_write_reg_4(mc5->adapter, A_MC5_LIP_RAM_ADDR, 0x100 + i);
	}

	/* Restore MC5 mode. */
	t1_write_reg_4(mc5->adapter, A_MC5_CONFIG, cfg | F_COMPRESSION_ENABLE);
	return 0;
}

/*
 * The purpose of this routine is to write all of the local IP addresses
 * into the TCAM in sorted order. This is a requirement of the TCAM.
 */
void t1_mc5_lip_write_entries(struct pemc5 *mc5)
{
	u32 filler = 0;
	int i;

	if (mc5->lip_index) {
		qsort(mc5->lip, mc5->lip_index, sizeof(u32), mc5_cmp);
		filler = mc5->lip[mc5->lip_index - 1];
	}
	for (i = mc5->lip_index; i < MC5_LIP_NUM_OF_ENTRIES; i++)
		mc5->lip[i] = filler;
	mc5_set_lip_entries(mc5, mc5->lip, MC5_LIP_NUM_OF_ENTRIES);
}

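/* Discard any LIP entries accumulated so far; nothing is written to hardware. */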
void t1_mc5_lip_clear_entries(struct pemc5 *mc5)
{
	mc5->lip_index = 0;
}

/*
 * Add a local IP address to the LIP table.
 */
int t1_mc5_lip_add_entry(struct pemc5 *mc5, u32 lip)
{
	if (mc5->lip_index >= MC5_LIP_NUM_OF_ENTRIES)
		return 1;
	mc5->lip[mc5->lip_index++] = lip;
	return 0;
}
#endif