1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <npi_fflp.h>
28 #include <npi_mac.h>
29 #include <nxge_defs.h>
30 #include <nxge_flow.h>
31 #include <nxge_fflp.h>
32 #include <nxge_impl.h>
33 #include <nxge_fflp_hash.h>
34 #include <nxge_common.h>
35
36
37 /*
38 * Function prototypes
39 */
40 static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t);
41 static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t);
42 static nxge_status_t nxge_fflp_tcam_init(p_nxge_t);
43 static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t);
44 static nxge_status_t nxge_fflp_fcram_init(p_nxge_t);
45 static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *);
46 static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
47 static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
48 static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
49 static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *,
50 tcam_entry_t *);
51 static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *,
52 tcam_entry_t *);
53 static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
54 tcam_entry_t *);
55 static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, uint64_t);
56 static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, uint64_t);
57 static uint16_t nxge_tcam_get_index(p_nxge_t, uint16_t);
58 static uint32_t nxge_tcam_cls_to_flow(uint32_t);
59 static uint8_t nxge_iptun_pkt_type_to_pid(uint8_t);
60 static npi_status_t nxge_set_iptun_usr_cls_reg(p_nxge_t, uint64_t,
61 iptun_cfg_t *);
62 static boolean_t nxge_is_iptun_cls_present(p_nxge_t, uint8_t, int *);
63
64 /*
65 * functions used outside this file
66 */
67 nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
68 nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
69 nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
70 static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t);
71 nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *);
72 nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
73 nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
74 uint32_t *, uint16_t *);
75 int nxge_get_valid_tcam_cnt(p_nxge_t);
76 void nxge_get_tcam_entry_all(p_nxge_t, rx_class_cfg_t *);
77 void nxge_get_tcam_entry(p_nxge_t, flow_resource_t *);
78 void nxge_del_tcam_entry(p_nxge_t, uint32_t);
79 void nxge_add_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t *);
80 void nxge_cfg_iptun_hash(p_nxge_t, iptun_cfg_t *, uint8_t);
81 void nxge_del_iptun_class(p_nxge_t, uint8_t);
82 void nxge_get_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t);
83 void nxge_set_ip_cls_sym(p_nxge_t, uint8_t, uint8_t);
84 void nxge_get_ip_cls_sym(p_nxge_t, uint8_t, uint8_t *);
85
86
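/*
 * nxge_tcam_dump_entry
 * Reads the TCAM entry and its associative RAM data at the given
 * location and logs the key, mask and ASC RAM contents.
 * INPUT
 * nxge soft state data structure, TCAM location
 * Return
 * NXGE_OK
 * NXGE_ERROR
 */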
87 nxge_status_t
88 nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
89 {
90 tcam_entry_t tcam_rdptr;
91 uint64_t asc_ram = 0;
92 npi_handle_t handle;
93 npi_status_t status;
94
95 handle = nxgep->npi_reg_handle;
96
97 bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry));
98 status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location,
99 (struct tcam_entry *)&tcam_rdptr);
100 if (status & NPI_FAILURE) {
101 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
102 " nxge_tcam_dump_entry:"
103 " tcam read failed at location %d ", location));
104 return (NXGE_ERROR);
105 }
106 status = npi_fflp_tcam_asc_ram_entry_read(handle,
107 (tcam_location_t)location, &asc_ram);
108
109 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n"
110 " key: %llx %llx %llx %llx \n"
111 " mask: %llx %llx %llx %llx \n"
112 " ASC RAM %llx \n", location,
113 tcam_rdptr.key0, tcam_rdptr.key1,
114 tcam_rdptr.key2, tcam_rdptr.key3,
115 tcam_rdptr.mask0, tcam_rdptr.mask1,
116 tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram));
117 return (NXGE_OK);
118 }
119
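/*
 * nxge_get_tcam
 * Dumps a single TCAM entry, or all of them when the location
 * passed in the message block is -1.
 */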
120 void
121 nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp)
122 {
123 uint32_t tcam_loc;
124 int *lptr;
125 int location;
126
127 uint32_t start_location = 0;
128 uint32_t stop_location = nxgep->classifier.tcam_size;
129 lptr = (int *)mp->b_rptr;
130 location = *lptr;
131
132 if ((location >= nxgep->classifier.tcam_size) || (location < -1)) {
133 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
134 "nxge_tcam_dump: Invalid location %d \n", location));
135 return;
136 }
137 if (location == -1) {
138 start_location = 0;
139 stop_location = nxgep->classifier.tcam_size;
140 } else {
141 start_location = location;
142 stop_location = location + 1;
143 }
144 for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
145 (void) nxge_tcam_dump_entry(nxgep, tcam_loc);
146 }
147
148 /*
149  * nxge_fflp_vlan_tbl_clear_all
150  * Invalidates all the VLAN RDC table entries.
151 * INPUT
152 * nxge soft state data structure
153 * Return
154 * NXGE_OK
155 * NXGE_ERROR
156 *
157 */
158
159 static nxge_status_t
160 nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep)
161 {
162 vlan_id_t vlan_id;
163 npi_handle_t handle;
164 npi_status_t rs = NPI_SUCCESS;
165 vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
166
167 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all "));
168 handle = nxgep->npi_reg_handle;
169 for (vlan_id = start; vlan_id < stop; vlan_id++) {
170 rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id);
171 if (rs != NPI_SUCCESS) {
172 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
173 "VLAN Table invalidate failed for vlan id %d ",
174 vlan_id));
175 return (NXGE_ERROR | rs);
176 }
177 }
178 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all "));
179 return (NXGE_OK);
180 }
181
182 /*
183  * The following functions are used by other modules to initialize
184  * the FFLP module.
185  * These functions form the basic API used to initialize
186  * the FFLP sub-blocks (TCAM, FCRAM, etc.).
187  *
188  * The TCAM search feature is disabled by default.
189 */
190
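/*
 * nxge_fflp_tcam_init
 * Disables the TCAM search, programs the TCAM access ratio and
 * disables the user-configurable ethernet and IP classes.
 */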
191 static nxge_status_t
192 nxge_fflp_tcam_init(p_nxge_t nxgep)
193 {
194 uint8_t access_ratio;
195 tcam_class_t class;
196 npi_status_t rs = NPI_SUCCESS;
197 npi_handle_t handle;
198
199 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init"));
200 handle = nxgep->npi_reg_handle;
201
202 rs = npi_fflp_cfg_tcam_disable(handle);
203 if (rs != NPI_SUCCESS) {
204 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n"));
205 return (NXGE_ERROR | rs);
206 }
207
208 access_ratio = nxgep->param_arr[param_tcam_access_ratio].value;
209 rs = npi_fflp_cfg_tcam_access(handle, access_ratio);
210 if (rs != NPI_SUCCESS) {
211 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
212 "failed TCAM Access cfg\n"));
213 return (NXGE_ERROR | rs);
214 }
215
216 	/* disable the configurable ethernet classes */
218 for (class = TCAM_CLASS_ETYPE_1;
219 class <= TCAM_CLASS_ETYPE_2; class++) {
220 rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class);
221 if (rs != NPI_SUCCESS) {
222 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
223 "TCAM USR Ether Class config failed."));
224 return (NXGE_ERROR | rs);
225 }
226 }
227
228 /* disable the configurable ip classes; */
229 for (class = TCAM_CLASS_IP_USER_4;
230 class <= TCAM_CLASS_IP_USER_7; class++) {
231 rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
232 if (rs != NPI_SUCCESS) {
233 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
234 			    "TCAM USR IP Class config failed."));
235 return (NXGE_ERROR | rs);
236 }
237 }
238 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init"));
239 return (NXGE_OK);
240 }
241
242 /*
243 * nxge_fflp_tcam_invalidate_all
244 * invalidates all the tcam entries.
245 * INPUT
246 * nxge soft state data structure
247 * Return
248 * NXGE_OK
249 * NXGE_ERROR
250 *
251 */
252
253
254 static nxge_status_t
255 nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep)
256 {
257 uint16_t location;
258 npi_status_t rs = NPI_SUCCESS;
259 npi_handle_t handle;
260 uint16_t start = 0, stop = nxgep->classifier.tcam_size;
261 p_nxge_hw_list_t hw_p;
262
263 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
264 "==> nxge_fflp_tcam_invalidate_all"));
265 handle = nxgep->npi_reg_handle;
266 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
267 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
268 " nxge_fflp_tcam_invalidate_all:"
269 		    " common hardware not set"));
270 return (NXGE_ERROR);
271 }
272 MUTEX_ENTER(&hw_p->nxge_tcam_lock);
273 for (location = start; location < stop; location++) {
274 rs = npi_fflp_tcam_entry_invalidate(handle, location);
275 if (rs != NPI_SUCCESS) {
276 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
277 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
278 "TCAM invalidate failed at loc %d ", location));
279 return (NXGE_ERROR | rs);
280 }
281 }
282 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
283 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
284 "<== nxge_fflp_tcam_invalidate_all"));
285 return (NXGE_OK);
286 }
287
288 /*
289  * nxge_fflp_fcram_invalidate_all
290 * invalidates all the FCRAM entries.
291 * INPUT
292 * nxge soft state data structure
293 * Return
294 * NXGE_OK
295 * NXGE_ERROR
296 *
297 */
298
299 static nxge_status_t
300 nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep)
301 {
302 npi_handle_t handle;
303 npi_status_t rs = NPI_SUCCESS;
304 part_id_t pid = 0;
305 uint8_t base_mask, base_reloc;
306 fcram_entry_t fc;
307 uint32_t location;
308 uint32_t increment, last_location;
309
310 /*
311 * (1) configure and enable partition 0 with no relocation
312 * (2) Assume the FCRAM is used as IPv4 exact match entry cells
313 * (3) Invalidate these cells by clearing the valid bit in
314 * the subareas 0 and 4
315 * (4) disable the partition
316 *
317 */
318
319 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all"));
320
321 base_mask = base_reloc = 0x0;
322 handle = nxgep->npi_reg_handle;
323 rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc);
324
325 if (rs != NPI_SUCCESS) {
326 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n"));
327 return (NXGE_ERROR | rs);
328 }
329 rs = npi_fflp_cfg_fcram_partition_disable(handle, pid);
330
331 if (rs != NPI_SUCCESS) {
332 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
333 		    "failed partition disable\n"));
334 return (NXGE_ERROR | rs);
335 }
336 fc.dreg[0].value = 0;
337 fc.hash_hdr_valid = 0;
338 fc.hash_hdr_ext = 1; /* specify as IPV4 exact match entry */
339 increment = sizeof (hash_ipv4_t);
340 last_location = FCRAM_SIZE * 0x40;
341
342 for (location = 0; location < last_location; location += increment) {
343 rs = npi_fflp_fcram_subarea_write(handle, pid,
344 location, fc.value[0]);
345 if (rs != NPI_SUCCESS) {
346 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
347 "failed write at location %x ", location));
348 return (NXGE_ERROR | rs);
349 }
350 }
351 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all"));
352 return (NXGE_OK);
353 }
354
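/*
 * nxge_fflp_fcram_init
 * Resets the FCRAM, programs the access ratio and refresh timers
 * (using the recommended defaults) and disables all partitions
 * until they are explicitly enabled.
 */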
355 static nxge_status_t
356 nxge_fflp_fcram_init(p_nxge_t nxgep)
357 {
358 fflp_fcram_output_drive_t strength;
359 fflp_fcram_qs_t qs;
360 npi_status_t rs = NPI_SUCCESS;
361 uint8_t access_ratio;
362 int partition;
363 npi_handle_t handle;
364 uint32_t min_time, max_time, sys_time;
365
366 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init"));
367
368 /*
369 * Recommended values are needed.
370 */
371 min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME;
372 max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME;
373 sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME;
374
375 handle = nxgep->npi_reg_handle;
376 strength = FCRAM_OUTDR_NORMAL;
377 qs = FCRAM_QS_MODE_QS;
378 rs = npi_fflp_cfg_fcram_reset(handle, strength, qs);
379 if (rs != NPI_SUCCESS) {
380 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. "));
381 return (NXGE_ERROR | rs);
382 }
383
384 access_ratio = nxgep->param_arr[param_fcram_access_ratio].value;
385 rs = npi_fflp_cfg_fcram_access(handle, access_ratio);
386 if (rs != NPI_SUCCESS) {
387 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio"
388 		    " configuration \n"));
389 return (NXGE_ERROR | rs);
390 }
391 rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time,
392 max_time, sys_time);
393 if (rs != NPI_SUCCESS) {
394 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
395 "failed FCRAM refresh cfg"));
396 return (NXGE_ERROR);
397 }
398
399 /* disable all the partitions until explicitly enabled */
400 for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) {
401 rs = npi_fflp_cfg_fcram_partition_disable(handle, partition);
402 if (rs != NPI_SUCCESS) {
403 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
404 "failed FCRAM partition"
405 			    " disable for partition %d ", partition));
406 return (NXGE_ERROR | rs);
407 }
408 }
409
410 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init"));
411 return (NXGE_OK);
412 }
413
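/*
 * nxge_logical_mac_assign_rdc_table
 * Programs the hostinfo entry of the given alternate MAC address
 * with the RDC table number and MAC preference from the class
 * configuration.
 */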
414 nxge_status_t
415 nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac)
416 {
417 npi_status_t rs = NPI_SUCCESS;
418 hostinfo_t mac_rdc;
419 npi_handle_t handle;
420 p_nxge_class_pt_cfg_t p_class_cfgp;
421
422 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
423 if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) {
424 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
425 " nxge_logical_mac_assign_rdc_table"
426 " unconfigured alt MAC addr %d ", alt_mac));
427 return (NXGE_ERROR);
428 }
429 handle = nxgep->npi_reg_handle;
430 mac_rdc.value = 0;
431 mac_rdc.bits.w0.rdc_tbl_num =
432 p_class_cfgp->mac_host_info[alt_mac].rdctbl;
433 mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr;
434
435 rs = npi_mac_hostinfo_entry(handle, OP_SET,
436 nxgep->function_num, alt_mac, &mac_rdc);
437
438 if (rs != NPI_SUCCESS) {
439 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
440 "failed Assign RDC table"));
441 return (NXGE_ERROR | rs);
442 }
443 return (NXGE_OK);
444 }
445
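/*
 * nxge_main_mac_assign_rdc_table
 * Programs the hostinfo entry of the port's primary MAC address
 * (XMAC for functions 0/1, BMAC for functions 2/3) with the
 * port's MAC RDC group.
 */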
446 nxge_status_t
447 nxge_main_mac_assign_rdc_table(p_nxge_t nxgep)
448 {
449 npi_status_t rs = NPI_SUCCESS;
450 hostinfo_t mac_rdc;
451 npi_handle_t handle;
452 int i;
453
454 handle = nxgep->npi_reg_handle;
455 mac_rdc.value = 0;
456 mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp;
457 mac_rdc.bits.w0.mac_pref = 1;
458 switch (nxgep->function_num) {
459 case 0:
460 case 1:
461 /*
462 * Tests indicate that it is OK not to re-initialize the
463 * hostinfo registers for the XMAC's alternate MAC
464 * addresses. But that is necessary for BMAC (case 2
465 * and case 3 below)
466 */
467 rs = npi_mac_hostinfo_entry(handle, OP_SET,
468 nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
469 break;
470 case 2:
471 case 3:
472 rs = npi_mac_hostinfo_entry(handle, OP_SET,
473 nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
474 for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++)
475 rs |= npi_mac_hostinfo_entry(handle, OP_SET,
476 nxgep->function_num, i, &mac_rdc);
477 break;
478 default:
479 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
480 "failed Assign RDC table (invalid function #)"));
481 return (NXGE_ERROR);
482 }
483
484 if (rs != NPI_SUCCESS) {
485 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
486 "failed Assign RDC table"));
487 return (NXGE_ERROR | rs);
488 }
489 return (NXGE_OK);
490 }
491
492 /*
493 * Initialize hostinfo registers for alternate MAC addresses and
494 * multicast MAC address.
495 */
496 nxge_status_t
497 nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep)
498 {
499 npi_status_t rs = NPI_SUCCESS;
500 hostinfo_t mac_rdc;
501 npi_handle_t handle;
502
503 handle = nxgep->npi_reg_handle;
504 mac_rdc.value = 0;
505 mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp;
506 mac_rdc.bits.w0.mac_pref = 1;
507 switch (nxgep->function_num) {
508 case 0:
509 case 1:
510 rs = npi_mac_hostinfo_entry(handle, OP_SET,
511 nxgep->function_num, XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
512 break;
513 case 2:
514 case 3:
515 rs = npi_mac_hostinfo_entry(handle, OP_SET,
516 nxgep->function_num, BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
517 break;
518 default:
519 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
520 "failed Assign RDC table (invalid function #)"));
521 return (NXGE_ERROR);
522 }
523
524 if (rs != NPI_SUCCESS) {
525 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
526 "failed Assign RDC table"));
527 return (NXGE_ERROR | rs);
528 }
529 return (NXGE_OK);
530 }
531
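/*
 * nxge_fflp_init_hostinfo
 * Initializes the hostinfo entries for the multicast and primary
 * MAC addresses.
 */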
532 nxge_status_t
533 nxge_fflp_init_hostinfo(p_nxge_t nxgep)
534 {
535 nxge_status_t status = NXGE_OK;
536
537 status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
538 status |= nxge_main_mac_assign_rdc_table(nxgep);
539 return (status);
540 }
541
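/*
 * nxge_fflp_hw_reset
 * Resets the FFLP hardware: initializes the FCRAM (Neptune only)
 * and TCAM, enables LLC/SNAP parsing, initializes the hash
 * polynomials and invalidates the TCAM, FCRAM and VLAN tables.
 */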
542 nxge_status_t
543 nxge_fflp_hw_reset(p_nxge_t nxgep)
544 {
545 npi_handle_t handle;
546 npi_status_t rs = NPI_SUCCESS;
547 nxge_status_t status = NXGE_OK;
548
549 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset"));
550
551 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
552 status = nxge_fflp_fcram_init(nxgep);
553 if (status != NXGE_OK) {
554 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
555 " failed FCRAM init. "));
556 return (status);
557 }
558 }
559
560 status = nxge_fflp_tcam_init(nxgep);
561 if (status != NXGE_OK) {
562 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
563 "failed TCAM init."));
564 return (status);
565 }
566
567 handle = nxgep->npi_reg_handle;
568 rs = npi_fflp_cfg_llcsnap_enable(handle);
569 if (rs != NPI_SUCCESS) {
570 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
571 "failed LLCSNAP enable. "));
572 return (NXGE_ERROR | rs);
573 }
574
575 rs = npi_fflp_cfg_cam_errorcheck_disable(handle);
576 if (rs != NPI_SUCCESS) {
577 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
578 		    "failed CAM Error Check disable. "));
579 return (NXGE_ERROR | rs);
580 }
581
582 /* init the hash generators */
583 rs = npi_fflp_cfg_hash_h1poly(handle, 0);
584 if (rs != NPI_SUCCESS) {
585 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
586 "failed H1 Poly Init. "));
587 return (NXGE_ERROR | rs);
588 }
589
590 rs = npi_fflp_cfg_hash_h2poly(handle, 0);
591 if (rs != NPI_SUCCESS) {
592 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
593 "failed H2 Poly Init. "));
594 return (NXGE_ERROR | rs);
595 }
596
597 /* invalidate TCAM entries */
598 status = nxge_fflp_tcam_invalidate_all(nxgep);
599 if (status != NXGE_OK) {
600 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
601 "failed TCAM Entry Invalidate. "));
602 return (status);
603 }
604
605 /* invalidate FCRAM entries */
606 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
607 status = nxge_fflp_fcram_invalidate_all(nxgep);
608 if (status != NXGE_OK) {
609 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
610 "failed FCRAM Entry Invalidate."));
611 return (status);
612 }
613 }
614
615 /* invalidate VLAN RDC tables */
616 status = nxge_fflp_vlan_tbl_clear_all(nxgep);
617 if (status != NXGE_OK) {
618 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
619 "failed VLAN Table Invalidate. "));
620 return (status);
621 }
622 nxgep->classifier.state |= NXGE_FFLP_HW_RESET;
623
624 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset"));
625 return (NXGE_OK);
626 }
627
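/*
 * nxge_cfg_ip_cls_flow_key
 * Translates the NXGE_CLASS_FLOW_USE_* bits in class_config into a
 * flow_key_cfg_t and programs the flow key for the given L3 class.
 */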
628 nxge_status_t
629 nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class,
630 uint32_t class_config)
631 {
632 flow_key_cfg_t fcfg;
633 npi_handle_t handle;
634 npi_status_t rs = NPI_SUCCESS;
635
636 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key"));
637 handle = nxgep->npi_reg_handle;
638 bzero(&fcfg, sizeof (flow_key_cfg_t));
639
640 if (class_config & NXGE_CLASS_FLOW_USE_PROTO)
641 fcfg.use_proto = 1;
642 if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT)
643 fcfg.use_dport = 1;
644 if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT)
645 fcfg.use_sport = 1;
646 if (class_config & NXGE_CLASS_FLOW_USE_IPDST)
647 fcfg.use_daddr = 1;
648 if (class_config & NXGE_CLASS_FLOW_USE_IPSRC)
649 fcfg.use_saddr = 1;
650 if (class_config & NXGE_CLASS_FLOW_USE_VLAN)
651 fcfg.use_vlan = 1;
652 if (class_config & NXGE_CLASS_FLOW_USE_L2DA)
653 fcfg.use_l2da = 1;
654 if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM)
655 fcfg.use_portnum = 1;
656 fcfg.ip_opts_exist = 0;
657
658 rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg);
659 if (rs & NPI_FFLP_ERROR) {
660 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
661 " opt %x for class %d failed ", class_config, l3_class));
662 return (NXGE_ERROR | rs);
663 }
664 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key"));
665 return (NXGE_OK);
666 }
667
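/*
 * nxge_cfg_ip_cls_flow_key_get
 * Reads back the flow key configuration of the given L3 class and
 * returns it as NXGE_CLASS_FLOW_USE_* bits in *class_config.
 */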
668 nxge_status_t
669 nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class,
670 uint32_t *class_config)
671 {
672 flow_key_cfg_t fcfg;
673 npi_handle_t handle;
674 npi_status_t rs = NPI_SUCCESS;
675 uint32_t ccfg = 0;
676
677 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get"));
678 handle = nxgep->npi_reg_handle;
679 bzero(&fcfg, sizeof (flow_key_cfg_t));
680
681 rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg);
682 if (rs & NPI_FFLP_ERROR) {
683 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key_get"
684 		    " for class %d failed ", l3_class));
685 return (NXGE_ERROR | rs);
686 }
687
688 if (fcfg.use_proto)
689 ccfg |= NXGE_CLASS_FLOW_USE_PROTO;
690 if (fcfg.use_dport)
691 ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT;
692 if (fcfg.use_sport)
693 ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT;
694 if (fcfg.use_daddr)
695 ccfg |= NXGE_CLASS_FLOW_USE_IPDST;
696 if (fcfg.use_saddr)
697 ccfg |= NXGE_CLASS_FLOW_USE_IPSRC;
698 if (fcfg.use_vlan)
699 ccfg |= NXGE_CLASS_FLOW_USE_VLAN;
700 if (fcfg.use_l2da)
701 ccfg |= NXGE_CLASS_FLOW_USE_L2DA;
702 if (fcfg.use_portnum)
703 ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM;
704
705 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
706 " nxge_cfg_ip_cls_flow_key_get %x", ccfg));
707 *class_config = ccfg;
708
709 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
710 " <== nxge_cfg_ip_cls_flow_key_get"));
711 return (NXGE_OK);
712 }
713
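/*
 * nxge_cfg_tcam_ip_class_get
 * Reads back the TCAM key configuration of the given class and
 * returns it as NXGE_CLASS_* bits in *class_config.
 */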
714 static nxge_status_t
715 nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class,
716 uint32_t *class_config)
717 {
718 npi_status_t rs = NPI_SUCCESS;
719 tcam_key_cfg_t cfg;
720 npi_handle_t handle;
721 uint32_t ccfg = 0;
722
723 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class_get"));
724
725 bzero(&cfg, sizeof (tcam_key_cfg_t));
726 handle = nxgep->npi_reg_handle;
727
728 rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg);
729 if (rs & NPI_FFLP_ERROR) {
730 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class_get"
731 		    " for class %d failed ", class));
732 return (NXGE_ERROR | rs);
733 }
734 if (cfg.discard)
735 ccfg |= NXGE_CLASS_DISCARD;
736 if (cfg.lookup_enable)
737 ccfg |= NXGE_CLASS_TCAM_LOOKUP;
738 if (cfg.use_ip_daddr)
739 ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR;
740 *class_config = ccfg;
741 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
742 	    " <== nxge_cfg_tcam_ip_class_get %x", ccfg));
743 return (NXGE_OK);
744 }
745
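/*
 * nxge_cfg_tcam_ip_class
 * Caches class_config in the software class configuration and
 * programs the TCAM key (discard, lookup enable, use source
 * address) for the given class.
 */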
746 static nxge_status_t
747 nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class,
748 uint32_t class_config)
749 {
750 npi_status_t rs = NPI_SUCCESS;
751 tcam_key_cfg_t cfg;
752 npi_handle_t handle;
753 p_nxge_class_pt_cfg_t p_class_cfgp;
754
755 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
756
757 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
758 p_class_cfgp->class_cfg[class] = class_config;
759
760 bzero(&cfg, sizeof (tcam_key_cfg_t));
761 handle = nxgep->npi_reg_handle;
762 cfg.discard = 0;
763 cfg.lookup_enable = 0;
764 cfg.use_ip_daddr = 0;
765 if (class_config & NXGE_CLASS_DISCARD)
766 cfg.discard = 1;
767 if (class_config & NXGE_CLASS_TCAM_LOOKUP)
768 cfg.lookup_enable = 1;
769 if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR)
770 cfg.use_ip_daddr = 1;
771
772 rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg);
773 if (rs & NPI_FFLP_ERROR) {
774 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
775 " opt %x for class %d failed ", class_config, class));
776 return (NXGE_ERROR | rs);
777 }
778 return (NXGE_OK);
779 }
780
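/*
 * nxge_fflp_set_hash1
 * Caches and programs the H1 hash polynomial. nxge_fflp_set_hash2
 * below does the same for the H2 polynomial.
 */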
781 nxge_status_t
782 nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1)
783 {
784 npi_status_t rs = NPI_SUCCESS;
785 npi_handle_t handle;
786 p_nxge_class_pt_cfg_t p_class_cfgp;
787
788 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h1"));
789 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
790 p_class_cfgp->init_h1 = h1;
791 handle = nxgep->npi_reg_handle;
792 rs = npi_fflp_cfg_hash_h1poly(handle, h1);
793 if (rs & NPI_FFLP_ERROR) {
794 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
795 " nxge_fflp_init_h1 %x failed ", h1));
796 return (NXGE_ERROR | rs);
797 }
798 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h1"));
799 return (NXGE_OK);
800 }
801
802 nxge_status_t
803 nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2)
804 {
805 npi_status_t rs = NPI_SUCCESS;
806 npi_handle_t handle;
807 p_nxge_class_pt_cfg_t p_class_cfgp;
808
809 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h2"));
810 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
811 p_class_cfgp->init_h2 = h2;
812
813 handle = nxgep->npi_reg_handle;
814 rs = npi_fflp_cfg_hash_h2poly(handle, h2);
815 if (rs & NPI_FFLP_ERROR) {
816 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
817 " nxge_fflp_init_h2 %x failed ", h2));
818 return (NXGE_ERROR | rs);
819 }
820 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h2"));
821 return (NXGE_OK);
822 }
823
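/*
 * nxge_classify_init_sw
 * Initializes the software classifier state: the per-port TCAM size
 * and shadow entries, the H1 table and CRC-CCITT tables used for
 * flow hashing, and the workaround for fragmented packets.
 */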
824 nxge_status_t
825 nxge_classify_init_sw(p_nxge_t nxgep)
826 {
827 nxge_classify_t *classify_ptr;
828
829 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
830 classify_ptr = &nxgep->classifier;
831
832 if (classify_ptr->state & NXGE_FFLP_SW_INIT) {
833 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
834 "nxge_classify_init_sw already init"));
835 return (NXGE_OK);
836 }
837
838 classify_ptr->tcam_size = nxgep->nxge_hw_p->tcam_size / nxgep->nports;
839 classify_ptr->tcam_entries = (tcam_flow_spec_t *)nxgep->nxge_hw_p->tcam;
840 classify_ptr->tcam_top = nxgep->function_num;
841
842 /* Init defaults */
843 /*
844 	 * add workarounds required for HW shortcomings; for example, code
845 	 * to handle fragmented packets
846 */
847 nxge_init_h1_table();
848 nxge_crc_ccitt_init();
849 nxgep->classifier.tcam_location = nxgep->function_num;
850 nxgep->classifier.fragment_bug = 1;
851 classify_ptr->state |= NXGE_FFLP_SW_INIT;
852
853 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw"));
854 return (NXGE_OK);
855 }
856
857 nxge_status_t
858 nxge_classify_exit_sw(p_nxge_t nxgep)
859 {
860 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
861 nxgep->classifier.state = NULL;
862 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
863 return (NXGE_OK);
864 }
865
866 /*
867 * Figures out the RDC Group for the entry
868 *
869  * The current implementation is just a placeholder; it
870  * returns the port's default MAC RDC group.
871  * A real location-determining algorithm would consider
872  * the partition, etc., before deciding which group to use.
873 *
874 */
875
876 /* ARGSUSED */
877 static uint8_t
878 nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
879 {
880 int use_port_rdc_grp = 0;
881 uint8_t rdc_grp = 0;
882 p_nxge_dma_pt_cfg_t p_dma_cfgp;
883 p_nxge_hw_pt_cfg_t p_cfgp;
884 p_nxge_rdc_grp_t rdc_grp_p;
885
886 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
887 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
888 rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp];
889 rdc_grp = p_cfgp->def_mac_rxdma_grpid;
890
891 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
892 "nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n",
893 cookie, rdc_grp, rdc_grp_p));
894 return (rdc_grp);
895 }
896
897 /* ARGSUSED */
898 static uint8_t
899 nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
900 {
901 return ((uint8_t)cookie);
902 }
903
904 /* ARGSUSED */
905 static void
906 nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec,
907 tcam_entry_t *tcam_ptr)
908 {
909 udpip4_spec_t *fspec_key;
910 udpip4_spec_t *fspec_mask;
911
912 fspec_key = (udpip4_spec_t *)&flow_spec->uh.udpip4spec;
913 fspec_mask = (udpip4_spec_t *)&flow_spec->um.udpip4spec;
914 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
915 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
916 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
917 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
918 TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
919 fspec_key->pdst, fspec_key->psrc);
920 TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
921 fspec_mask->pdst, fspec_mask->psrc);
922 TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
923 tcam_ptr->ip4_class_mask,
924 TCAM_CLASS_UDP_IPV4);
925 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
926 tcam_ptr->ip4_proto_mask,
927 IPPROTO_UDP);
928 tcam_ptr->ip4_tos_key = fspec_key->tos;
929 tcam_ptr->ip4_tos_mask = fspec_mask->tos;
930 }
931
932 static void
933 nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
934 tcam_entry_t *tcam_ptr)
935 {
936 udpip6_spec_t *fspec_key;
937 udpip6_spec_t *fspec_mask;
938 p_nxge_class_pt_cfg_t p_class_cfgp;
939
940 fspec_key = (udpip6_spec_t *)&flow_spec->uh.udpip6spec;
941 fspec_mask = (udpip6_spec_t *)&flow_spec->um.udpip6spec;
942 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
943 if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
944 NXGE_CLASS_TCAM_USE_SRC_ADDR) {
945 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
946 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
947 } else {
948 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
949 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
950 }
951
952 TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
953 tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6);
954 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
955 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP);
956 TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
957 fspec_key->pdst, fspec_key->psrc);
958 TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
959 fspec_mask->pdst, fspec_mask->psrc);
960 tcam_ptr->ip6_tos_key = fspec_key->tos;
961 tcam_ptr->ip6_tos_mask = fspec_mask->tos;
962 }
963
964 /* ARGSUSED */
965 static void
966 nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec,
967 tcam_entry_t *tcam_ptr)
968 {
969 tcpip4_spec_t *fspec_key;
970 tcpip4_spec_t *fspec_mask;
971
972 fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
973 fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
974
975 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
976 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
977 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
978 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
979 TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
980 fspec_key->pdst, fspec_key->psrc);
981 TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
982 fspec_mask->pdst, fspec_mask->psrc);
983 TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
984 tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
985 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
986 tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
987 tcam_ptr->ip4_tos_key = fspec_key->tos;
988 tcam_ptr->ip4_tos_mask = fspec_mask->tos;
989 }
990
991 /* ARGSUSED */
992 static void
993 nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec,
994 tcam_entry_t *tcam_ptr)
995 {
996 tcpip4_spec_t *fspec_key;
997 tcpip4_spec_t *fspec_mask;
998
999 fspec_key = (tcpip4_spec_t *)&flow_spec->uh.tcpip4spec;
1000 fspec_mask = (tcpip4_spec_t *)&flow_spec->um.tcpip4spec;
1001
1002 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1003 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1004 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1005 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1006 TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1007 tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4);
1008 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1009 tcam_ptr->ip4_proto_mask, IPPROTO_SCTP);
1010 TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
1011 fspec_key->pdst, fspec_key->psrc);
1012 TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
1013 fspec_mask->pdst, fspec_mask->psrc);
1014 tcam_ptr->ip4_tos_key = fspec_key->tos;
1015 tcam_ptr->ip4_tos_mask = fspec_mask->tos;
1016 }
1017
1018 static void
1019 nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1020 tcam_entry_t *tcam_ptr)
1021 {
1022 tcpip6_spec_t *fspec_key;
1023 tcpip6_spec_t *fspec_mask;
1024 p_nxge_class_pt_cfg_t p_class_cfgp;
1025
1026 fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1027 fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1028
1029 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1030 	if (p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV6] &
1031 NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1032 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1033 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1034 } else {
1035 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1036 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1037 }
1038
1039 TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1040 tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6);
1041 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1042 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP);
1043 TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1044 fspec_key->pdst, fspec_key->psrc);
1045 TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1046 fspec_mask->pdst, fspec_mask->psrc);
1047 tcam_ptr->ip6_tos_key = fspec_key->tos;
1048 tcam_ptr->ip6_tos_mask = fspec_mask->tos;
1049 }
1050
1051 static void
1052 nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1053 tcam_entry_t *tcam_ptr)
1054 {
1055 tcpip6_spec_t *fspec_key;
1056 tcpip6_spec_t *fspec_mask;
1057 p_nxge_class_pt_cfg_t p_class_cfgp;
1058
1059 fspec_key = (tcpip6_spec_t *)&flow_spec->uh.tcpip6spec;
1060 fspec_mask = (tcpip6_spec_t *)&flow_spec->um.tcpip6spec;
1061 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1062
1063 	if (p_class_cfgp->class_cfg[TCAM_CLASS_SCTP_IPV6] &
1064 NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1065 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1066 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1067 } else {
1068 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1069 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1070 }
1071
1072 TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1073 tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6);
1074 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1075 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP);
1076 TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1077 fspec_key->pdst, fspec_key->psrc);
1078 TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1079 fspec_mask->pdst, fspec_mask->psrc);
1080 tcam_ptr->ip6_tos_key = fspec_key->tos;
1081 tcam_ptr->ip6_tos_mask = fspec_mask->tos;
1082 }
1083
1084 /* ARGSUSED */
1085 static void
1086 nxge_fill_tcam_entry_ah_esp(p_nxge_t nxgep, flow_spec_t *flow_spec,
1087 tcam_entry_t *tcam_ptr)
1088 {
1089 ahip4_spec_t *fspec_key;
1090 ahip4_spec_t *fspec_mask;
1091
1092 fspec_key = (ahip4_spec_t *)&flow_spec->uh.ahip4spec;
1093 fspec_mask = (ahip4_spec_t *)&flow_spec->um.ahip4spec;
1094
1095 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1096 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1097 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1098 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1099
1100 tcam_ptr->ip4_port_key = fspec_key->spi;
1101 tcam_ptr->ip4_port_mask = fspec_mask->spi;
1102
1103 TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1104 tcam_ptr->ip4_class_mask,
1105 TCAM_CLASS_AH_ESP_IPV4);
1106
1107 if (flow_spec->flow_type == FSPEC_AHIP4) {
1108 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1109 tcam_ptr->ip4_proto_mask, IPPROTO_AH);
1110 } else {
1111 TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1112 tcam_ptr->ip4_proto_mask, IPPROTO_ESP);
1113 }
1114 tcam_ptr->ip4_tos_key = fspec_key->tos;
1115 tcam_ptr->ip4_tos_mask = fspec_mask->tos;
1116 }
1117
1118 static void
1119 nxge_fill_tcam_entry_ah_esp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1120 tcam_entry_t *tcam_ptr)
1121 {
1122 ahip6_spec_t *fspec_key;
1123 ahip6_spec_t *fspec_mask;
1124 p_nxge_class_pt_cfg_t p_class_cfgp;
1125
1126 fspec_key = (ahip6_spec_t *)&flow_spec->uh.ahip6spec;
1127 fspec_mask = (ahip6_spec_t *)&flow_spec->um.ahip6spec;
1128
1129 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1130 if (p_class_cfgp->class_cfg[TCAM_CLASS_AH_ESP_IPV6] &
1131 NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1132 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6src);
1133 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6src);
1134 } else {
1135 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key->ip6dst);
1136 TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask->ip6dst);
1137 }
1138
1139 TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1140 tcam_ptr->ip6_class_mask, TCAM_CLASS_AH_ESP_IPV6);
1141
1142 if (flow_spec->flow_type == FSPEC_AHIP6) {
1143 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1144 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_AH);
1145 } else {
1146 TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1147 tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_ESP);
1148 }
1149 tcam_ptr->ip6_port_key = fspec_key->spi;
1150 tcam_ptr->ip6_port_mask = fspec_mask->spi;
1151 tcam_ptr->ip6_tos_key = fspec_key->tos;
1152 tcam_ptr->ip6_tos_mask = fspec_mask->tos;
1153 }
1154
1155 /* ARGSUSED */
1156 static void
1157 nxge_fill_tcam_entry_ip_usr(p_nxge_t nxgep, flow_spec_t *flow_spec,
1158 tcam_entry_t *tcam_ptr, tcam_class_t class)
1159 {
1160 ip_user_spec_t *fspec_key;
1161 ip_user_spec_t *fspec_mask;
1162
1163 fspec_key = (ip_user_spec_t *)&flow_spec->uh.ip_usr_spec;
1164 fspec_mask = (ip_user_spec_t *)&flow_spec->um.ip_usr_spec;
1165
1166 if (fspec_key->ip_ver == FSPEC_IP4) {
1167 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key->ip4dst);
1168 TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask->ip4dst);
1169 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key->ip4src);
1170 TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask->ip4src);
1171
1172 tcam_ptr->ip4_port_key = fspec_key->l4_4_bytes;
1173 tcam_ptr->ip4_port_mask = fspec_mask->l4_4_bytes;
1174
1175 TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1176 tcam_ptr->ip4_class_mask, class);
1177
1178 tcam_ptr->ip4_proto_key = fspec_key->proto;
1179 tcam_ptr->ip4_proto_mask = fspec_mask->proto;
1180
1181 tcam_ptr->ip4_tos_key = fspec_key->tos;
1182 tcam_ptr->ip4_tos_mask = fspec_mask->tos;
1183 }
1184 }
1185
1186
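/*
 * nxge_flow_get_hash
 * Builds a flow template from the fields selected by the class
 * configuration and computes the H1 (20-bit) and H2 hash values
 * used for FCRAM lookups.
 */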
1187 nxge_status_t
1188 nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
1189 uint32_t *H1, uint16_t *H2)
1190 {
1191 flow_spec_t *flow_spec;
1192 uint32_t class_cfg;
1193 flow_template_t ft;
1194 p_nxge_class_pt_cfg_t p_class_cfgp;
1195
1196 int ft_size = sizeof (flow_template_t);
1197
1198 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash"));
1199
1200 flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1201 bzero((char *)&ft, ft_size);
1202 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1203
1204 switch (flow_spec->flow_type) {
1205 case FSPEC_TCPIP4:
1206 class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4];
1207 if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1208 ft.ip_proto = IPPROTO_TCP;
1209 if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1210 ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src;
1211 if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1212 ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst;
1213 if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1214 ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc;
1215 if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1216 ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst;
1217 break;
1218
1219 case FSPEC_UDPIP4:
1220 class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4];
1221 if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1222 ft.ip_proto = IPPROTO_UDP;
1223 if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1224 ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src;
1225 if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1226 ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst;
1227 if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1228 ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc;
1229 if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1230 ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst;
1231 break;
1232
1233 default:
1234 return (NXGE_ERROR);
1235 }
1236
1237 *H1 = nxge_compute_h1(p_class_cfgp->init_h1,
1238 (uint32_t *)&ft, ft_size) & 0xfffff;
1239 *H2 = nxge_compute_h2(p_class_cfgp->init_h2,
1240 (uint8_t *)&ft, ft_size);
1241
1242 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash"));
1243 return (NXGE_OK);
1244 }
1245
1246 nxge_status_t
1247 nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1248 {
1249 uint32_t H1;
1250 uint16_t H2;
1251 nxge_status_t status = NXGE_OK;
1252
1253 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry"));
1254 status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2);
1255 if (status != NXGE_OK) {
1256 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1257 " nxge_add_fcram_entry failed "));
1258 return (status);
1259 }
1260
1261 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry"));
1262 return (NXGE_OK);
1263 }
1264
1265 /*
1266  * The caller has already decided that this flow goes into the TCAM.
1267 */
1268
1269 nxge_status_t
1270 nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1271 {
1272 npi_handle_t handle;
1273 uint64_t channel_cookie;
1274 uint64_t flow_cookie;
1275 flow_spec_t *flow_spec;
1276 npi_status_t rs = NPI_SUCCESS;
1277 tcam_entry_t tcam_ptr;
1278 tcam_location_t location;
1279 uint8_t offset, rdc_grp;
1280 p_nxge_hw_list_t hw_p;
1281 uint64_t class;
1282
1283 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
1284 handle = nxgep->npi_reg_handle;
1285
1286 bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1287 flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1288 flow_cookie = flow_res->flow_cookie;
1289 channel_cookie = flow_res->channel_cookie;
1290 location = (tcam_location_t)nxge_tcam_get_index(nxgep,
1291 (uint16_t)flow_res->location);
1292
1293 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1294 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1295 		    " nxge_add_tcam_entry: common hardware not set"));
1297 return (NXGE_ERROR);
1298 }
1299
1300 if (flow_spec->flow_type == FSPEC_IP_USR) {
1301 int i;
1302 int add_usr_cls = 0;
1303 int ipv6 = 0;
1304 ip_user_spec_t *uspec = &flow_spec->uh.ip_usr_spec;
1305 ip_user_spec_t *umask = &flow_spec->um.ip_usr_spec;
1306 nxge_usr_l3_cls_t *l3_ucls_p;
1307
1308 MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1309
1310 for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
1311 l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
1312 if (l3_ucls_p->valid && l3_ucls_p->tcam_ref_cnt) {
1313 if (uspec->proto == l3_ucls_p->pid) {
1314 class = l3_ucls_p->cls;
1315 l3_ucls_p->tcam_ref_cnt++;
1316 add_usr_cls = 1;
1317 break;
1318 }
1319 } else if (l3_ucls_p->valid == 0) {
1320 /* Program new user IP class */
1321 switch (i) {
1322 case 0:
1323 class = TCAM_CLASS_IP_USER_4;
1324 break;
1325 case 1:
1326 class = TCAM_CLASS_IP_USER_5;
1327 break;
1328 case 2:
1329 class = TCAM_CLASS_IP_USER_6;
1330 break;
1331 case 3:
1332 class = TCAM_CLASS_IP_USER_7;
1333 break;
1334 default:
1335 break;
1336 }
1337 if (uspec->ip_ver == FSPEC_IP6)
1338 ipv6 = 1;
1339 rs = npi_fflp_cfg_ip_usr_cls_set(handle,
1340 (tcam_class_t)class, uspec->tos,
1341 umask->tos, uspec->proto, ipv6);
1342 if (rs != NPI_SUCCESS)
1343 goto fail;
1344
1345 rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
1346 (tcam_class_t)class);
1347 if (rs != NPI_SUCCESS)
1348 goto fail;
1349
1350 l3_ucls_p->cls = class;
1351 l3_ucls_p->pid = uspec->proto;
1352 l3_ucls_p->tcam_ref_cnt++;
1353 l3_ucls_p->valid = 1;
1354 add_usr_cls = 1;
1355 break;
1356 } else if (l3_ucls_p->tcam_ref_cnt == 0 &&
1357 uspec->proto == l3_ucls_p->pid) {
1358 /*
1359 * The class has already been programmed,
1360 * probably for flow hash
1361 */
1362 class = l3_ucls_p->cls;
1363 if (uspec->ip_ver == FSPEC_IP6)
1364 ipv6 = 1;
1365 rs = npi_fflp_cfg_ip_usr_cls_set(handle,
1366 (tcam_class_t)class, uspec->tos,
1367 umask->tos, uspec->proto, ipv6);
1368 if (rs != NPI_SUCCESS)
1369 goto fail;
1370
1371 rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
1372 (tcam_class_t)class);
1373 if (rs != NPI_SUCCESS)
1374 goto fail;
1375
1376 l3_ucls_p->pid = uspec->proto;
1377 l3_ucls_p->tcam_ref_cnt++;
1378 add_usr_cls = 1;
1379 break;
1380 }
1381 }
1382 if (!add_usr_cls) {
1383 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1384 "nxge_add_tcam_entry: Could not find/insert class"
1385 		    " for pid %d", uspec->proto));
1386 goto fail;
1387 }
1388 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1389 }
1390
1391 switch (flow_spec->flow_type) {
1392 case FSPEC_TCPIP4:
1393 nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
1394 rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
1395 flow_cookie);
1396 offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
1397 channel_cookie);
1398 break;
1399
1400 case FSPEC_UDPIP4:
1401 nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
1402 rdc_grp = nxge_get_rdc_group(nxgep,
1403 TCAM_CLASS_UDP_IPV4,
1404 flow_cookie);
1405 offset = nxge_get_rdc_offset(nxgep,
1406 TCAM_CLASS_UDP_IPV4,
1407 channel_cookie);
1408 break;
1409
1410 case FSPEC_TCPIP6:
1411 nxge_fill_tcam_entry_tcp_ipv6(nxgep,
1412 flow_spec, &tcam_ptr);
1413 rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
1414 flow_cookie);
1415 offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
1416 channel_cookie);
1417 break;
1418
1419 case FSPEC_UDPIP6:
1420 nxge_fill_tcam_entry_udp_ipv6(nxgep,
1421 flow_spec, &tcam_ptr);
1422 rdc_grp = nxge_get_rdc_group(nxgep,
1423 TCAM_CLASS_UDP_IPV6,
1424 flow_cookie);
1425 offset = nxge_get_rdc_offset(nxgep,
1426 TCAM_CLASS_UDP_IPV6,
1427 channel_cookie);
1428 break;
1429
1430 case FSPEC_SCTPIP4:
1431 nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
1432 rdc_grp = nxge_get_rdc_group(nxgep,
1433 TCAM_CLASS_SCTP_IPV4,
1434 flow_cookie);
1435 offset = nxge_get_rdc_offset(nxgep,
1436 TCAM_CLASS_SCTP_IPV4,
1437 channel_cookie);
1438 break;
1439
1440 case FSPEC_SCTPIP6:
1441 nxge_fill_tcam_entry_sctp_ipv6(nxgep,
1442 flow_spec, &tcam_ptr);
1443 rdc_grp = nxge_get_rdc_group(nxgep,
1444 TCAM_CLASS_SCTP_IPV6,
1445 flow_cookie);
1446 offset = nxge_get_rdc_offset(nxgep,
1447 TCAM_CLASS_SCTP_IPV6,
1448 channel_cookie);
1449 break;
1450
1451 case FSPEC_AHIP4:
1452 case FSPEC_ESPIP4:
1453 nxge_fill_tcam_entry_ah_esp(nxgep, flow_spec, &tcam_ptr);
1454 rdc_grp = nxge_get_rdc_group(nxgep,
1455 TCAM_CLASS_AH_ESP_IPV4,
1456 flow_cookie);
1457 offset = nxge_get_rdc_offset(nxgep,
1458 TCAM_CLASS_AH_ESP_IPV4,
1459 channel_cookie);
1460 break;
1461
1462 case FSPEC_AHIP6:
1463 case FSPEC_ESPIP6:
1464 nxge_fill_tcam_entry_ah_esp_ipv6(nxgep,
1465 flow_spec, &tcam_ptr);
1466 rdc_grp = nxge_get_rdc_group(nxgep,
1467 TCAM_CLASS_AH_ESP_IPV6,
1468 flow_cookie);
1469 offset = nxge_get_rdc_offset(nxgep,
1470 TCAM_CLASS_AH_ESP_IPV6,
1471 channel_cookie);
1472 break;
1473
1474 case FSPEC_IP_USR:
1475 nxge_fill_tcam_entry_ip_usr(nxgep, flow_spec, &tcam_ptr,
1476 (tcam_class_t)class);
1477 rdc_grp = nxge_get_rdc_group(nxgep,
1478 (tcam_class_t)class, flow_cookie);
1479 offset = nxge_get_rdc_offset(nxgep,
1480 (tcam_class_t)class, channel_cookie);
1481 break;
1482 default:
1483 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1484 "nxge_add_tcam_entry: Unknown flow spec 0x%x",
1485 flow_spec->flow_type));
1486 return (NXGE_ERROR);
1487 }
1488
1489 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1490 " nxge_add_tcam_entry write"
1491 " for location %d offset %d", location, offset));
1492
1493 MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1494 rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
1495
1496 if (rs & NPI_FFLP_ERROR) {
1497 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1498 " nxge_add_tcam_entry write"
1499 " failed for location %d", location));
1500 goto fail;
1501 }
1502
1503 tcam_ptr.match_action.value = 0;
1504 tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp;
1505 tcam_ptr.match_action.bits.ldw.offset = offset;
1506 tcam_ptr.match_action.bits.ldw.tres =
1507 TRES_TERM_OVRD_L2RDC;
1508 if (channel_cookie == NXGE_PKT_DISCARD)
1509 tcam_ptr.match_action.bits.ldw.disc = 1;
1510 rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1511 location, tcam_ptr.match_action.value);
1512 if (rs & NPI_FFLP_ERROR) {
1513 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1514 " nxge_add_tcam_entry write"
1515 " failed for ASC RAM location %d", location));
1516 goto fail;
1517 }
1518 bcopy((void *) &tcam_ptr,
1519 (void *) &nxgep->classifier.tcam_entries[location].tce,
1520 sizeof (tcam_entry_t));
1521 nxgep->classifier.tcam_entry_cnt++;
1522 nxgep->classifier.tcam_entries[location].valid = 1;
1523
1524 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1525 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
1526 return (NXGE_OK);
1527 fail:
1528 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1529 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_tcam_entry FAILED"));
1530 return (NXGE_ERROR);
1531 }
1532
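/*
 * nxge_tcam_handle_ip_fragment
 * Workaround for fragmented IP packets: installs a catch-all TCAM
 * entry keyed on the "no port" bit that steers fragments to the
 * default MAC RDC group, enables TCAM lookup for all TCP/UDP/SCTP
 * classes and then enables the TCAM search.
 */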
1533 static nxge_status_t
1534 nxge_tcam_handle_ip_fragment(p_nxge_t nxgep)
1535 {
1536 tcam_entry_t tcam_ptr;
1537 tcam_location_t location;
1538 uint8_t class;
1539 uint32_t class_config;
1540 npi_handle_t handle;
1541 npi_status_t rs = NPI_SUCCESS;
1542 p_nxge_hw_list_t hw_p;
1543 nxge_status_t status = NXGE_OK;
1544
1545 handle = nxgep->npi_reg_handle;
1546 class = 0;
1547 bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1548 tcam_ptr.ip4_noport_key = 1;
1549 tcam_ptr.ip4_noport_mask = 1;
1550 location = nxgep->function_num;
1551 nxgep->classifier.fragment_bug_location = location;
1552
1553 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1554 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1555 		    " nxge_tcam_handle_ip_fragment: common hardware not set"));
1557 return (NXGE_ERROR);
1558 }
1559 MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1560 rs = npi_fflp_tcam_entry_write(handle,
1561 location, &tcam_ptr);
1562
1563 if (rs & NPI_FFLP_ERROR) {
1564 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1565 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1566 " nxge_tcam_handle_ip_fragment "
1567 " tcam_entry write"
1568 " failed for location %d", location));
1569 return (NXGE_ERROR);
1570 }
1571 tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp;
1572 tcam_ptr.match_action.bits.ldw.offset = 0; /* use the default */
1573 tcam_ptr.match_action.bits.ldw.tres =
1574 TRES_TERM_USE_OFFSET;
1575 rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1576 location, tcam_ptr.match_action.value);
1577
1578 if (rs & NPI_FFLP_ERROR) {
1579 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1580 NXGE_DEBUG_MSG((nxgep,
1581 FFLP_CTL,
1582 " nxge_tcam_handle_ip_fragment "
1583 " tcam_entry write"
1584 " failed for ASC RAM location %d", location));
1585 return (NXGE_ERROR);
1586 }
1587 bcopy((void *) &tcam_ptr,
1588 (void *) &nxgep->classifier.tcam_entries[location].tce,
1589 sizeof (tcam_entry_t));
1590 nxgep->classifier.tcam_entry_cnt++;
1591 nxgep->classifier.tcam_entries[location].valid = 1;
1592 for (class = TCAM_CLASS_TCP_IPV4;
1593 class <= TCAM_CLASS_SCTP_IPV6; class++) {
1594 class_config = nxgep->class_config.class_cfg[class];
1595 class_config |= NXGE_CLASS_TCAM_LOOKUP;
1596 status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1597
1598 if (status & NPI_FFLP_ERROR) {
1599 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1600 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1601 "nxge_tcam_handle_ip_fragment "
1602 "nxge_fflp_ip_class_config failed "
1603 " class %d config %x ", class, class_config));
1604 return (NXGE_ERROR);
1605 }
1606 }
1607
1608 rs = npi_fflp_cfg_tcam_enable(handle);
1609 if (rs & NPI_FFLP_ERROR) {
1610 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1611 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1612 "nxge_tcam_handle_ip_fragment "
1613 " nxge_fflp_config_tcam_enable failed"));
1614 return (NXGE_ERROR);
1615 }
1616 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1617 return (NXGE_OK);
1618 }
1619
1620 /* ARGSUSED */
1621 static int
1622 nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res)
1623 {
1624 return (0);
1625 }
1626
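/*
 * nxge_add_flow
 * Decides whether a flow should be added as an FCRAM (hash) entry
 * or a TCAM entry and installs it accordingly.
 */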
1627 nxge_status_t
1628 nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res)
1629 {
1630
1631 int insert_hash = 0;
1632 nxge_status_t status = NXGE_OK;
1633
1634 if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1635 /* determine whether to do TCAM or Hash flow */
1636 insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res);
1637 }
1638 if (insert_hash) {
1639 status = nxge_add_fcram_entry(nxgep, flow_res);
1640 } else {
1641 status = nxge_add_tcam_entry(nxgep, flow_res);
1642 }
1643 return (status);
1644 }
1645
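/*
 * nxge_put_tcam
 * Adds the TCAM entry described by the flow resource contained in
 * the message block.
 */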
1646 void
1647 nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp)
1648 {
1649 flow_resource_t *fs;
1650
1651 fs = (flow_resource_t *)mp->b_rptr;
1652 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1653 "nxge_put_tcam addr fs $%p type %x offset %x",
1654 fs, fs->flow_spec.flow_type, fs->channel_cookie));
1655 (void) nxge_add_tcam_entry(nxgep, fs);
1656 }
1657
1658 nxge_status_t
1659 nxge_fflp_config_tcam_enable(p_nxge_t nxgep)
1660 {
1661 npi_handle_t handle = nxgep->npi_reg_handle;
1662 npi_status_t rs = NPI_SUCCESS;
1663
1664 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable"));
1665 rs = npi_fflp_cfg_tcam_enable(handle);
1666 if (rs & NPI_FFLP_ERROR) {
1667 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1668 " nxge_fflp_config_tcam_enable failed"));
1669 return (NXGE_ERROR | rs);
1670 }
1671 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable"));
1672 return (NXGE_OK);
1673 }
1674
1675 nxge_status_t
1676 nxge_fflp_config_tcam_disable(p_nxge_t nxgep)
1677 {
1678 npi_handle_t handle = nxgep->npi_reg_handle;
1679 npi_status_t rs = NPI_SUCCESS;
1680
1681 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1682 " ==> nxge_fflp_config_tcam_disable"));
1683 rs = npi_fflp_cfg_tcam_disable(handle);
1684 if (rs & NPI_FFLP_ERROR) {
1685 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1686 " nxge_fflp_config_tcam_disable failed"));
1687 return (NXGE_ERROR | rs);
1688 }
1689 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1690 " <== nxge_fflp_config_tcam_disable"));
1691 return (NXGE_OK);
1692 }
1693
1694 nxge_status_t
1695 nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep)
1696 {
1697 npi_handle_t handle = nxgep->npi_reg_handle;
1698 npi_status_t rs = NPI_SUCCESS;
1699 p_nxge_dma_pt_cfg_t p_dma_cfgp;
1700 p_nxge_hw_pt_cfg_t p_cfgp;
1701 uint8_t partition;
1702
1703 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1704 " ==> nxge_fflp_config_hash_lookup_enable"));
1705 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1706 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1707
1708 for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1709 if (p_cfgp->grpids[partition]) {
1710 rs = npi_fflp_cfg_fcram_partition_enable(
1711 handle, partition);
1712 if (rs != NPI_SUCCESS) {
1713 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1714 " nxge_fflp_config_hash_lookup_enable"
1715 				    " failed FCRAM partition"
1716 " enable for partition %d ", partition));
1717 return (NXGE_ERROR | rs);
1718 }
1719 }
1720 }
1721
1722 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1723 " <== nxge_fflp_config_hash_lookup_enable"));
1724 return (NXGE_OK);
1725 }
1726
1727 nxge_status_t
1728 nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep)
1729 {
1730 npi_handle_t handle = nxgep->npi_reg_handle;
1731 npi_status_t rs = NPI_SUCCESS;
1732 p_nxge_dma_pt_cfg_t p_dma_cfgp;
1733 p_nxge_hw_pt_cfg_t p_cfgp;
1734 uint8_t partition;
1735
1736 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1737 " ==> nxge_fflp_config_hash_lookup_disable"));
1738 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1739 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1740
1741 for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1742 if (p_cfgp->grpids[partition]) {
1743 rs = npi_fflp_cfg_fcram_partition_disable(handle,
1744 partition);
1745 if (rs != NPI_SUCCESS) {
1746 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1747 " nxge_fflp_config_hash_lookup_disable"
1748 " failed FCRAM partition"
1749 " disable for partition %d ", partition));
1750 return (NXGE_ERROR | rs);
1751 }
1752 }
1753 }
1754
1755 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1756 " <== nxge_fflp_config_hash_lookup_disable"));
1757 return (NXGE_OK);
1758 }
1759
1760 nxge_status_t
1761 nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep)
1762 {
1763 npi_handle_t handle = nxgep->npi_reg_handle;
1764 npi_status_t rs = NPI_SUCCESS;
1765
1766 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1767 " ==> nxge_fflp_config_llc_snap_enable"));
1768 rs = npi_fflp_cfg_llcsnap_enable(handle);
1769 if (rs & NPI_FFLP_ERROR) {
1770 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1771 " nxge_fflp_config_llc_snap_enable failed"));
1772 return (NXGE_ERROR | rs);
1773 }
1774 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1775 " <== nxge_fflp_config_llc_snap_enable"));
1776 return (NXGE_OK);
1777 }
1778
1779 nxge_status_t
1780 nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep)
1781 {
1782 npi_handle_t handle = nxgep->npi_reg_handle;
1783 npi_status_t rs = NPI_SUCCESS;
1784
1785 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1786 " ==> nxge_fflp_config_llc_snap_disable"));
1787 rs = npi_fflp_cfg_llcsnap_disable(handle);
1788 if (rs & NPI_FFLP_ERROR) {
1789 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1790 " nxge_fflp_config_llc_snap_disable failed"));
1791 return (NXGE_ERROR | rs);
1792 }
1793 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1794 " <== nxge_fflp_config_llc_snap_disable"));
1795 return (NXGE_OK);
1796 }
1797
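/*
 * nxge_fflp_ip_usr_class_config() -- program a user-defined IP class
 * (TCAM_CLASS_IP_USER_4..7) from a packed 32-bit config word and then
 * enable or disable it.
 *
 * Editor's sketch of the encoding, mirroring the decode below (the
 * authoritative field layout is in the NXGE_CLASS_CFG_IP_* definitions):
 *
 *	config = (tos << NXGE_CLASS_CFG_IP_TOS_SHIFT) |
 *	    (tos_mask << NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT) |
 *	    (proto << NXGE_CLASS_CFG_IP_PROTO_SHIFT) |
 *	    (ipv6 ? NXGE_CLASS_CFG_IP_IPV6_MASK : 0) |
 *	    (enable ? NXGE_CLASS_CFG_IP_ENABLE_MASK : 0);
 */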
1798 nxge_status_t
1799 nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class,
1800 uint32_t config)
1801 {
1802 npi_status_t rs = NPI_SUCCESS;
1803 npi_handle_t handle = nxgep->npi_reg_handle;
1804 uint8_t tos, tos_mask, proto, ver = 0;
1805 uint8_t class_enable = 0;
1806
1807 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config"));
1808
1809 tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >>
1810 NXGE_CLASS_CFG_IP_TOS_SHIFT;
1811 tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >>
1812 NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT;
1813 proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >>
1814 NXGE_CLASS_CFG_IP_PROTO_SHIFT;
1815 if (config & NXGE_CLASS_CFG_IP_IPV6_MASK)
1816 ver = 1;
1817 if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK)
1818 class_enable = 1;
1819 rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask,
1820 proto, ver);
1821 if (rs & NPI_FFLP_ERROR) {
1822 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1823 " nxge_fflp_ip_usr_class_config"
1824 " for class %d failed ", class));
1825 return (NXGE_ERROR | rs);
1826 }
1827 if (class_enable)
1828 rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class);
1829 else
1830 rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
1831
1832 if (rs & NPI_FFLP_ERROR) {
1833 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1834 " nxge_fflp_ip_usr_class_config"
1835 " TCAM enable/disable for class %d failed ", class));
1836 return (NXGE_ERROR | rs);
1837 }
1838 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config"));
1839 return (NXGE_OK);
1840 }
1841
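/*
 * nxge_fflp_ip_class_config() -- apply a class configuration word to one
 * of the hardware IP classes.  (Summary comment added for clarity.)  The
 * value is cached in class_config.class_cfg[class] and then pushed to both
 * the TCAM key registers and the flow-key (hash) registers.
 *
 * Illustrative call, matching the fragment-workaround path earlier in this
 * file, which ORs NXGE_CLASS_TCAM_LOOKUP into the cached config and
 * reapplies it:
 *
 *	class_config |= NXGE_CLASS_TCAM_LOOKUP;
 *	status = nxge_fflp_ip_class_config(nxgep, class, class_config);
 */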
1842 nxge_status_t
1843 nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config)
1844 {
1845 uint32_t class_config;
1846 nxge_status_t t_status = NXGE_OK;
1847 nxge_status_t f_status = NXGE_OK;
1848 p_nxge_class_pt_cfg_t p_class_cfgp;
1849
1850 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
1851
1852 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1853 class_config = p_class_cfgp->class_cfg[class];
1854
1855 if (class_config != config) {
1856 p_class_cfgp->class_cfg[class] = config;
1857 class_config = config;
1858 }
1859
1860 t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config);
1861 f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config);
1862
1863 if (t_status & NPI_FFLP_ERROR) {
1864 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1865 " nxge_fflp_ip_class_config %x"
1866 " for class %d tcam failed", config, class));
1867 return (t_status);
1868 }
1869 if (f_status & NPI_FFLP_ERROR) {
1870 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1871 " nxge_fflp_ip_class_config %x"
1872 " for class %d flow key failed", config, class));
1873 return (f_status);
1874 }
1875 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
1876 return (NXGE_OK);
1877 }
1878
1879 nxge_status_t
1880 nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class,
1881 uint32_t *config)
1882 {
1883 uint32_t t_class_config, f_class_config;
1884 int t_status = NXGE_OK;
1885 int f_status = NXGE_OK;
1886
1887 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config_get"));
1888
1889 t_class_config = f_class_config = 0;
1890 t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config);
1891 f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config);
1892
1893 if (t_status & NPI_FFLP_ERROR) {
1894 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1895 " nxge_fflp_ip_class_config_get "
1896 " for class %d tcam failed", class));
1897 return (t_status);
1898 }
1899
1900 if (f_status & NPI_FFLP_ERROR) {
1901 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1902 " nxge_fflp_ip_class_config_get "
1903 " for class %d flow key failed", class));
1904 return (f_status);
1905 }
1906
1907 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1908 	    " nxge_fflp_ip_class_config_get tcam %x flow %x",
1909 t_class_config, f_class_config));
1910
1911 *config = t_class_config | f_class_config;
1912 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get"));
1913 return (NXGE_OK);
1914 }
1915
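/*
 * nxge_fflp_ip_class_config_all() -- walk the fixed IP classes
 * (TCAM_CLASS_TCP_IPV4 through TCAM_CLASS_SCTP_IPV6) and apply the
 * configuration cached in class_config.class_cfg[] to each one.
 * (Summary comment added for clarity.)
 */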
1916 nxge_status_t
1917 nxge_fflp_ip_class_config_all(p_nxge_t nxgep)
1918 {
1919 uint32_t class_config;
1920 tcam_class_t class;
1921
1922 #ifdef NXGE_DEBUG
1923 int status = NXGE_OK;
1924 #endif
1925
1926 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config_all"));
1927 for (class = TCAM_CLASS_TCP_IPV4;
1928 class <= TCAM_CLASS_SCTP_IPV6; class++) {
1929 class_config = nxgep->class_config.class_cfg[class];
1930 #ifndef NXGE_DEBUG
1931 (void) nxge_fflp_ip_class_config(nxgep, class, class_config);
1932 #else
1933 status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1934 if (status & NPI_FFLP_ERROR) {
1935 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1936 "nxge_fflp_ip_class_config failed "
1937 " class %d config %x ",
1938 class, class_config));
1939 }
1940 #endif
1941 }
1942 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_all"));
1943 return (NXGE_OK);
1944 }
1945
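/*
 * nxge_fflp_config_vlan_table() -- associate a configured VLAN ID with its
 * RDC group.  (Summary comment added for clarity.)  The VLAN must already
 * be marked in the soft-state vlan_tbl[]; the entry's rdctbl is written to
 * the hardware VLAN table for this port at priority 1 under nxge_vlan_lock.
 */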
1946 nxge_status_t
1947 nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id)
1948 {
1949 uint8_t port, rdc_grp;
1950 npi_handle_t handle;
1951 npi_status_t rs = NPI_SUCCESS;
1952 uint8_t priority = 1;
1953 p_nxge_mv_cfg_t vlan_table;
1954 p_nxge_class_pt_cfg_t p_class_cfgp;
1955 p_nxge_hw_list_t hw_p;
1956
1957 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table"));
1958 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1959 handle = nxgep->npi_reg_handle;
1960 vlan_table = p_class_cfgp->vlan_tbl;
1961 port = nxgep->function_num;
1962
1963 if (vlan_table[vlan_id].flag == 0) {
1964 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1965 " nxge_fflp_config_vlan_table"
1966 " vlan id is not configured %d", vlan_id));
1967 return (NXGE_ERROR);
1968 }
1969
1970 if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1971 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1972 " nxge_fflp_config_vlan_table:"
1973 		    " common hardware not set (niu type 0x%x)", nxgep->niu_type));
1974 return (NXGE_ERROR);
1975 }
1976 MUTEX_ENTER(&hw_p->nxge_vlan_lock);
1977 rdc_grp = vlan_table[vlan_id].rdctbl;
1978 rs = npi_fflp_cfg_enet_vlan_table_assoc(handle,
1979 port, vlan_id,
1980 rdc_grp, priority);
1981
1982 MUTEX_EXIT(&hw_p->nxge_vlan_lock);
1983 if (rs & NPI_FFLP_ERROR) {
1984 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1985 "nxge_fflp_config_vlan_table failed "
1986 " Port %d vlan_id %d rdc_grp %d",
1987 port, vlan_id, rdc_grp));
1988 return (NXGE_ERROR | rs);
1989 }
1990
1991 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table"));
1992 return (NXGE_OK);
1993 }
1994
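/*
 * nxge_fflp_update_hw() -- push the cached classification state to the
 * hardware.  (Summary comment added for clarity.)  The sequence below is:
 * hash seeds H1/H2, the VLAN-to-RDC-group table (param_vlan_2rdc_grp),
 * alternate MAC to RDC-table assignments (param_mac_2rdc_grp), and finally
 * the per-class IP configuration via nxge_fflp_ip_class_config_all().
 */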
1995 nxge_status_t
1996 nxge_fflp_update_hw(p_nxge_t nxgep)
1997 {
1998 nxge_status_t status = NXGE_OK;
1999 p_nxge_param_t pa;
2000 uint64_t cfgd_vlans;
2001 uint64_t *val_ptr;
2002 int i;
2003 int num_macs;
2004 uint8_t alt_mac;
2005 nxge_param_map_t *p_map;
2006 p_nxge_mv_cfg_t vlan_table;
2007 p_nxge_class_pt_cfg_t p_class_cfgp;
2008 p_nxge_dma_pt_cfg_t p_all_cfgp;
2009 p_nxge_hw_pt_cfg_t p_cfgp;
2010
2011 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw"));
2012
2013 p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
2014 p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2015 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2016
2017 status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1);
2018 if (status != NXGE_OK) {
2019 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2020 "nxge_fflp_set_hash1 Failed"));
2021 return (NXGE_ERROR);
2022 }
2023
2024 status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2);
2025 if (status != NXGE_OK) {
2026 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2027 "nxge_fflp_set_hash2 Failed"));
2028 return (NXGE_ERROR);
2029 }
2030 vlan_table = p_class_cfgp->vlan_tbl;
2031
2032 /* configure vlan tables */
2033 pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp];
2034 #if defined(__i386)
2035 val_ptr = (uint64_t *)(uint32_t)pa->value;
2036 #else
2037 val_ptr = (uint64_t *)pa->value;
2038 #endif
2039 cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >>
2040 NXGE_PARAM_ARRAY_CNT_SHIFT);
2041
2042 for (i = 0; i < cfgd_vlans; i++) {
2043 p_map = (nxge_param_map_t *)&val_ptr[i];
2044 if (vlan_table[p_map->param_id].flag) {
2045 status = nxge_fflp_config_vlan_table(nxgep,
2046 p_map->param_id);
2047 if (status != NXGE_OK) {
2048 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2049 "nxge_fflp_config_vlan_table Failed"));
2050 return (NXGE_ERROR);
2051 }
2052 }
2053 }
2054
2055 /* config MAC addresses */
2056 num_macs = p_cfgp->max_macs;
2057 pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp];
2058 #if defined(__i386)
2059 val_ptr = (uint64_t *)(uint32_t)pa->value;
2060 #else
2061 val_ptr = (uint64_t *)pa->value;
2062 #endif
2063
2064 for (alt_mac = 0; alt_mac < num_macs; alt_mac++) {
2065 if (p_class_cfgp->mac_host_info[alt_mac].flag) {
2066 status = nxge_logical_mac_assign_rdc_table(nxgep,
2067 alt_mac);
2068 if (status != NXGE_OK) {
2069 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2070 "nxge_logical_mac_assign_rdc_table"
2071 " Failed"));
2072 return (NXGE_ERROR);
2073 }
2074 }
2075 }
2076
2077 /* Config Hash values */
2078 /* config classes */
2079 status = nxge_fflp_ip_class_config_all(nxgep);
2080 if (status != NXGE_OK) {
2081 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2082 "nxge_fflp_ip_class_config_all Failed"));
2083 return (NXGE_ERROR);
2084 }
2085 return (NXGE_OK);
2086 }
2087
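/*
 * nxge_classify_init_hw() -- one-time classifier bring-up.
 * (Summary comment added for clarity.)  Skips the work if NXGE_FFLP_HW_INIT
 * is already set, otherwise programs the FFLP from soft state, attaches the
 * RDC tables for the main and alternate/multicast MACs, and installs the
 * IP-fragment TCAM workaround when classifier.fragment_bug is set.
 */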
2088 nxge_status_t
2089 nxge_classify_init_hw(p_nxge_t nxgep)
2090 {
2091 nxge_status_t status = NXGE_OK;
2092
2093 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw"));
2094
2095 if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) {
2096 NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2097 "nxge_classify_init_hw already init"));
2098 return (NXGE_OK);
2099 }
2100
2101 /* Now do a real configuration */
2102 status = nxge_fflp_update_hw(nxgep);
2103 if (status != NXGE_OK) {
2104 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2105 "nxge_fflp_update_hw failed"));
2106 return (NXGE_ERROR);
2107 }
2108
2109 	/* Init RDC tables?  Who should do that, rxdma or fflp? */
2110 /* attach rdc table to the MAC port. */
2111 status = nxge_main_mac_assign_rdc_table(nxgep);
2112 if (status != NXGE_OK) {
2113 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2114 "nxge_main_mac_assign_rdc_table failed"));
2115 return (NXGE_ERROR);
2116 }
2117
2118 status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
2119 if (status != NXGE_OK) {
2120 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2121 		    "nxge_alt_mcast_mac_assign_rdc_table failed"));
2122 return (NXGE_ERROR);
2123 }
2124
2125 if (nxgep->classifier.fragment_bug == 1) {
2126 status = nxge_tcam_handle_ip_fragment(nxgep);
2127 if (status != NXGE_OK) {
2128 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2129 "nxge_tcam_handle_ip_fragment failed"));
2130 return (NXGE_ERROR);
2131 }
2132 }
2133
2134 nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
2135 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw"));
2136 return (NXGE_OK);
2137 }
2138
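/*
 * nxge_fflp_handle_sys_errors() -- decode and clear FFLP error state.
 * (Summary comment added for clarity.)  Reads the VLAN parity, TCAM
 * ECC/parity, per-partition FCRAM PIO and FCRAM lookup error registers,
 * bumps the matching statistics, posts FMA ereports, and clears the
 * latched errors.
 */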
2139 nxge_status_t
2140 nxge_fflp_handle_sys_errors(p_nxge_t nxgep)
2141 {
2142 npi_handle_t handle;
2143 p_nxge_fflp_stats_t statsp;
2144 uint8_t portn, rdc_grp;
2145 p_nxge_dma_pt_cfg_t p_dma_cfgp;
2146 p_nxge_hw_pt_cfg_t p_cfgp;
2147 vlan_par_err_t vlan_err;
2148 tcam_err_t tcam_err;
2149 hash_lookup_err_log1_t fcram1_err;
2150 hash_lookup_err_log2_t fcram2_err;
2151 hash_tbl_data_log_t fcram_err;
2152
2153 handle = nxgep->npi_handle;
2154 statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
2155 portn = nxgep->mac.portnum;
2156 	fcram2_err.value = 0;	/* log2 is read only on ECC error */
2157 /*
2158 * need to read the fflp error registers to figure out what the error
2159 * is
2160 */
2161 npi_fflp_vlan_error_get(handle, &vlan_err);
2162 npi_fflp_tcam_error_get(handle, &tcam_err);
2163
2164 if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) {
2165 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2166 " vlan table parity error on port %d"
2167 " addr: 0x%x data: 0x%x",
2168 portn, vlan_err.bits.ldw.addr,
2169 vlan_err.bits.ldw.data));
2170 statsp->vlan_parity_err++;
2171
2172 if (vlan_err.bits.ldw.m_err) {
2173 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2174 " vlan table multiple errors on port %d",
2175 portn));
2176 }
2177 statsp->errlog.vlan = (uint32_t)vlan_err.value;
2178 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2179 NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR);
2180 npi_fflp_vlan_error_clear(handle);
2181 }
2182
2183 if (tcam_err.bits.ldw.err) {
2184 if (tcam_err.bits.ldw.p_ecc != 0) {
2185 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2186 " TCAM ECC error on port %d"
2187 " TCAM entry: 0x%x syndrome: 0x%x",
2188 portn, tcam_err.bits.ldw.addr,
2189 tcam_err.bits.ldw.syndrome));
2190 statsp->tcam_ecc_err++;
2191 } else {
2192 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2193 " TCAM Parity error on port %d"
2194 " addr: 0x%x parity value: 0x%x",
2195 portn, tcam_err.bits.ldw.addr,
2196 tcam_err.bits.ldw.syndrome));
2197 statsp->tcam_parity_err++;
2198 }
2199
2200 if (tcam_err.bits.ldw.mult) {
2201 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2202 " TCAM Multiple errors on port %d", portn));
2203 } else {
2204 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2205 " TCAM PIO error on port %d", portn));
2206 }
2207
2208 statsp->errlog.tcam = (uint32_t)tcam_err.value;
2209 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2210 NXGE_FM_EREPORT_FFLP_TCAM_ERR);
2211 npi_fflp_tcam_error_clear(handle);
2212 }
2213
2214 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2215 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2216
2217 for (rdc_grp = 0; rdc_grp < NXGE_MAX_RDC_GROUPS; rdc_grp++) {
2218 if (p_cfgp->grpids[rdc_grp]) {
2219 npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp);
2220 if (fcram_err.bits.ldw.pio_err) {
2221 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2222 " FCRAM PIO ECC error on port %d"
2223 " rdc group: %d Hash Table addr: 0x%x"
2224 " syndrome: 0x%x",
2225 portn, rdc_grp,
2226 fcram_err.bits.ldw.fcram_addr,
2227 fcram_err.bits.ldw.syndrome));
2228 statsp->hash_pio_err[rdc_grp]++;
2229 statsp->errlog.hash_pio[rdc_grp] =
2230 (uint32_t)fcram_err.value;
2231 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2232 NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR);
2233 npi_fflp_fcram_error_clear(handle, rdc_grp);
2234 }
2235 }
2236 }
2237
2238 npi_fflp_fcram_error_log1_get(handle, &fcram1_err);
2239 if (fcram1_err.bits.ldw.ecc_err) {
2240 char *multi_str = "";
2241 char *multi_bit_str = "";
2242
2243 npi_fflp_fcram_error_log2_get(handle, &fcram2_err);
2244 if (fcram1_err.bits.ldw.mult_lk) {
2245 multi_str = "multiple";
2246 }
2247 if (fcram1_err.bits.ldw.mult_bit) {
2248 multi_bit_str = "multiple bits";
2249 }
2250 statsp->hash_lookup_err++;
2251 NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2252 " FCRAM %s lookup %s ECC error on port %d"
2253 " H1: 0x%x Subarea: 0x%x Syndrome: 0x%x",
2254 multi_str, multi_bit_str, portn,
2255 fcram2_err.bits.ldw.h1,
2256 fcram2_err.bits.ldw.subarea,
2257 fcram2_err.bits.ldw.syndrome));
2258 NXGE_FM_REPORT_ERROR(nxgep, NULL, NULL,
2259 NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR);
2260 }
2261 statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value;
2262 statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
2263 return (NXGE_OK);
2264 }
2265
2266 int
2267 nxge_get_valid_tcam_cnt(p_nxge_t nxgep) {
2268 return ((nxgep->classifier.fragment_bug == 1) ?
2269 nxgep->classifier.tcam_entry_cnt - 1 :
2270 nxgep->classifier.tcam_entry_cnt);
2271 }
2272
2273 int
2274 nxge_rxdma_channel_cnt(p_nxge_t nxgep)
2275 {
2276 p_nxge_dma_pt_cfg_t p_dma_cfgp;
2277 p_nxge_hw_pt_cfg_t p_cfgp;
2278
2279 p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2280 p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2281 return (p_cfgp->max_rdcs);
2282 }
2283
2284 /* ARGSUSED */
2285 int
2286 nxge_rxclass_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
2287 {
2288 uint32_t cmd;
2289 rx_class_cfg_t *cfg_info = (rx_class_cfg_t *)mp->b_rptr;
2290
2291 if (nxgep == NULL) {
2292 return (-1);
2293 }
2294 cmd = cfg_info->cmd;
2295 switch (cmd) {
2296 default:
2297 return (-1);
2298
2299 case NXGE_RX_CLASS_GCHAN:
2300 cfg_info->data = nxge_rxdma_channel_cnt(nxgep);
2301 break;
2302 case NXGE_RX_CLASS_GRULE_CNT:
2303 MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2304 cfg_info->rule_cnt = nxge_get_valid_tcam_cnt(nxgep);
2305 MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2306 break;
2307 case NXGE_RX_CLASS_GRULE:
2308 nxge_get_tcam_entry(nxgep, &cfg_info->fs);
2309 break;
2310 case NXGE_RX_CLASS_GRULE_ALL:
2311 nxge_get_tcam_entry_all(nxgep, cfg_info);
2312 break;
2313 case NXGE_RX_CLASS_RULE_DEL:
2314 nxge_del_tcam_entry(nxgep, cfg_info->fs.location);
2315 break;
2316 case NXGE_RX_CLASS_RULE_INS:
2317 (void) nxge_add_tcam_entry(nxgep, &cfg_info->fs);
2318 break;
2319 }
2320 return (0);
2321 }
2322 /* ARGSUSED */
2323 int
2324 nxge_rxhash_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
2325 {
2326 uint32_t cmd;
2327 cfg_cmd_t *cfg_info = (cfg_cmd_t *)mp->b_rptr;
2328
2329 if (nxgep == NULL) {
2330 return (-1);
2331 }
2332 cmd = cfg_info->cmd;
2333
2334 switch (cmd) {
2335 default:
2336 return (-1);
2337 case NXGE_IPTUN_CFG_ADD_CLS:
2338 nxge_add_iptun_class(nxgep, &cfg_info->iptun_cfg,
2339 &cfg_info->class_id);
2340 break;
2341 case NXGE_IPTUN_CFG_SET_HASH:
2342 nxge_cfg_iptun_hash(nxgep, &cfg_info->iptun_cfg,
2343 cfg_info->class_id);
2344 break;
2345 case NXGE_IPTUN_CFG_DEL_CLS:
2346 nxge_del_iptun_class(nxgep, cfg_info->class_id);
2347 break;
2348 case NXGE_IPTUN_CFG_GET_CLS:
2349 nxge_get_iptun_class(nxgep, &cfg_info->iptun_cfg,
2350 cfg_info->class_id);
2351 break;
2352 case NXGE_CLS_CFG_SET_SYM:
2353 nxge_set_ip_cls_sym(nxgep, cfg_info->class_id, cfg_info->sym);
2354 break;
2355 case NXGE_CLS_CFG_GET_SYM:
2356 nxge_get_ip_cls_sym(nxgep, cfg_info->class_id, &cfg_info->sym);
2357 break;
2358 }
2359 return (0);
2360 }
2361
2362 void
2363 nxge_get_tcam_entry_all(p_nxge_t nxgep, rx_class_cfg_t *cfgp)
2364 {
2365 nxge_classify_t *clasp = &nxgep->classifier;
2366 uint16_t n_entries;
2367 int i, j, k;
2368 tcam_flow_spec_t *tcam_entryp;
2369
2370 cfgp->data = clasp->tcam_size;
2371 MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2372 n_entries = cfgp->rule_cnt;
2373
2374 for (i = 0, j = 0; j < cfgp->data; j++) {
2375 k = nxge_tcam_get_index(nxgep, j);
2376 tcam_entryp = &clasp->tcam_entries[k];
2377 if (tcam_entryp->valid != 1)
2378 continue;
2379 cfgp->rule_locs[i] = j;
2380 i++;
2381 	}
2382 MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2383
2384 if (n_entries != i) {
2385 /* print warning, this should not happen */
2386 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry_all: "
2387 		    "n_entries[%d] != i[%d]", n_entries, i));
2388 }
2389 }
2390
2391
2392 /* Entries for the ports are interleaved in the TCAM */
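/*
 * Worked example (numbers are illustrative only): with tcam_top == 0,
 * nports == 4 and fragment_bug == 1, logical rule 0 maps to hardware
 * entry 4, rule 1 to entry 8, and so on; entry 0 stays reserved for the
 * IP fragment rule.
 */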
2393 static uint16_t
2394 nxge_tcam_get_index(p_nxge_t nxgep, uint16_t index)
2395 {
2396 /* One entry reserved for IP fragment rule */
2397 if (index >= (nxgep->classifier.tcam_size - 1))
2398 index = 0;
2399 if (nxgep->classifier.fragment_bug == 1)
2400 index++;
2401 return (nxgep->classifier.tcam_top + (index * nxgep->nports));
2402 }
2403
2404 static uint32_t
2405 nxge_tcam_cls_to_flow(uint32_t class_code) {
2406 switch (class_code) {
2407 case TCAM_CLASS_TCP_IPV4:
2408 return (FSPEC_TCPIP4);
2409 case TCAM_CLASS_UDP_IPV4:
2410 return (FSPEC_UDPIP4);
2411 case TCAM_CLASS_AH_ESP_IPV4:
2412 return (FSPEC_AHIP4);
2413 case TCAM_CLASS_SCTP_IPV4:
2414 return (FSPEC_SCTPIP4);
2415 case TCAM_CLASS_TCP_IPV6:
2416 return (FSPEC_TCPIP6);
2417 case TCAM_CLASS_UDP_IPV6:
2418 return (FSPEC_UDPIP6);
2419 case TCAM_CLASS_AH_ESP_IPV6:
2420 return (FSPEC_AHIP6);
2421 case TCAM_CLASS_SCTP_IPV6:
2422 return (FSPEC_SCTPIP6);
2423 case TCAM_CLASS_IP_USER_4:
2424 case TCAM_CLASS_IP_USER_5:
2425 case TCAM_CLASS_IP_USER_6:
2426 case TCAM_CLASS_IP_USER_7:
2427 return (FSPEC_IP_USR);
2428 default:
2429 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "nxge_tcam_cls_to_flow"
2430 ": Unknown class code [0x%x]", class_code));
2431 break;
2432 }
2433 return (0);
2434 }
2435
2436 void
2437 nxge_get_tcam_entry(p_nxge_t nxgep, flow_resource_t *fs)
2438 {
2439 uint16_t index;
2440 tcam_flow_spec_t *tcam_ep;
2441 tcam_entry_t *tp;
2442 flow_spec_t *fspec;
2443 tcpip4_spec_t *fspec_key;
2444 tcpip4_spec_t *fspec_mask;
2445
2446 index = nxge_tcam_get_index(nxgep, (uint16_t)fs->location);
2447 tcam_ep = &nxgep->classifier.tcam_entries[index];
2448 if (tcam_ep->valid != 1) {
2449 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry: "
2450 "Entry [%d] invalid for index [%d]", fs->location, index));
2451 return;
2452 }
2453
2454 /* Fill the flow spec entry */
2455 tp = &tcam_ep->tce;
2456 fspec = &fs->flow_spec;
2457 fspec->flow_type = nxge_tcam_cls_to_flow(tp->ip4_class_key);
2458
2459 /* TODO - look at proto field to differentiate between AH and ESP */
2460 if (fspec->flow_type == FSPEC_AHIP4) {
2461 if (tp->ip4_proto_key == IPPROTO_ESP)
2462 fspec->flow_type = FSPEC_ESPIP4;
2463 }
2464
2465 switch (tp->ip4_class_key) {
2466 case TCAM_CLASS_TCP_IPV4:
2467 case TCAM_CLASS_UDP_IPV4:
2468 case TCAM_CLASS_AH_ESP_IPV4:
2469 case TCAM_CLASS_SCTP_IPV4:
2470 fspec_key = (tcpip4_spec_t *)&fspec->uh.tcpip4spec;
2471 fspec_mask = (tcpip4_spec_t *)&fspec->um.tcpip4spec;
2472 FSPEC_IPV4_ADDR(fspec_key->ip4dst, tp->ip4_dest_key);
2473 FSPEC_IPV4_ADDR(fspec_mask->ip4dst, tp->ip4_dest_mask);
2474 FSPEC_IPV4_ADDR(fspec_key->ip4src, tp->ip4_src_key);
2475 FSPEC_IPV4_ADDR(fspec_mask->ip4src, tp->ip4_src_mask);
2476 fspec_key->tos = tp->ip4_tos_key;
2477 fspec_mask->tos = tp->ip4_tos_mask;
2478 break;
2479 default:
2480 break;
2481 }
2482
2483 switch (tp->ip4_class_key) {
2484 case TCAM_CLASS_TCP_IPV4:
2485 case TCAM_CLASS_UDP_IPV4:
2486 case TCAM_CLASS_SCTP_IPV4:
2487 FSPEC_IP_PORTS(fspec_key->pdst, fspec_key->psrc,
2488 tp->ip4_port_key);
2489 FSPEC_IP_PORTS(fspec_mask->pdst, fspec_mask->psrc,
2490 tp->ip4_port_mask);
2491 break;
2492 case TCAM_CLASS_AH_ESP_IPV4:
2493 fspec->uh.ahip4spec.spi = tp->ip4_port_key;
2494 fspec->um.ahip4spec.spi = tp->ip4_port_mask;
2495 break;
2496 case TCAM_CLASS_IP_USER_4:
2497 case TCAM_CLASS_IP_USER_5:
2498 case TCAM_CLASS_IP_USER_6:
2499 case TCAM_CLASS_IP_USER_7:
2500 fspec->uh.ip_usr_spec.l4_4_bytes = tp->ip4_port_key;
2501 fspec->um.ip_usr_spec.l4_4_bytes = tp->ip4_port_mask;
2502 fspec->uh.ip_usr_spec.ip_ver = FSPEC_IP4;
2503 fspec->uh.ip_usr_spec.proto = tp->ip4_proto_key;
2504 fspec->um.ip_usr_spec.proto = tp->ip4_proto_mask;
2505 break;
2506 default:
2507 break;
2508 }
2509
2510 if (tp->match_action.bits.ldw.disc == 1) {
2511 fs->channel_cookie = NXGE_PKT_DISCARD;
2512 } else {
2513 fs->channel_cookie = tp->match_action.bits.ldw.offset;
2514 }
2515 }
2516
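/*
 * nxge_del_tcam_entry() -- remove the rule at the given logical location.
 * (Summary comment added for clarity.)  If the entry uses one of the
 * user-programmable L3 classes, its tcam_ref_cnt is dropped first and the
 * class itself is disabled and freed once the count reaches zero; the
 * hardware TCAM entry is then invalidated and the soft-state slot cleared.
 */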
2517 void
2518 nxge_del_tcam_entry(p_nxge_t nxgep, uint32_t location)
2519 {
2520 npi_status_t rs = NPI_SUCCESS;
2521 uint16_t index;
2522 tcam_flow_spec_t *tcam_ep;
2523 tcam_entry_t *tp;
2524 tcam_class_t class;
2525
2526 MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2527 index = nxge_tcam_get_index(nxgep, (uint16_t)location);
2528 tcam_ep = &nxgep->classifier.tcam_entries[index];
2529 if (tcam_ep->valid != 1) {
2530 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_tcam_entry: "
2531 "Entry [%d] invalid for index [%d]", location, index));
2532 goto fail;
2533 }
2534
2535 /* Fill the flow spec entry */
2536 tp = &tcam_ep->tce;
2537 class = tp->ip4_class_key;
2538 if (class >= TCAM_CLASS_IP_USER_4 && class <= TCAM_CLASS_IP_USER_7) {
2539 int i;
2540 nxge_usr_l3_cls_t *l3_ucls_p;
2541 p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2542
2543 for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2544 l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
2545 if (l3_ucls_p->valid) {
2546 if (l3_ucls_p->cls == class &&
2547 l3_ucls_p->tcam_ref_cnt) {
2548 l3_ucls_p->tcam_ref_cnt--;
2549 if (l3_ucls_p->tcam_ref_cnt > 0)
2550 continue;
2551 /* disable class */
2552 rs = npi_fflp_cfg_ip_usr_cls_disable(
2553 nxgep->npi_reg_handle,
2554 (tcam_class_t)class);
2555 if (rs != NPI_SUCCESS)
2556 goto fail;
2557 l3_ucls_p->cls = 0;
2558 l3_ucls_p->pid = 0;
2559 l3_ucls_p->valid = 0;
2560 break;
2561 }
2562 }
2563 }
2564 if (i == NXGE_L3_PROG_CLS) {
2565 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2566 "nxge_del_tcam_entry: Usr class "
2567 "0x%llx not found", (unsigned long long) class));
2568 goto fail;
2569 }
2570 }
2571
2572 rs = npi_fflp_tcam_entry_invalidate(nxgep->npi_reg_handle, index);
2573 if (rs != NPI_SUCCESS) {
2574 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2575 "nxge_del_tcam_entry: TCAM invalidate failed "
2576 "at loc %d ", location));
2577 goto fail;
2578 }
2579
2580 nxgep->classifier.tcam_entries[index].valid = 0;
2581 nxgep->classifier.tcam_entry_cnt--;
2582
2583 MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2584 NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_del_tcam_entry"));
2585 return;
2586 fail:
2587 MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2588 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2589 "<== nxge_del_tcam_entry FAILED"));
2590 }
2591
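/*
 * nxge_iptun_pkt_type_to_pid() -- map a tunnel packet type to the IP
 * protocol number used as the class PID: 4 (IP-in-IP), 41 (IPv6), 47 (GRE)
 * and 17 (UDP, over which GTP is carried).  (Summary comment added for
 * clarity; protocol numbers are the standard IANA assignments.)
 */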
2592 static uint8_t
2593 nxge_iptun_pkt_type_to_pid(uint8_t pkt_type)
2594 {
2595 uint8_t pid = 0;
2596
2597 switch (pkt_type) {
2598 case IPTUN_PKT_IPV4:
2599 pid = 4;
2600 break;
2601 case IPTUN_PKT_IPV6:
2602 pid = 41;
2603 break;
2604 case IPTUN_PKT_GRE:
2605 pid = 47;
2606 break;
2607 case IPTUN_PKT_GTP:
2608 pid = 17;
2609 break;
2610 default:
2611 NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
2612 "nxge_iptun_pkt_type_to_pid: Unknown pkt type 0x%x",
2613 pkt_type));
2614 break;
2615 }
2616
2617 return (pid);
2618 }
2619
2620 static npi_status_t
2621 nxge_set_iptun_usr_cls_reg(p_nxge_t nxgep, uint64_t class,
2622 iptun_cfg_t *iptunp)
2623 {
2624 npi_handle_t handle = nxgep->npi_reg_handle;
2625 npi_status_t rs = NPI_SUCCESS;
2626
2627 switch (iptunp->in_pkt_type) {
2628 case IPTUN_PKT_IPV4:
2629 case IPTUN_PKT_IPV6:
2630 rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2631 (tcam_class_t)class, 0, 0, 0, 0);
2632 break;
2633 case IPTUN_PKT_GRE:
2634 rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2635 (tcam_class_t)class, iptunp->l4b0_val,
2636 iptunp->l4b0_mask, 0, 0);
2637 break;
2638 case IPTUN_PKT_GTP:
2639 rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2640 (tcam_class_t)class, 0, 0, iptunp->l4b23_val,
2641 (iptunp->l4b23_sel & 0x01));
2642 break;
2643 default:
2644 rs = NPI_FFLP_TCAM_CLASS_INVALID;
2645 break;
2646 }
2647 return (rs);
2648 }
2649
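/*
 * nxge_add_iptun_class() -- allocate one of the user-programmable classes
 * (TCAM_CLASS_IP_USER_4..7) for an IP tunneling packet type.
 * (Summary comment added for clarity.)  A free tcam_l3_prog_cls[] slot is
 * programmed with the protocol id and the tunnel-specific L4 byte matches,
 * the class is enabled, and its id is returned through *cls_idp.  A slot
 * that already carries the same protocol id is reused when it has no flow
 * packet type configured yet.
 */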
2650 void
2651 nxge_add_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp,
2652 uint8_t *cls_idp)
2653 {
2654 int i, add_cls;
2655 uint8_t pid;
2656 uint64_t class;
2657 p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2658 npi_handle_t handle = nxgep->npi_reg_handle;
2659 npi_status_t rs = NPI_SUCCESS;
2660
2661 pid = nxge_iptun_pkt_type_to_pid(iptunp->in_pkt_type);
2662 if (pid == 0)
2663 return;
2664
2665 add_cls = 0;
2666 MUTEX_ENTER(&hw_p->nxge_tcam_lock);
2667
2668 	/* Get a user-programmable class ID */
2669 for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2670 if (hw_p->tcam_l3_prog_cls[i].valid == 0) {
2671 /* todo add new usr class reg */
2672 switch (i) {
2673 case 0:
2674 class = TCAM_CLASS_IP_USER_4;
2675 break;
2676 case 1:
2677 class = TCAM_CLASS_IP_USER_5;
2678 break;
2679 case 2:
2680 class = TCAM_CLASS_IP_USER_6;
2681 break;
2682 case 3:
2683 class = TCAM_CLASS_IP_USER_7;
2684 break;
2685 default:
2686 break;
2687 }
2688 rs = npi_fflp_cfg_ip_usr_cls_set(handle,
2689 (tcam_class_t)class, 0, 0, pid, 0);
2690 if (rs != NPI_SUCCESS)
2691 goto fail;
2692
2693 rs = nxge_set_iptun_usr_cls_reg(nxgep, class, iptunp);
2694
2695 if (rs != NPI_SUCCESS)
2696 goto fail;
2697
2698 rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
2699 (tcam_class_t)class);
2700 if (rs != NPI_SUCCESS)
2701 goto fail;
2702
2703 hw_p->tcam_l3_prog_cls[i].cls = class;
2704 hw_p->tcam_l3_prog_cls[i].pid = pid;
2705 hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
2706 iptunp->in_pkt_type;
2707 hw_p->tcam_l3_prog_cls[i].valid = 1;
2708 *cls_idp = (uint8_t)class;
2709 add_cls = 1;
2710 break;
2711 } else if (hw_p->tcam_l3_prog_cls[i].pid == pid) {
2712 if (hw_p->tcam_l3_prog_cls[i].flow_pkt_type == 0) {
2713 				/* there is no flow key */
2714 				/* program the existing usr class reg */
2715 				class = hw_p->tcam_l3_prog_cls[i].cls;
2716 				rs = nxge_set_iptun_usr_cls_reg(nxgep, class,
2717 iptunp);
2718 if (rs != NPI_SUCCESS)
2719 goto fail;
2720
2721 rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
2722 (tcam_class_t)class);
2723 if (rs != NPI_SUCCESS)
2724 goto fail;
2725
2726 hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
2727 iptunp->in_pkt_type;
2728 *cls_idp = (uint8_t)class;
2729 add_cls = 1;
2730 } else {
2731 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2732 "nxge_add_iptun_class: L3 usr "
2733 "programmable class with pid %d "
2734 "already exists", pid));
2735 }
2736 break;
2737 }
2738 }
2739 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2740
2741 if (add_cls != 1) {
2742 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2743 "nxge_add_iptun_class: Could not add IP tunneling class"));
2744 }
2745 return;
2746 fail:
2747 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2748 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_iptun_class: FAILED"));
2749 }
2750
2751 static boolean_t
2752 nxge_is_iptun_cls_present(p_nxge_t nxgep, uint8_t cls_id, int *idx)
2753 {
2754 int i;
2755 p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2756
2757 MUTEX_ENTER(&hw_p->nxge_tcam_lock);
2758 for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2759 if (hw_p->tcam_l3_prog_cls[i].valid &&
2760 hw_p->tcam_l3_prog_cls[i].flow_pkt_type != 0) {
2761 if (hw_p->tcam_l3_prog_cls[i].cls == cls_id)
2762 break;
2763 }
2764 }
2765 MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2766
2767 if (i == NXGE_L3_PROG_CLS) {
2768 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2769 "nxge_is_iptun_cls_present: Invalid class %d", cls_id));
2770 return (B_FALSE);
2771 } else {
2772 *idx = i;
2773 return (B_TRUE);
2774 }
2775 }
2776
2777 void
2778 nxge_cfg_iptun_hash(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
2779 {
2780 int idx;
2781 npi_handle_t handle = nxgep->npi_reg_handle;
2782 flow_key_cfg_t cfg;
2783
2784 /* check to see that this is a valid class ID */
2785 if (!nxge_is_iptun_cls_present(nxgep, cls_id, &idx)) {
2786 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2787 "nxge_cfg_iptun_hash: nxge_is_iptun_cls_present "
2788 "failed for cls_id %d", cls_id));
2789 return;
2790 }
2791
2792 bzero((void *)&cfg, sizeof (flow_key_cfg_t));
2793
2794 /*
2795 * This ensures that all 4 bytes of the XOR value are loaded to the
2796 * hash key.
2797 */
2798 cfg.use_dport = cfg.use_sport = cfg.ip_opts_exist = 1;
2799
2800 cfg.l4_xor_sel = (iptunp->l4xor_sel & FL_KEY_USR_L4XOR_MSK);
2801 cfg.use_l4_md = 1;
2802
2803 if (iptunp->hash_flags & HASH_L3PROTO)
2804 cfg.use_proto = 1;
2805 else if (iptunp->hash_flags & HASH_IPDA)
2806 cfg.use_daddr = 1;
2807 else if (iptunp->hash_flags & HASH_IPSA)
2808 cfg.use_saddr = 1;
2809 else if (iptunp->hash_flags & HASH_VLAN)
2810 cfg.use_vlan = 1;
2811 else if (iptunp->hash_flags & HASH_L2DA)
2812 cfg.use_l2da = 1;
2813 else if (iptunp->hash_flags & HASH_IFPORT)
2814 cfg.use_portnum = 1;
2815
2816 (void) npi_fflp_cfg_ip_cls_flow_key_rfnl(handle, (tcam_class_t)cls_id,
2817 &cfg);
2818 }
2819
2820 void
2821 nxge_del_iptun_class(p_nxge_t nxgep, uint8_t cls_id)
2822 {
2823 int i;
2824 npi_handle_t handle = nxgep->npi_reg_handle;
2825 npi_status_t rs = NPI_SUCCESS;
2826
2827
2828 /* check to see that this is a valid class ID */
2829 if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i)) {
2830 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2831 "nxge_del_iptun_class: Invalid class ID 0x%x", cls_id));
2832 return;
2833 }
2834
2835 MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2836 rs = npi_fflp_cfg_ip_usr_cls_disable(handle, (tcam_class_t)cls_id);
2837 if (rs != NPI_SUCCESS)
2838 goto fail;
2839 nxgep->nxge_hw_p->tcam_l3_prog_cls[i].flow_pkt_type = 0;
2840 if (nxgep->nxge_hw_p->tcam_l3_prog_cls[i].tcam_ref_cnt == 0)
2841 nxgep->nxge_hw_p->tcam_l3_prog_cls[i].valid = 0;
2842
2843 MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2844 return;
2845 fail:
2846 MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2847 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_iptun_class: FAILED"));
2848 }
2849
2850 void
2851 nxge_get_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
2852 {
2853 int i;
2854 uint8_t pid;
2855 npi_handle_t handle = nxgep->npi_reg_handle;
2856 npi_status_t rs = NPI_SUCCESS;
2857 flow_key_cfg_t cfg;
2858
2859
2860 /* check to see that this is a valid class ID */
2861 if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i))
2862 return;
2863
2864 bzero((void *)iptunp, sizeof (iptun_cfg_t));
2865
2866 pid = nxgep->nxge_hw_p->tcam_l3_prog_cls[i].pid;
2867
2868 rs = npi_fflp_cfg_ip_usr_cls_get_iptun(handle, (tcam_class_t)cls_id,
2869 &iptunp->l4b0_val, &iptunp->l4b0_mask, &iptunp->l4b23_val,
2870 &iptunp->l4b23_sel);
2871 if (rs != NPI_SUCCESS)
2872 goto fail;
2873
2874 rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
2875 (tcam_class_t)cls_id, &cfg);
2876 if (rs != NPI_SUCCESS)
2877 goto fail;
2878
2879 iptunp->l4xor_sel = cfg.l4_xor_sel;
2880 if (cfg.use_proto)
2881 iptunp->hash_flags |= HASH_L3PROTO;
2882 else if (cfg.use_daddr)
2883 iptunp->hash_flags |= HASH_IPDA;
2884 else if (cfg.use_saddr)
2885 iptunp->hash_flags |= HASH_IPSA;
2886 else if (cfg.use_vlan)
2887 iptunp->hash_flags |= HASH_VLAN;
2888 else if (cfg.use_l2da)
2889 iptunp->hash_flags |= HASH_L2DA;
2890 else if (cfg.use_portnum)
2891 iptunp->hash_flags |= HASH_IFPORT;
2892
2893 switch (pid) {
2894 case 4:
2895 iptunp->in_pkt_type = IPTUN_PKT_IPV4;
2896 break;
2897 case 41:
2898 iptunp->in_pkt_type = IPTUN_PKT_IPV6;
2899 break;
2900 case 47:
2901 iptunp->in_pkt_type = IPTUN_PKT_GRE;
2902 break;
2903 case 17:
2904 iptunp->in_pkt_type = IPTUN_PKT_GTP;
2905 break;
2906 default:
2907 iptunp->in_pkt_type = 0;
2908 break;
2909 }
2910
2911 return;
2912 fail:
2913 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_iptun_class: FAILED"));
2914 }
2915
2916 void
2917 nxge_set_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t sym)
2918 {
2919 npi_handle_t handle = nxgep->npi_reg_handle;
2920 npi_status_t rs = NPI_SUCCESS;
2921 boolean_t sym_en = (sym == 1) ? B_TRUE : B_FALSE;
2922
2923 rs = npi_fflp_cfg_sym_ip_cls_flow_key(handle, (tcam_class_t)cls_id,
2924 sym_en);
2925 if (rs != NPI_SUCCESS)
2926 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2927 "nxge_set_ip_cls_sym: FAILED"));
2928 }
2929
2930 void
2931 nxge_get_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t *sym)
2932 {
2933 npi_handle_t handle = nxgep->npi_reg_handle;
2934 npi_status_t rs = NPI_SUCCESS;
2935 flow_key_cfg_t cfg;
2936
2937 rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
2938 (tcam_class_t)cls_id, &cfg);
2939 if (rs != NPI_SUCCESS)
2940 goto fail;
2941
2942 if (cfg.use_sym)
2943 *sym = 1;
2944 else
2945 *sym = 0;
2946 return;
2947 fail:
2948 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_ip_cls_sym: FAILED"));
2949 }
2950