/*******************************************************************************
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 *
 * Module Description:
 *
 *
 * History:
 *    02/05/07 Alon Elhanani    Inception.
 ******************************************************************************/


#include "lm5710.h"
#include "license.h"
#include "mcp_shmem.h"
#include "command.h"
#include "debug.h"

// HW statistics are active
// only if we are PMF && collect_enabled is on!
#define LM_STATS_IS_HW_ACTIVE(_pdev) ( _pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled && \
                                       IS_PMF(_pdev) )

// do the _cmd statement only if in SF mode.
// we use this macro since in MF mode we don't maintain non-mandatory statistics, so to prevent inconsistency - we don't use them at all
#define LM_STATS_DO_IF_SF(_pdev,_cmd) if( !_pdev->hw_info.mf_info.multi_vnics_mode ){ _cmd; } ;

#define LM_STATS_64_TO_HI_LO( _x_64_, _hi_lo ) ( _hi_lo##_hi = (u32_t)U64_HI( _x_64_ ) ); ( _hi_lo##_lo = (u32_t)U64_LO( _x_64_ ) );
#define LM_STATS_HI_LO_TO_64( _hi_lo, _x_64_ ) ( _x_64_ = (((u64_t)(_hi_lo##_hi) << 32) | (_hi_lo##_lo)) )
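
/* Illustrative example (hypothetical struct/field names): for a stats struct
 * 's' with u32_t members stat_hi/stat_lo and a u64_t x == 0x1122334455667788,
 *     LM_STATS_64_TO_HI_LO( x, s.stat );
 * token-pastes the _hi/_lo suffixes and sets s.stat_hi = 0x11223344 and
 * s.stat_lo = 0x55667788; LM_STATS_HI_LO_TO_64( s.stat, x ) is the inverse.
 */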

/**
 * driver stats are stored as 64bits where the lower bits store
 * the value and the upper bits store the wraparound count.
 * different stat fields are stored with different data sizes
 * and the following macros help in storing values in the
 * "overflow count" part of a 64bit value and separating it from
 * the actual data.
 */
#define DATA_MASK(_bits) (((u64_t)-1)>>(64-(_bits)))
#define STATS_DATA(_bits,_val) ( (_val) & DATA_MASK(_bits) )
#define WRAPAROUND_COUNT_MASK(_bits) ( ~ DATA_MASK(_bits) )
#define HAS_WRAPPED_AROUND(_bits,_old,_new) ((STATS_DATA(_bits,_old) ) > (STATS_DATA(_bits,_new) ))
#define INC_WRAPAROUND_COUNT(_bits,_val) ((_val) + ( 1ull << (_bits) ))
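
/* Worked example (values chosen for illustration only): for a 36-bit counter,
 * DATA_MASK(36) == 0x0000000FFFFFFFFF. If the saved 64-bit value is
 * 0x0000000FFFFFFFFE (wraparound count 0, data near the 36-bit maximum) and
 * the newly read value is 5, then HAS_WRAPPED_AROUND(36,...) is TRUE,
 * INC_WRAPAROUND_COUNT adds (1 << 36), and merging the two parts yields
 * 0x0000001000000005 - one recorded wrap plus the new data.
 */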

/**lm_update_wraparound_if_needed
 * This function checks the old and new values, and returns
 * either the new data with the old wraparound count, or (if a
 * wraparound has occurred) the new data with an incremented
 * wraparound count.
 *
 * val_current can be given in either little-endian or
 * big-endian byte ordering. the values returned are always in
 * host byte order.
 *
 * @param data_bits the number of data bits in the values
 * @param val_current the newly collected value. the byte
 *                    ordering is determined by @param
 *                    b_swap_bytes
 * @param val_prev the previously saved value in host byte
 *                 order
 * @param b_swap_bytes TRUE if val_current is byte-swapped (i.e.
 *                     given as little-endian on a big-endian
 *                     machine), FALSE otherwise.
 *
 * @return u64_t the new data with an appropriate wraparound
 *         count.
 */

static u64_t lm_update_wraparound_if_needed(u8_t data_bits, u64_t val_current, u64_t val_prev, u8_t b_swap_bytes)
{
    if(b_swap_bytes)
    {
        /*We assume that only 32bit stats will ever need to be byte-swapped. this is because
          all HW data is byte-swapped by DMAE as needed, and the 64bit FW stats are swapped
          by the REGPAIR macros.*/
        DbgBreakIf(data_bits != 32);
        val_current = mm_le32_to_cpu(val_current);
    }
    if (HAS_WRAPPED_AROUND(data_bits,val_prev,val_current))
    {
        val_prev = INC_WRAPAROUND_COUNT(data_bits,val_prev);
    }
    return ((val_prev & WRAPAROUND_COUNT_MASK(data_bits)) |
            (val_current & DATA_MASK(data_bits))); /*take the overflow count we calculated, and the data from the new value*/
}

/**
 * The following macros handle the wraparound-count for FW
 * stats. Note that in the 32bit case (i.e any stats that are
 * not REGPAIRs), the bytes have to be swapped if the host byte
 * order is not little-endian.
 */
#define LM_SIGN_EXTEND_VALUE_32( val_current_32, val_prev_64 ) \
    val_prev_64 = lm_update_wraparound_if_needed( 32, val_current_32, val_prev_64, CHANGE_ENDIANITY )
#define LM_SIGN_EXTEND_VALUE_36( val_current_36, val_prev_64 ) \
    val_prev_64 = lm_update_wraparound_if_needed( 36, val_current_36, val_prev_64, FALSE)
#define LM_SIGN_EXTEND_VALUE_42( val_current_42, val_prev_64 ) \
    val_prev_64 = lm_update_wraparound_if_needed( 42, val_current_42, val_prev_64, FALSE )
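
/* Typical (hypothetical) usage - fold a newly collected 36-bit HW counter
 * 'hw_val_36' into its 64-bit mirror field:
 *     LM_SIGN_EXTEND_VALUE_36( hw_val_36, mirror->rx_pkts );
 * after which mirror->rx_pkts holds the new 36 data bits plus the accumulated
 * wraparound count in its upper bits. ("sign extend" here is historical
 * naming; the macros extend the wraparound count, not the sign.)
 */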


/* function checks if there is a pending completion for statistics and a pending dpc to handle the completion:
 * for cases where VBD gets a bit starved - we don't want to assert if the chip isn't stuck and we have a pending completion
 */
u8_t is_pending_stats_completion(struct _lm_device_t * pdev);

lm_status_t lm_stats_hw_collect( struct _lm_device_t *pdev );

#ifdef _VBD_CMD_
extern volatile u32_t* g_everest_sim_flags_ptr;
#define EVEREST_SIM_STATS 0x02
#endif


/*******************************************************************************
 * Description:
 *
 * Return:
 ******************************************************************************/
lm_status_t
lm_get_stats( lm_device_t* pdev,
              lm_stats_t stats_type,
              u64_t* stats_cnt
#ifdef VF_INVOLVED
              ,lm_vf_info_t * vf_info
#endif
            )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    lm_u64_t* stats = (lm_u64_t *)stats_cnt;
    const u32_t i = LM_CLI_IDX_NDIS;
    lm_stats_fw_t* stats_fw = NULL;

#ifdef VF_INVOLVED
    if (vf_info != NULL) {
        stats_fw = (lm_stats_fw_t*)vf_info->vf_stats.mirror_stats_fw;
        vf_info->vf_stats.vf_exracted_stats_cnt++;
    } else
#endif
    {
        stats_fw = &pdev->vars.stats.stats_mirror.stats_fw;
    }

    switch(stats_type)
    {
    case LM_STATS_FRAMES_XMITTED_OK:
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].total_sent_pkts ;
        // ioc IfHCOutPkts
        break;
    case LM_STATS_FRAMES_RECEIVED_OK:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_pkts +
                        stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_pkts +
                        stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_pkts ;
        stats->as_u64 -= stats_fw->eth_ustorm_common.client_statistics[i].ucast_no_buff_pkts ;
        stats->as_u64 -= stats_fw->eth_ustorm_common.client_statistics[i].mcast_no_buff_pkts ;
        stats->as_u64 -= stats_fw->eth_ustorm_common.client_statistics[i].bcast_no_buff_pkts ;
        // ioc IfHCInPkts
        break;
    case LM_STATS_ERRORED_RECEIVE_CNT:
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
#define LM_STATS_ERROR_DISCARD_SUM( _pdev, _i ) _pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[_i].checksum_discard + \
                                                _pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[_i].packets_too_big_discard + \
                                                _pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_discard + \
                                                LM_STATS_HW_GET_MACS_U64(_pdev, stats_rx.rx_stat_dot3statsframestoolong )
        stats->as_u64 = LM_STATS_ERROR_DISCARD_SUM( pdev, i ) ;
        break;
    case LM_STATS_RCV_CRC_ERROR:
        // Spec. 9
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_rx.rx_stat_dot3statsfcserrors) ;
        // ioc Dot3StatsFCSErrors
        break;
    case LM_STATS_ALIGNMENT_ERROR:
        // Spec. 10
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        if( !IS_PMF(pdev))
        {
            stats->as_u64 = 0 ;
        }
        else
        {
            stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_rx.rx_stat_dot3statsalignmenterrors) ;
        }
        // ioc Dot3StatsAlignmentErrors
        break;
    case LM_STATS_SINGLE_COLLISION_FRAMES:
        // Spec. 18
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        if( !IS_PMF(pdev) )
        {
            stats->as_u64 = 0 ;
        }
        else
        {
            stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statssinglecollisionframes ) ;
        }
        // ioc Dot3StatsSingleCollisionFrames
        break;
    case LM_STATS_MULTIPLE_COLLISION_FRAMES:
        // Spec. 19
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        if( !IS_PMF(pdev) )
        {
            stats->as_u64 = 0 ;
        }
        else
        {
            stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsmultiplecollisionframes ) ;
        }
        // ioc Dot3StatsMultipleCollisionFrame
        break;
    case LM_STATS_FRAMES_DEFERRED:
        // Spec. 40 (not in mini port)
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsdeferredtransmissions ) ;
        // ioc Dot3StatsDeferredTransmissions
        break;
    case LM_STATS_MAX_COLLISIONS:
        // Spec. 21
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsexcessivecollisions ) ;
        // ioc Dot3StatsExcessiveCollisions
        break;
    case LM_STATS_UNICAST_FRAMES_XMIT:
        // Spec. 6
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].unicast_pkts_sent ;
        break;
    case LM_STATS_MULTICAST_FRAMES_XMIT:
        // Spec. 7
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].multicast_pkts_sent ;
        break;
    case LM_STATS_BROADCAST_FRAMES_XMIT:
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].broadcast_pkts_sent ;
        break;
    case LM_STATS_UNICAST_FRAMES_RCV:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_pkts ;
        break;
    case LM_STATS_MULTICAST_FRAMES_RCV:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_pkts ;
        break;
    case LM_STATS_BROADCAST_FRAMES_RCV:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_pkts ;
        break;
    case LM_STATS_ERRORED_TRANSMIT_CNT:
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        if( !IS_PMF(pdev) )
        {
            stats->as_u64 = 0 ;
        }
        else
        {
            stats->as_u64 = LM_STATS_HW_GET_MACS_U64(pdev, stats_tx.tx_stat_dot3statsinternalmactransmiterrors ) ;
        }
        break;
    case LM_STATS_RCV_OVERRUN:
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        stats->as_u64 = pdev->vars.stats.stats_mirror.stats_hw.nig.brb_discard ;
        stats->as_u64 += pdev->vars.stats.stats_mirror.stats_hw.nig.brb_truncate ;
        stats->as_u64 += pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.xxoverflow_discard ;
        break;
    case LM_STATS_XMIT_UNDERRUN:
        //These counters are always zero
#ifdef VF_INVOLVED
        DbgBreakIf(vf_info);
#endif
        stats->as_u64 = 0;
        break;
    case LM_STATS_RCV_NO_BUFFER_DROP:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].no_buff_discard ;
        stats->as_u64 += stats_fw->eth_ustorm_common.client_statistics[i].ucast_no_buff_pkts ;
        stats->as_u64 += stats_fw->eth_ustorm_common.client_statistics[i].mcast_no_buff_pkts ;
        stats->as_u64 += stats_fw->eth_ustorm_common.client_statistics[i].bcast_no_buff_pkts ;
        // ioc IfInMBUFDiscards
        break;
    case LM_STATS_BYTES_RCV:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_bytes +
                        stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_bytes +
                        stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_bytes ;
        // ioc IfHCInOctets
        break;
    case LM_STATS_BYTES_XMIT:
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].total_sent_bytes ;
        // ioc IfHCOutOctets
        break;
    case LM_STATS_IF_IN_DISCARDS:
#ifdef VF_INVOLVED
        if (vf_info != NULL)
        {
            stats->as_u64 = 0;
        }
        else
#endif
        {
            stats->as_u64 = LM_STATS_ERROR_DISCARD_SUM( pdev, i ) ;                            // LM_STATS_ERRORED_RECEIVE_CNT
        }
        stats->as_u64 += stats_fw->eth_tstorm_common.client_statistics[i].no_buff_discard ;    // LM_STATS_RCV_NO_BUFFER_DROP
        stats->as_u64 += stats_fw->eth_ustorm_common.client_statistics[i].ucast_no_buff_pkts ; // LM_STATS_RCV_NO_BUFFER_DROP
        stats->as_u64 += stats_fw->eth_ustorm_common.client_statistics[i].mcast_no_buff_pkts ; // LM_STATS_RCV_NO_BUFFER_DROP
        stats->as_u64 += stats_fw->eth_ustorm_common.client_statistics[i].bcast_no_buff_pkts ; // LM_STATS_RCV_NO_BUFFER_DROP
#ifdef VF_INVOLVED
        if (vf_info == NULL)
#endif
        {
            stats->as_u64 += pdev->vars.stats.stats_mirror.stats_hw.nig.brb_discard ;          // LM_STATS_RCV_OVERRUN
            stats->as_u64 += pdev->vars.stats.stats_mirror.stats_hw.nig.brb_truncate ;         // LM_STATS_RCV_OVERRUN
        }
        stats->as_u64 += stats_fw->eth_tstorm_common.port_statistics.xxoverflow_discard ;      // LM_STATS_RCV_OVERRUN
        break;
    case LM_STATS_MULTICAST_BYTES_RCV:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_multicast_bytes ;
        break;
    case LM_STATS_DIRECTED_BYTES_RCV:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_unicast_bytes ;
        break;
    case LM_STATS_BROADCAST_BYTES_RCV:
        stats->as_u64 = stats_fw->eth_tstorm_common.client_statistics[i].rcv_broadcast_bytes ;
        break;
    case LM_STATS_DIRECTED_BYTES_XMIT:
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].unicast_bytes_sent ;
        break;
    case LM_STATS_MULTICAST_BYTES_XMIT:
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].multicast_bytes_sent ;
        break;
    case LM_STATS_BROADCAST_BYTES_XMIT:
        stats->as_u64 = stats_fw->eth_xstorm_common.client_statistics[i].broadcast_bytes_sent ;
        break;
/*
    case LM_STATS_IF_IN_ERRORS:
    case LM_STATS_IF_OUT_ERRORS:
        stats->as_u32.low = 0;
        stats->as_u32.high = 0;
        break;
*/
    default:
        stats->as_u64 = 0 ;
        lm_status = LM_STATUS_INVALID_PARAMETER;
        break;
    }
    //DbgMessage(pdev, WARN, "lm_get_stats: stats_type=0x%X val=%d\n", stats_type, stats->as_u64);
    return lm_status;
} /* lm_get_stats */
/*******************************************************************************
 * Description:
 *   Zero the mirror statistics (e.g. after the miniport was brought down in
 *   Windows, or 'driver unload' on ediag)
 *
 * Return:
 ******************************************************************************/
void lm_stats_reset( struct _lm_device_t* pdev)
{
    DbgMessage(pdev, INFORM, "Zero 'mirror' statistics...\n");
    mm_mem_zero( &pdev->vars.stats.stats_mirror, sizeof(pdev->vars.stats.stats_mirror) ) ;
}

/*
 * lm_edebug_if_is_stats_disabled returns TRUE if statistics gathering is
 * disabled according to the edebug-driver interface implemented through the
 * SHMEM2 field named edebug_driver_if. Otherwise, it returns FALSE.
 */
static u32_t
lm_edebug_if_is_stats_disabled(struct _lm_device_t * pdev)
{
    u32_t shmem2_size;
    u32_t offset = OFFSETOF(shmem2_region_t, edebug_driver_if[1]);
    u32_t val;

    if (pdev->hw_info.shmem_base2 != 0)
    {
        LM_SHMEM2_READ(pdev, OFFSETOF(shmem2_region_t, size), &shmem2_size);

        if (shmem2_size > offset)
        {
            LM_SHMEM2_READ(pdev, offset, &val);

            if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
            {
                return TRUE;
            }
        }
    }

    return FALSE;
}


static lm_status_t lm_stats_fw_post_request(lm_device_t *pdev)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    lm_stats_fw_collect_t * stats_fw = &pdev->vars.stats.stats_collect.stats_fw;

    stats_fw->fw_stats_req->hdr.drv_stats_counter = mm_cpu_to_le16(stats_fw->drv_counter);

    // zero the no-completion counter
    stats_fw->timer_wakeup_no_completion_current = 0 ;

    stats_fw->b_completion_done = FALSE ;
    if (IS_VFDEV(pdev))
    {
        return LM_STATUS_SUCCESS;
    }
    stats_fw->b_ramrod_completed = FALSE ;

#ifdef VF_INVOLVED
#ifndef __LINUX
    if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev))
    {
        lm_stats_prep_vf_fw_stats_req(pdev);
    }
#endif
#endif

    /* send FW stats ramrod */
    lm_status = lm_sq_post_entry(pdev,&(stats_fw->stats_sp_list_command),CMD_PRIORITY_HIGH);

    DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;

    if (lm_status == LM_STATUS_SUCCESS)
    {
        // increment ramrod counter (for debugging)
        ++stats_fw->stats_ramrod_cnt ;
    }

    return lm_status;

}
// main stats function called from timer
void lm_stats_on_timer( struct _lm_device_t * pdev )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS ;
    u32_t val = 0 ;

    if CHK_NULL( pdev )
    {
        DbgBreakIf(!pdev) ;
        return;
    }

    ++pdev->vars.stats.stats_collect.timer_wakeup ;

#ifdef _VBD_CMD_
    val = GET_FLAGS(*g_everest_sim_flags_ptr, EVEREST_SIM_STATS);
    pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled = val && pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled;
#endif
    /* disable statistics if FW SP trace is involved */
    if (pdev->params.record_sp)
    {
        ++pdev->vars.stats.stats_collect.sp_record_disabled;
        return;
    }
    /* if stats gathering is disabled according to the edebug-driver i/f - return */
    if(lm_edebug_if_is_stats_disabled(pdev))
    {
        ++pdev->vars.stats.stats_collect.shmem_disabled;
        return;
    }

    if( pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled )
    {
        // verify that the previous ramrod cb is finished
        if( lm_stats_fw_complete( pdev ) == LM_STATUS_BUSY)
        {
            // using a variable to have an event log since the message is too long
            val = ++pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_current ;

            // update timer_wakeup_no_completion_max
            if( pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_max < val )
            {
                pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_max = val ;
            }
            /* We give up in two cases:
             * 1. We got here #NO_COMPLETION times without having a stats-completion pending to be handled
             * 2. There is a completion pending to be handled - but it still hasn't been handled in the #COMP_NOT_HANDLED times
             *    we got here. #COMP_NOT_HANDLED > #NO_COMPLETION */
            if ((!is_pending_stats_completion(pdev) && (val >= MAX_STATS_TIMER_WAKEUP_NO_COMPLETION)) ||
                (val >= MAX_STATS_TIMER_WAKEUP_COMP_NOT_HANDLED))
            {
                if(GET_FLAGS(pdev->params.debug_cap_flags,DEBUG_CAP_FLAGS_STATS_FW))
                {
                    LM_TRIGGER_PCIE(pdev);
                }
                /* shutdown bug - BSOD only if shutdown is not in progress */
                if (!lm_reset_is_inprogress(pdev))
                {
                    /* BSOD */
                    if(GET_FLAGS(pdev->params.debug_cap_flags,DEBUG_CAP_FLAGS_STATS_FW))
                    {
                        DbgBreakIfAll( val >= MAX_STATS_TIMER_WAKEUP_NO_COMPLETION ) ;
                    }
                }
            }

            /* check interrupt mode on 57710A0 boards */
            lm_57710A0_dbg_intr(pdev);

            // this is the total of wakeups with no completion - for debugging
            ++pdev->vars.stats.stats_collect.stats_fw.timer_wakeup_no_completion_total ;
        }
        else
        {
            lm_status = lm_stats_fw_post_request(pdev);
            DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
        }
    } // fw collect enabled

    if( LM_STATS_IS_HW_ACTIVE(pdev) )
    {
        // if link is not up - we can simply skip this call (optimization)
        if( pdev->vars.stats.stats_collect.stats_hw.b_is_link_up )
        {
            MM_ACQUIRE_PHY_LOCK_DPC(pdev);

            // we can call dmae only if link is up, and we must check it under the lock
            if( pdev->vars.stats.stats_collect.stats_hw.b_is_link_up )
            {
                lm_status = lm_stats_hw_collect( pdev );

                DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;

                // assign values for the relevant mac type which is up - inside the lock for consistency reasons
                lm_stats_hw_assign( pdev ) ;
            }

            // assign statistics to MCP
            lm_stats_mgmt_assign( pdev ) ;

            MM_RELEASE_PHY_LOCK_DPC(pdev);
        } // link is up
    } // LM_STATS_IS_HW_ACTIVE
    else if( pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled &&
             pdev->vars.stats.stats_collect.stats_hw.b_is_link_up ) // when there is no link - no use writing to mgmt
    {
        MM_ACQUIRE_PHY_LOCK_DPC(pdev);
        lm_stats_mgmt_assign( pdev ) ;
        MM_RELEASE_PHY_LOCK_DPC(pdev);
    }
}

u8_t is_pending_stats_completion(struct _lm_device_t * pdev)
{
    volatile struct hc_sp_status_block * sp_sb = NULL;
    u32_t val = 0;

    /* read interrupt mask from IGU - check that the default-status-block bit is off... */
    if (INTR_BLK_TYPE(pdev) == INTR_BLK_HC){
        val = REG_RD(pdev, HC_REG_INT_MASK + 4*PORT_ID(pdev) );
    } // TODO add IGU complement


    sp_sb = lm_get_default_status_block(pdev);

    /* check bit 0 is masked (value 0) and that cstorm in the default-status-block has increased. */
    if(!GET_FLAGS(val, 1) && lm_is_eq_completion(pdev))
    {
        return TRUE;
    }
    return FALSE; /* no pending completion */
}

/**lm_stats_get_dmae_operation
 * The statistics module uses two pre-allocated DMAE operations
 * instead of allocating and releasing a DMAE operation on every
 * statistics collection. There is an operation for EMAC
 * statistics, and an operation for BMAC or MSTAT statistics
 * (since EMAC requires 3 SGEs and BMAC/MSTAT require 2).
 * This function returns the appropriate DMAE operation based on
 * the current MAC setting.
 *
 *
 * @param pdev the device to use.
 *
 * @return lm_dmae_operation_t* the DMAE operation to use for
 *         collecting HW statistics from the current MAC.
 */
static lm_dmae_operation_t*
lm_stats_get_dmae_operation(lm_device_t* pdev)
{
    if (HAS_MSTAT(pdev) || (pdev->vars.mac_type == MAC_TYPE_BMAC))
    {
        return (lm_dmae_operation_t*)pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation;
    }
    else if(pdev->vars.mac_type == MAC_TYPE_EMAC)
    {
        return (lm_dmae_operation_t*)pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation;
    }
    else
    {
        DbgBreakIf((pdev->vars.mac_type != MAC_TYPE_EMAC) && (pdev->vars.mac_type != MAC_TYPE_BMAC));
        return NULL;
    }

}

/*
 *Function Name:lm_stats_dmae
 *
 *Parameters:
 *
 *Description:
 *  collect stats from hw using dmae
 *Returns:
 *
 */
lm_status_t lm_stats_dmae( lm_device_t *pdev )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS ;
    lm_dmae_context_t* context = lm_dmae_get(pdev, LM_DMAE_STATS)->context;
    lm_dmae_operation_t* operation = lm_stats_get_dmae_operation(pdev);

    DbgBreakIf( FALSE == LM_STATS_IS_HW_ACTIVE( pdev ) ) ;

    if (NULL == operation)
    {
        DbgBreakIf( NULL == operation );
        return LM_STATUS_FAILURE;
    }

    lm_status = lm_dmae_context_execute(pdev,context,operation);

    if (LM_STATUS_ABORTED == lm_status)
    {
        //if the DMAE operation was interrupted by lm_reset_is_inprogress, it's OK and we can treat it as success.
        lm_status = LM_STATUS_SUCCESS;
    }

    return lm_status ;
}

/*
 *Function Name:lm_stats_clear_emac_stats
 *
 *Parameters:
 *
 *Description:
 *  resets all emac statistics counter registers
 *Returns:
 *
 */
lm_status_t lm_stats_clear_emac_stats( lm_device_t *pdev )
{
    u32_t i = 0 ;
    u32_t j = 0 ;
    u32_t count_limit[3] = { EMAC_REG_EMAC_RX_STAT_AC_COUNT,
                             1,
                             EMAC_REG_EMAC_TX_STAT_AC_COUNT } ;
    u32_t reg_start [3] = { EMAC_REG_EMAC_RX_STAT_AC,
                            EMAC_REG_EMAC_RX_STAT_AC_28,
                            EMAC_REG_EMAC_TX_STAT_AC } ;
    u32_t emac_base = 0 ;
    u32_t dummy = 0 ;

    ASSERT_STATIC( ARRSIZE(reg_start) == ARRSIZE(count_limit) );

    if CHK_NULL( pdev )
    {
        return LM_STATUS_INVALID_PARAMETER ;
    }

    emac_base = ( 0 == PORT_ID(pdev) ) ? GRCBASE_EMAC0 : GRCBASE_EMAC1 ;

    for( i = 0; i < ARRSIZE(reg_start) ; i++ )
    {
        for( j = 0 ; j < count_limit[i]; j++ )
        {
            dummy = REG_RD( pdev, emac_base + reg_start[i]+(j*sizeof(u32_t))) ; /*Clear stats registers by reading from the ReadClear RX/RXerr/TX STAT banks*/
        }
    }
    return LM_STATUS_SUCCESS ;
}

/*
 *Function Name:lm_stats_on_update_state
 *
 *Parameters:
 *
 *Description:
 *  This function should be called on one of two occasions:
 *  - When link is down
 *  - When PMF is going down (meaning - changed to another PMF)
 *  Function must be called under PHY LOCK
 *  1. in case there is no link - do nothing
 *  2. make a last query to hw stats for the current link
 *  3. assign to mirror host structures
 *  4. assign to MCP (management)
 *  5. save the copy in the mirror
 *Returns:
 *
 */
lm_status_t lm_stats_on_update_state(lm_device_t * pdev )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS ;

    if CHK_NULL( pdev )
    {
        DbgBreakIf( !pdev ) ;
        return LM_STATUS_INVALID_PARAMETER ;
    }

    if( MAC_TYPE_NONE == pdev->vars.mac_type )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_on_update_state: linking down when already linked down\n" );
        return LM_STATUS_LINK_DOWN ;
    }

    if ( LM_STATS_IS_HW_ACTIVE(pdev) )
    {
        // call statistics for the last time before link down
        lm_status = lm_stats_dmae( pdev ) ;

        if( LM_STATUS_SUCCESS != lm_status )
        {
            DbgBreakIf( LM_STATUS_SUCCESS != lm_status ) ;
        }

        // assign last values before link down
        lm_stats_hw_assign( pdev ) ;
    }

    // assign statistics to mgmt
    lm_stats_mgmt_assign( pdev ) ;

    return lm_status;
}
// NOTE: this function must be called under PHY LOCK!
// - 1. Lock with stats timer/dmae, which means - no timer request in the air while the function is running
// - 2. Last update of stats from emac/bmac (TBD - do it with reset addresses)
// - 3. keep latest stats in a copy
// - 4. if emac - reset all stats registers!
// - 5. if up - change b_link_down_is_on flag to FALSE
lm_status_t lm_stats_on_link_update( lm_device_t *pdev, const u8_t b_is_link_up )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS ;

    if CHK_NULL( pdev )
    {
        DbgBreakIf( !pdev ) ;
        return LM_STATUS_INVALID_PARAMETER ;
    }

    if( FALSE == b_is_link_up ) // link down
    {
        pdev->vars.stats.stats_collect.stats_hw.b_is_link_up = FALSE ;

        if ( FALSE == LM_STATS_IS_HW_ACTIVE(pdev) )
        {
            return LM_STATUS_SUCCESS;
        }

        // get stats for the last time, assign to management and save a copy to the mirror
        lm_status = lm_stats_on_update_state(pdev);

        if( LM_STATUS_SUCCESS != lm_status )
        {
            return lm_status ;
        }

        switch( pdev->vars.mac_type )
        {
        case MAC_TYPE_EMAC:
            lm_stats_clear_emac_stats( pdev ) ; // reset emac stats fields
            break;

        case MAC_TYPE_BMAC: // nothing to do - bigmac resets itself anyway
            break;

        case MAC_TYPE_UMAC: // nothing to do - mstat resets anyway
        case MAC_TYPE_XMAC:
            DbgBreakIf(!CHIP_IS_E3(pdev));
            break;

        default:
        case MAC_TYPE_NONE:
            DbgBreakMsg( "mac_type not acceptable\n" ) ;
            return LM_STATUS_INVALID_PARAMETER ;
        }

        // Set current to 0
        mm_mem_zero( &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT],
                     sizeof(pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT]) ) ;
    }
    else
    {
        pdev->vars.stats.stats_collect.stats_hw.b_is_link_up = TRUE ;
    }

    return lm_status ;
}

/**lm_stats_alloc_hw_query
 * Allocate buffers for the MAC and NIG stats. If the chip has
 * an EMAC block, memory will be allocated for its stats,
 * otherwise only the non-EMAC and NIG buffers will be
 * allocated. The non-EMAC buffer will be of the proper size for
 * BMAC1/BMAC2/MSTAT, as needed.
 *
 * @param pdev the pdev to initialize
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success,
 *         LM_STATUS_FAILURE on failure.
 */
static lm_status_t lm_stats_alloc_hw_query(lm_device_t *pdev)
{
    lm_stats_hw_collect_t* stats_hw = &(pdev->vars.stats.stats_collect.stats_hw);
    u32_t alloc_size = 0 ;
    u32_t mac_stats_alloc_size = 0;
    lm_address_t phys_addr = {{0}};

    if(!HAS_MSTAT(pdev)) //MSTAT replaces EMAC/BMAC1/BMAC2 stats.
    {
        DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: device has no MSTAT block.\n");
        // Allocate contiguous memory for statistics buffers to be read from hardware. This could probably be changed to
        // allocate max(emac, bmac) instead of emac+bmac, but we'd need to make sure there are no races in the transition from
        // 1G link to 10G link or vice-versa
        mac_stats_alloc_size = sizeof(struct _stats_emac_query_t) + sizeof( union _stats_bmac_query_t);
        alloc_size = mac_stats_alloc_size + sizeof( struct _stats_nig_query_t ) ;
        stats_hw->u.s.addr_emac_stats_query = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );

        stats_hw->mac_stats_phys_addr = phys_addr;
        LM_INC64(&phys_addr, sizeof(struct _stats_emac_query_t));
        stats_hw->bmac_stats_phys_addr = phys_addr;
        LM_INC64(&phys_addr, sizeof( union _stats_bmac_query_t));
        stats_hw->nig_stats_phys_addr = phys_addr;

        DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: allocated a block of size %d at %x\n", alloc_size, stats_hw->u.s.addr_emac_stats_query);
        if CHK_NULL( stats_hw->u.s.addr_emac_stats_query )
        {
            DbgBreakIf(!stats_hw->u.s.addr_emac_stats_query );
            return LM_STATUS_FAILURE ;
        }

        stats_hw->u.s.addr_bmac1_stats_query = (struct _stats_bmac1_query_t*)((u8_t*)stats_hw->u.s.addr_emac_stats_query + sizeof(struct _stats_emac_query_t)) ;
        stats_hw->u.s.addr_bmac2_stats_query = (struct _stats_bmac2_query_t*)((u8_t*)stats_hw->u.s.addr_emac_stats_query + sizeof(struct _stats_emac_query_t)) ;
        stats_hw->addr_nig_stats_query = (struct _stats_nig_query_t*)((u8_t*)stats_hw->u.s.addr_bmac1_stats_query + sizeof(union _stats_bmac_query_t)) ;
        DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: addr_bmac1_stats_query = %x, addr_bmac2_stats_query=%x, addr_nig_stats_query=%x\n", stats_hw->u.s.addr_bmac1_stats_query, stats_hw->u.s.addr_bmac2_stats_query, stats_hw->addr_nig_stats_query);
    }
    else
    {
        DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: device has an MSTAT block.\n");

        mac_stats_alloc_size = sizeof(struct _stats_mstat_query_t);
        alloc_size = mac_stats_alloc_size + sizeof( struct _stats_nig_query_t );

        stats_hw->u.addr_mstat_stats_query = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );

        stats_hw->mac_stats_phys_addr = phys_addr;
        LM_INC64(&phys_addr, mac_stats_alloc_size);
        stats_hw->nig_stats_phys_addr = phys_addr;

        DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: allocated a block of size %d at %x\n", alloc_size, stats_hw->u.addr_mstat_stats_query);
        if CHK_NULL( stats_hw->u.addr_mstat_stats_query )
        {
            DbgBreakIf(!stats_hw->u.addr_mstat_stats_query );
            return LM_STATUS_FAILURE ;
        }

        stats_hw->addr_nig_stats_query = (struct _stats_nig_query_t*)((u8_t*)stats_hw->u.addr_mstat_stats_query + sizeof(struct _stats_mstat_query_t)) ;
        DbgMessage(NULL, INFORM, "lm_stats_alloc_hw_query: stats_hw->addr_nig_stats_query=%x\n", stats_hw->addr_nig_stats_query);
    }

    return LM_STATUS_SUCCESS;
}
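
/* Resulting layouts of the single contiguous allocation (derived from the
 * pointer arithmetic above):
 *   without MSTAT: [ emac query | bmac1/bmac2 query (union) | nig query ]
 *   with MSTAT:    [ mstat query                            | nig query ]
 */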

lm_status_t lm_stats_alloc_fw_resc(struct _lm_device_t *pdev)
{
    lm_stats_fw_collect_t * stats_fw = &pdev->vars.stats.stats_collect.stats_fw;
    u32_t num_groups = 0;
    u32_t alloc_size = 0;
    u8_t num_queue_stats = 1;

    /* Total number of FW statistics requests =
     * 1 for port stats + 1 for PF stats + 1 for queue stats + 1 for FCoE stats + 1 for toe stats */
#define NUM_FW_STATS_REQS 5
    stats_fw->fw_static_stats_num = stats_fw->fw_stats_num = NUM_FW_STATS_REQS;

#ifndef __LINUX
    if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
        stats_fw->fw_stats_num += pdev->hw_info.sriov_info.total_vfs * 2;
    }
#endif

    /* The request is built from a stats_query_header and an array of
     * stats_query_cmd_group, each of which contains
     * STATS_QUERY_CMD_COUNT rules. The real number of requests is
     * configured in the stats_query_header.
     */
    num_groups = (stats_fw->fw_stats_num) / STATS_QUERY_CMD_COUNT +
                 (((stats_fw->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
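
    /* Illustrative arithmetic only - the actual STATS_QUERY_CMD_COUNT value is
     * defined by the FW HSI. If it were 16, then 5 requests would need
     * 5/16 + 1 = 1 group, while 5 + 40 VF requests would need 45/16 + 1 = 3.
     */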

#ifndef __LINUX
    if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
        DbgMessage(pdev, WARN, "%d stats groups to support %d VFs\n",num_groups, pdev->hw_info.sriov_info.total_vfs);
    }
#endif
    stats_fw->fw_stats_req_sz = sizeof(struct stats_query_header) +
                                num_groups * sizeof(struct stats_query_cmd_group);

    /* Data for statistics requests + stats_counter
     *
     * stats_counter holds per-STORM counters that are incremented
     * when a STORM has finished with the current request.
     */
    stats_fw->fw_stats_data_sz = sizeof(struct per_port_stats) +
                                 sizeof(struct per_pf_stats) +
                                 sizeof(struct per_queue_stats) * num_queue_stats +
                                 sizeof(struct toe_stats_query) +
                                 sizeof(struct fcoe_statistics_params) +
                                 sizeof(struct stats_counter);

    alloc_size = stats_fw->fw_stats_data_sz + stats_fw->fw_stats_req_sz;
    stats_fw->fw_stats = mm_alloc_phys_mem(pdev, alloc_size, &stats_fw->fw_stats_mapping ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );
    if (!stats_fw->fw_stats)
    {
        return LM_STATUS_RESOURCE;
    }
    /* Set shortcuts */
    stats_fw->fw_stats_req = (lm_stats_fw_stats_req_t *)stats_fw->fw_stats;
    stats_fw->fw_stats_req_mapping = stats_fw->fw_stats_mapping;

    stats_fw->fw_stats_data = (lm_stats_fw_stats_data_t *)
                              ((u8*)stats_fw->fw_stats + stats_fw->fw_stats_req_sz);

    stats_fw->fw_stats_data_mapping = stats_fw->fw_stats_mapping;
    LM_INC64(&stats_fw->fw_stats_data_mapping, stats_fw->fw_stats_req_sz);

    return LM_STATUS_SUCCESS;
}
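
/* The single allocation is split in two - the request block first, the data
 * block right after it:
 *   fw_stats: [ fw_stats_req (fw_stats_req_sz) | fw_stats_data (fw_stats_data_sz) ]
 * which is exactly what the shortcut pointers/mappings above encode.
 */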

/*
 *Function Name: lm_stats_alloc_drv_info_to_mfw_resc
 *
 *Parameters:
 *
 *Description:
 *  Allocates physical memory to be used for the OCBB statistics query by the MFW; needed for E3+ only
 *Returns:
 *
 */
static lm_status_t lm_stats_alloc_drv_info_to_mfw_resc(lm_device_t *pdev)
{
    lm_stats_drv_info_to_mfw_t* drv_info_to_mfw = &(pdev->vars.stats.stats_collect.drv_info_to_mfw );
    u32_t alloc_size = 0 ;
    lm_address_t phys_addr = {{0}};
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    if( CHIP_IS_E3(pdev) )
    {
        alloc_size = max( ( sizeof( *drv_info_to_mfw->addr.eth_stats ) ),
                          ( sizeof( *drv_info_to_mfw->addr.iscsi_stats ) ) ) ;
        alloc_size = max( ( sizeof( *drv_info_to_mfw->addr.fcoe_stats ) ), alloc_size ) ;

        // since it is a union it doesn't matter
        drv_info_to_mfw->addr.eth_stats = mm_alloc_phys_mem(pdev, alloc_size, &phys_addr ,PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON );

        if( !drv_info_to_mfw->addr.eth_stats )
        {
            lm_status = LM_STATUS_RESOURCE;
        }

        drv_info_to_mfw->drv_info_to_mfw_phys_addr = phys_addr;
    }

    return lm_status;
}

// allocate memory both for hw and fw statistics
lm_status_t lm_stats_alloc_resc( struct _lm_device_t* pdev )
{
    u8_t loader_channel_idx = (u8_t)(-1) ;
    u8_t executer_channel_idx = (u8_t)(-1) ;
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    lm_dmae_context_info_t *stats_dmae_context_info = lm_dmae_get(pdev, LM_DMAE_STATS);

    if CHK_NULL(pdev )
    {
        DbgBreakIf(!pdev) ;
        return LM_STATUS_INVALID_PARAMETER ;
    }

    lm_status = lm_stats_alloc_fw_resc(pdev);

    if( lm_status != LM_STATUS_SUCCESS )
    {
        // stats are not such a big deal if not working, but since we
        // only allocate a buffer here, it doesn't matter - the next alloc will also fail...
        return lm_status;
    }


    lm_status = lm_stats_alloc_drv_info_to_mfw_resc(pdev);

    if( lm_status != LM_STATUS_SUCCESS )
    {
        // OCBB is not such a big deal if not working, but since we
        // only allocate a buffer here, it doesn't matter - the next alloc will also fail...
        return lm_status;
    }

    lm_status = lm_stats_alloc_hw_query(pdev);
    if(lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }


    switch (PORT_ID(pdev))
    {
    case 0:
        {
            loader_channel_idx = DMAE_STATS_PORT_0_CMD_IDX_0;
            executer_channel_idx = DMAE_STATS_PORT_0_CMD_IDX_1;
        }
        break;
    case 1:
        {
            loader_channel_idx = DMAE_STATS_PORT_1_CMD_IDX_0;
            executer_channel_idx = DMAE_STATS_PORT_1_CMD_IDX_1;
        }
        break;
    default:
        {
            DbgMessage(NULL, FATAL, "Invalid Port ID %d\n", PORT_ID(pdev));
            DbgBreak();
            return LM_STATUS_INVALID_PARAMETER;
        }
        break;
    }

    //create the locking policy for the stats DMAE context
    lm_status = lm_dmae_locking_policy_create(pdev, LM_PROTECTED_RESOURCE_DMAE_STATS, LM_DMAE_LOCKING_POLICY_TYPE_PER_PF, &stats_dmae_context_info->locking_policy);
    if(lm_status != LM_STATUS_SUCCESS)
    {
        return lm_status;
    }

    //create the stats DMAE context
    stats_dmae_context_info->context = lm_dmae_context_create_sgl( pdev,
                                                                   loader_channel_idx,
                                                                   executer_channel_idx,
                                                                   &stats_dmae_context_info->locking_policy,
                                                                   CHANGE_ENDIANITY);
    if (CHK_NULL(stats_dmae_context_info->context))
    {
        DbgBreak();
        return LM_STATUS_FAILURE;
    }

    //create the non-EMAC DMAE operation
    pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation = lm_dmae_operation_create_sgl(pdev, TRUE, stats_dmae_context_info->context);

    //create the EMAC DMAE operation if needed
    if (!HAS_MSTAT(pdev))
    {
        pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation = lm_dmae_operation_create_sgl(pdev, TRUE, stats_dmae_context_info->context);
    }
    else
    {
        pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation = NULL;
    }

    return LM_STATUS_SUCCESS ;
}

/**lm_stats_hw_setup_nig
 * Add the DMAE command for reading NIG stats to the non-EMAC
 * DMAE context.
 *
 * @param pdev the device to initialize
 * @param dmae_operation the operation to setup for reading NIG
 *                       statistics
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         failure value on failure.
 */
static lm_status_t lm_stats_hw_setup_nig(lm_device_t* pdev, lm_dmae_operation_t* dmae_operation)
{
    lm_status_t lm_status = LM_STATUS_FAILURE;

    lm_dmae_address_t source = lm_dmae_address((0==PORT_ID(pdev))?NIG_REG_STAT0_BRB_DISCARD : NIG_REG_STAT1_BRB_DISCARD,
                                               LM_DMAE_ADDRESS_GRC);
    lm_dmae_address_t dest = lm_dmae_address(pdev->vars.stats.stats_collect.stats_hw.nig_stats_phys_addr.as_u64,
                                             LM_DMAE_ADDRESS_HOST_PHYS);

    lm_status = lm_dmae_operation_add_sge(pdev, dmae_operation, source, dest, sizeof(struct _stats_nig_query_t ) / sizeof(u32_t));

    return lm_status;
}

/**
 * This struct is used to describe a DMAE SGE. It is used by the
 * lm_stats_hw_setup_xxx and lm_stats_set_dmae_operation_sges
 * functions.
 *
 */
struct lm_stats_sge_descr_t{
    u32_t source_offset;
    u64_t dest_paddr;
    u16_t length; // in DWORDS
};
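
/* Illustrative descriptor (hypothetical values) - copy 10 dwords starting at
 * GRC offset 0x1234 into host physical address 'paddr':
 *     struct lm_stats_sge_descr_t d = { 0x1234, paddr, 10 };
 * lm_stats_set_dmae_operation_sges() below turns each such descriptor into a
 * GRC-to-host DMAE SGE.
 */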


/**lm_stats_set_dmae_operation_sges
 * Set the SGEs of a DMAE operation according to the supplied
 * SGE descriptor array. If the DMAE operation had any SGEs
 * defined before, this function removes them.
 *
 * @param pdev the device to use
 * @param operation the operation to modify
 * @param sge_descr the array of SGE descriptors
 * @param num_sges the number of SGE descriptors
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         failure value on failure.
 */
static lm_status_t lm_stats_set_dmae_operation_sges(lm_device_t* pdev, lm_dmae_operation_t* operation, struct lm_stats_sge_descr_t* sge_descr, u8_t num_sges)
{
    u8_t sge_idx = 0;
    lm_dmae_address_t sge_source = {{0}};
    lm_dmae_address_t sge_dest = {{0}};
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    //after returning from D3 there may be some SGEs set up here.
    lm_dmae_operation_clear_all_sges(operation);

    for (sge_idx = 0; sge_idx < num_sges; ++sge_idx)
    {
        sge_source = lm_dmae_address(sge_descr[sge_idx].source_offset, LM_DMAE_ADDRESS_GRC);
        sge_dest = lm_dmae_address(sge_descr[sge_idx].dest_paddr, LM_DMAE_ADDRESS_HOST_PHYS);

        lm_status = lm_dmae_operation_add_sge(pdev, operation, sge_source, sge_dest, sge_descr[sge_idx].length);
        if (LM_STATUS_SUCCESS != lm_status)
        {
            DbgBreak();
            return lm_status;
        }
    }

    return lm_status;
}

/**lm_stats_hw_setup_emac
 * setup the DMAE SGL for the EMAC stats DMAE context
 *
 * @param pdev the device to initialize
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         value on failure.
 */
static lm_status_t lm_stats_hw_setup_emac( lm_device_t* pdev)
{
    const u64_t base_paddr = pdev->vars.stats.stats_collect.stats_hw.mac_stats_phys_addr.as_u64;

    const u16_t sge1_len = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_emac_stats_query->stats_rx );
    const u16_t sge2_len = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_emac_stats_query->stats_rx_err );
    const u32_t emac_base = (PORT_ID(pdev)==0) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;

    lm_status_t lm_status = LM_STATUS_FAILURE;

    lm_dmae_operation_t* operation = pdev->vars.stats.stats_collect.stats_hw.emac_dmae_operation;

    struct lm_stats_sge_descr_t sges[3] = {{0}}; //we can't use a full initializer because the DOS compiler requires that all initializers be constant.

    sges[0].source_offset = emac_base + EMAC_REG_EMAC_RX_STAT_IFHCINOCTETS;
    sges[0].dest_paddr = base_paddr;
    sges[0].length = EMAC_REG_EMAC_RX_STAT_AC_COUNT;

    sges[1].source_offset = emac_base + EMAC_REG_EMAC_RX_STAT_FALSECARRIERERRORS;
    sges[1].dest_paddr = base_paddr + sge1_len;
    sges[1].length = 1;

    sges[2].source_offset = emac_base + EMAC_REG_EMAC_TX_STAT_IFHCOUTOCTETS;
    sges[2].dest_paddr = base_paddr + sge1_len + sge2_len;
    sges[2].length = EMAC_REG_EMAC_TX_STAT_AC_COUNT;

    lm_status = lm_stats_set_dmae_operation_sges(pdev, operation, sges, ARRSIZE(sges));
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize EMAC stats DMAE operation.\n");
        return lm_status;
    }

    lm_status = lm_stats_hw_setup_nig(pdev, operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize NIG stats DMAE operation.\n");
        return lm_status;
    }

    return lm_status;
}

/**lm_stats_hw_setup_non_emac
 * Setup the DMAE SGL for the non-EMAC stats DMAE context. This
 * function assumes that the MAC statistics themselves can be
 * read with 2 DMAE transactions.
 *
 *
 * @param pdev the device to initialize
 * @param paddr_base the base physical address where the
 *                   statistics data will be copied.
 * @param grc_base the base GRC address of the required stats
 *                 block (e.g NIG_REG_INGRESS_BMAC0_MEM or
 *                 GRCBASE_MSTAT0)
 * @param block1_start offset of the first register in the first
 *                     transaction.
 * @param block1_size size (in bytes) of the first DMAE
 *                    transaction.
 * @param block2_start offset of the first register in the
 *                     second transaction.
 * @param block2_size size (in bytes) of the second DMAE
 *                    transaction.
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         value on failure.
 */
static lm_status_t lm_stats_hw_setup_non_emac( lm_device_t* pdev,
                                               u64_t paddr_base,
                                               u32_t grc_base,
                                               u32_t block1_start, u16_t block1_size,
                                               u32_t block2_start, u16_t block2_size)
{
    lm_status_t lm_status = LM_STATUS_FAILURE;

    lm_dmae_operation_t* operation = (lm_dmae_operation_t*)pdev->vars.stats.stats_collect.stats_hw.non_emac_dmae_operation;

    struct lm_stats_sge_descr_t sges[2] = {{0}};

    sges[0].source_offset = grc_base+block1_start;
    sges[0].dest_paddr = paddr_base;
    sges[0].length = block1_size / sizeof(u32_t);

    sges[1].source_offset = grc_base+block2_start;
    sges[1].dest_paddr = paddr_base + block1_size;
    sges[1].length = block2_size / sizeof(u32_t);

    lm_status = lm_stats_set_dmae_operation_sges(pdev, operation, sges, ARRSIZE(sges));
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize non-EMAC stats DMAE operation.\n");
        return lm_status;
    }

    lm_status = lm_stats_hw_setup_nig(pdev, operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to initialize NIG stats DMAE operation.\n");
        return lm_status;
    }

    return lm_status;
}

/**lm_stats_hw_setup_bmac
 * Setup the BMAC1/BMAC2 stats DMAE transactions.
 * @see lm_stats_hw_setup_non_emac for more details.
 *
 * @param pdev the device to initialize.
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         value on failure.
 */
static lm_status_t lm_stats_hw_setup_bmac(lm_device_t* pdev)
{
    const u32_t port = PORT_ID(pdev) ;
    u32_t bmac_base = 0 ; // bmac: GRCBASE_NIG, bmac_base + reg name
                          // nig : GRCBASE_NIG, reg name (NIG_XXX)
    u32_t bmac_tx_start_reg, bmac_rx_start_reg;
    u16_t bmac_tx_stat_size, bmac_rx_stat_size;
    lm_status_t lm_status = LM_STATUS_FAILURE;

    DbgBreakIf(HAS_MSTAT(pdev));

    switch( port )
    {
    case 0:
        bmac_base = NIG_REG_INGRESS_BMAC0_MEM ;
        break;

    case 1:
        bmac_base = NIG_REG_INGRESS_BMAC1_MEM;

        if (!CHIP_IS_E1x(pdev))
        {
            DbgMessage(pdev, INFORMi, "BMAC stats should never be collected on port 1 of E2!\n");
            bmac_base = NIG_REG_INGRESS_BMAC0_MEM;
        }
        break;

    default:
        DbgBreakIf( port > 1 ) ;
        break;

    }

    if (CHIP_IS_E1x(pdev))
    {
        bmac_tx_start_reg = BIGMAC_REGISTER_TX_STAT_GTPKT;
        bmac_rx_start_reg = BIGMAC_REGISTER_RX_STAT_GR64;
        bmac_tx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query->stats_tx);
        bmac_rx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query->stats_rx);
    }
    else
    {
        bmac_tx_start_reg = BIGMAC2_REGISTER_TX_STAT_GTPOK;
        bmac_rx_start_reg = BIGMAC2_REGISTER_RX_STAT_GR64;
        bmac_tx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query->stats_tx);
        bmac_rx_stat_size = sizeof(pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query->stats_rx);
    }

    lm_status = lm_stats_hw_setup_non_emac(pdev,
                                           pdev->vars.stats.stats_collect.stats_hw.bmac_stats_phys_addr.as_u64,
                                           bmac_base,
                                           bmac_tx_start_reg,
                                           bmac_tx_stat_size,
                                           bmac_rx_start_reg,
                                           bmac_rx_stat_size);

    return lm_status;
}

/**lm_stats_hw_setup_mstat
 * Setup the MSTAT stats DMAE transactions.
 * @see lm_stats_hw_setup_non_emac for more details.
 *
 * @param pdev the device to initialize.
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         value on failure.
 */
static lm_status_t lm_stats_hw_setup_mstat(lm_device_t* pdev)
{
    const u32_t port = PORT_ID(pdev) ;
    u32_t mstat_base = 0;
    u32_t mstat_tx_start, mstat_rx_start;
    u16_t mstat_tx_size, mstat_rx_size;
    lm_status_t lm_status = LM_STATUS_FAILURE;
    lm_stats_hw_collect_t* stats_hw = &pdev->vars.stats.stats_collect.stats_hw;

    DbgBreakIf(!HAS_MSTAT(pdev));

    mstat_tx_start = MSTAT_REG_TX_STAT_GTXPOK_LO;
    mstat_tx_size = sizeof(stats_hw->u.addr_mstat_stats_query->stats_tx);

    mstat_rx_start = MSTAT_REG_RX_STAT_GR64_LO;
    mstat_rx_size = sizeof(stats_hw->u.addr_mstat_stats_query->stats_rx);

    DbgMessage(pdev, INFORM, "lm_stats_hw_setup_mstat: mstat_tx_start=%x, mstat_tx_size=%x, mstat_rx_start=%x, mstat_rx_size=%x\n",mstat_tx_start,mstat_tx_size,mstat_rx_start, mstat_rx_size);

    switch(port)
    {
    case 0:
        mstat_base = GRCBASE_MSTAT0;
        break;
    case 1:
        mstat_base = GRCBASE_MSTAT1;
        break;
    default:
        DbgBreakIf( port > 1 ) ;
        break;
    }

    lm_status = lm_stats_hw_setup_non_emac(pdev,
                                           pdev->vars.stats.stats_collect.stats_hw.mac_stats_phys_addr.as_u64,
                                           mstat_base,
                                           mstat_tx_start,
                                           mstat_tx_size,
                                           mstat_rx_start,
                                           mstat_rx_size);

    return lm_status;
}

/* Description:
 *    sets up resources regarding hw stats (init fields);
 *    sets the offset series of hw reads, either from EMAC & BIGMAC or from the MSTAT block
 */
lm_status_t lm_stats_hw_setup(struct _lm_device_t *pdev)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS ;
    /* enable hw collect with mstat only if it's not fpga and not a 4-domain emulation compile... */
    u8_t b_enable_collect = HAS_MSTAT(pdev)? ((CHIP_REV_IS_EMUL(pdev) && (CHIP_BONDING(pdev) == 0)) || CHIP_REV_IS_ASIC(pdev)) : TRUE;

    if(HAS_MSTAT(pdev))
    {
        lm_status = lm_stats_hw_setup_mstat(pdev);
        if(lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(NULL, FATAL, "Failed to initialize MSTAT statistics\n");
            return lm_status;
        }
    }
    else
    {
        lm_status = lm_stats_hw_setup_emac(pdev);
        if(lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(NULL, FATAL, "Failed to initialize EMAC statistics\n");
            return lm_status;
        }
        lm_status = lm_stats_hw_setup_bmac(pdev);
        if(lm_status != LM_STATUS_SUCCESS)
        {
            DbgMessage(NULL, FATAL, "Failed to initialize BMAC statistics\n");
            return lm_status;
        }
    }

    pdev->vars.stats.stats_collect.stats_hw.b_is_link_up = FALSE;

    pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled = b_enable_collect ; // HW stats are not supported on E3 FPGA.

    return lm_status ;
} /* lm_stats_hw_setup */
1458
1459 /**
1460 * This function will prepare the statistics ramrod data the way
1461 * we will only have to increment the statistics counter and
1462 * send the ramrod each time we have to.
1463 *
1464 * @param pdev
1465 */
lm_stats_prep_fw_stats_req(lm_device_t * pdev)1466 static void lm_stats_prep_fw_stats_req(lm_device_t *pdev)
1467 {
1468 lm_stats_fw_collect_t *stats_fw = &pdev->vars.stats.stats_collect.stats_fw;
1469 struct stats_query_header *stats_hdr = &stats_fw->fw_stats_req->hdr;
1470 lm_address_t cur_data_offset = {{0}};
1471 struct stats_query_entry *cur_query_entry = NULL;
1472
1473 stats_hdr->cmd_num = stats_fw->fw_stats_num;
1474 stats_hdr->drv_stats_counter = 0;
1475
1476 /* storm_counters struct contains the counters of completed
1477 * statistics requests per storm which are incremented by FW
1478 * each time it completes hadning a statistics ramrod. We will
1479 * check these counters in the timer handler and discard a
1480 * (statistics) ramrod completion.
1481 */
1482 cur_data_offset = stats_fw->fw_stats_data_mapping;
1483 LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, storm_counters));
1484
1485 stats_hdr->stats_counters_addrs.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
1486 stats_hdr->stats_counters_addrs.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
1487
1488 /* prepare to the first stats ramrod (will be completed with
1489 * the counters equal to zero) - init counters to somethig different.
1490 */
1491 mm_memset(&stats_fw->fw_stats_data->storm_counters, 0xff, sizeof(stats_fw->fw_stats_data->storm_counters) );
1492
1493 /**** Port FW statistics data ****/
1494 cur_data_offset = stats_fw->fw_stats_data_mapping;
1495 LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, port));
1496
1497 cur_query_entry = &stats_fw->fw_stats_req->query[LM_STATS_PORT_QUERY_IDX];
1498
1499 cur_query_entry->kind = STATS_TYPE_PORT;
1500 /* For port query index is a DONT CARE */
1501 cur_query_entry->index = PORT_ID(pdev);
1502 cur_query_entry->funcID = mm_cpu_to_le16(FUNC_ID(pdev));;
1503 cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
1504 cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
1505
1506 /**** PF FW statistics data ****/
1507 cur_data_offset = stats_fw->fw_stats_data_mapping;
1508 LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, pf));
1509
1510 cur_query_entry = &stats_fw->fw_stats_req->query[LM_STATS_PF_QUERY_IDX];
1511
1512 cur_query_entry->kind = STATS_TYPE_PF;
1513 /* For PF query index is a DONT CARE */
1514 cur_query_entry->index = PORT_ID(pdev);
1515 cur_query_entry->funcID = mm_cpu_to_le16(FUNC_ID(pdev));
1516 cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
1517 cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
1518
1519 /**** Toe query ****/
1520 cur_data_offset = stats_fw->fw_stats_data_mapping;
1521 LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, toe));
1522
1523 ASSERT_STATIC(LM_STATS_TOE_IDX<ARRSIZE(stats_fw->fw_stats_req->query));
1524 cur_query_entry = &stats_fw->fw_stats_req->query[LM_STATS_TOE_IDX];
1525
1526 cur_query_entry->kind = STATS_TYPE_TOE;
1527 cur_query_entry->index = LM_STATS_CNT_ID(pdev);
1528 cur_query_entry->funcID = mm_cpu_to_le16(FUNC_ID(pdev));
1529 cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
1530 cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
1531
1532 if ( !CHIP_IS_E1x(pdev) )
1533 {
1534 // FW will assert if we send this kind for chip < E2
1535 /**** FCoE query ****/
1536 cur_data_offset = stats_fw->fw_stats_data_mapping;
1537 LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, fcoe));
1538
1539 ASSERT_STATIC(LM_STATS_FCOE_IDX<ARRSIZE(stats_fw->fw_stats_req->query));
1540 cur_query_entry = &stats_fw->fw_stats_req->query[LM_STATS_FCOE_IDX];
1541 cur_query_entry->kind = STATS_TYPE_FCOE;
1542 cur_query_entry->index = LM_STATS_CNT_ID(pdev);
1543 cur_query_entry->funcID = mm_cpu_to_le16(FUNC_ID(pdev));
1544 cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
1545 cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
1546 }
1547 else
1548 {
1549 // if no FCoE, we need to decrease command count by one
1550 --stats_hdr->cmd_num;
1551 }
1552
1553 /**** Clients' queries ****/
1554 cur_data_offset = stats_fw->fw_stats_data_mapping;
1555 LM_INC64(&cur_data_offset, OFFSETOF(lm_stats_fw_stats_data_t, queue_stats));
1556
1557 ASSERT_STATIC(LM_STATS_FIRST_QUEUE_QUERY_IDX < ARRSIZE(stats_fw->fw_stats_req->query));
1558 cur_query_entry = &stats_fw->fw_stats_req->query[LM_STATS_FIRST_QUEUE_QUERY_IDX];
1559
1560 cur_query_entry->kind = STATS_TYPE_QUEUE;
1561 cur_query_entry->index = LM_STATS_CNT_ID(pdev);
1562 cur_query_entry->funcID = mm_cpu_to_le16(FUNC_ID(pdev));
1563 cur_query_entry->address.hi = mm_cpu_to_le32(cur_data_offset.as_u32.high);
1564 cur_query_entry->address.lo = mm_cpu_to_le32(cur_data_offset.as_u32.low);
1565 /* TODO : VF! more stats? */
1566 }
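/* For reference, the request built above ends up laid out as follows
 * (the FCoE entry is present only on E2 and newer):
 *
 *     fw_stats_req: hdr
 *                   query[LM_STATS_PORT_QUERY_IDX]        -> fw_stats_data->port
 *                   query[LM_STATS_PF_QUERY_IDX]          -> fw_stats_data->pf
 *                   query[LM_STATS_TOE_IDX]               -> fw_stats_data->toe
 *                   query[LM_STATS_FCOE_IDX]              -> fw_stats_data->fcoe
 *                   query[LM_STATS_FIRST_QUEUE_QUERY_IDX] -> fw_stats_data->queue_stats
 *
 * Each entry's address field points the FW at the matching offset inside
 * fw_stats_data, as computed with LM_INC64() above.
 */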
1567
1568 #ifdef VF_INVOLVED
1569 void lm_stats_prep_vf_fw_stats_req(lm_device_t *pdev)
1570 {
1571 lm_stats_fw_collect_t *stats_fw = &pdev->vars.stats.stats_collect.stats_fw;
1572 struct stats_query_header *stats_hdr = &stats_fw->fw_stats_req->hdr;
1573 struct stats_query_entry *cur_query_entry;
1574 u8_t vf_idx = 0;
1575 u8_t cmd_cnt = 0;
1576 lm_vf_info_t *vf_info;
1577
1578 cur_query_entry = &stats_fw->fw_stats_req->query[LM_STATS_FIRST_VF_QUEUE_QUERY_IDX];
1579
1580 MM_ACQUIRE_VFS_STATS_LOCK_DPC(pdev);
1581 for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
1582 vf_info = &pdev->vfs_set.vfs_array[vf_idx];
1583 if (vf_info->vf_stats.vf_stats_state == VF_STATS_REQ_SUBMITTED) {
1584 u8_t process_it = FALSE;
1585 if (vf_info->vf_stats.vf_stats_flag & VF_STATS_COLLECT_FW_STATS_FOR_PF) {
1586 cur_query_entry->kind = STATS_TYPE_QUEUE;
1587 cur_query_entry->index = LM_FW_VF_STATS_CNT_ID(vf_info);
1588 cur_query_entry->funcID = mm_cpu_to_le16(FUNC_ID(pdev));
1589 cur_query_entry->address.hi = mm_cpu_to_le32(vf_info->vf_stats.pf_fw_stats_phys_data.as_u32.high);
1590 cur_query_entry->address.lo = mm_cpu_to_le32(vf_info->vf_stats.pf_fw_stats_phys_data.as_u32.low);
1591 process_it = TRUE;
1592 cur_query_entry++;
1593 cmd_cnt++;
1594 }
1595 if (vf_info->vf_stats.vf_stats_flag & VF_STATS_COLLECT_FW_STATS_FOR_VF) {
1596 cur_query_entry->kind = STATS_TYPE_QUEUE;
1597 cur_query_entry->index = LM_FW_VF_STATS_CNT_ID(vf_info);
1598 cur_query_entry->funcID = mm_cpu_to_le16(8 + vf_info->abs_vf_id);
1599 cur_query_entry->address.hi = mm_cpu_to_le32(vf_info->vf_stats.vf_fw_stats_phys_data.as_u32.high);
1600 cur_query_entry->address.lo = mm_cpu_to_le32(vf_info->vf_stats.vf_fw_stats_phys_data.as_u32.low);
1601 process_it = TRUE;
1602 cur_query_entry++;
1603 cmd_cnt++;
1604 }
1605 if (process_it) {
1606 vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_IN_PROCESSING;
1607 vf_info->vf_stats.vf_stats_cnt++;
1608 }
1609 }
1610 }
1611 stats_hdr->cmd_num = stats_fw->fw_static_stats_num + cmd_cnt;
1612 MM_RELEASE_VFS_STATS_LOCK_DPC(pdev);
1613
1614 }
1615 #endif
1616
1617 /* Description:
1618 * sets up the FW statistics parameters
1619 */
1620 void lm_stats_fw_setup(struct _lm_device_t *pdev)
1621 {
1622 lm_stats_fw_collect_t * stats_fw = &pdev->vars.stats.stats_collect.stats_fw;
1623 stats_fw->b_completion_done = TRUE ; // reset flag to initial value
1624 stats_fw->b_ramrod_completed = TRUE ;
1625 stats_fw->drv_counter = 0 ;
1626 stats_fw->b_collect_enabled = pdev->params.fw_stats_init_value ; // change to TRUE in order to enable fw stats
1627
1628 pdev->vars.stats.stats_collect.b_last_called = TRUE ;
1629
1630 /* Prepare the constant slow-path command (for stats we don't allocate a new one each time) */
1631 lm_sq_post_fill_entry(pdev,
1632 &(stats_fw->stats_sp_list_command),
1633 0 /* cid: Don't care */,
1634 RAMROD_CMD_ID_COMMON_STAT_QUERY,
1635 NONE_CONNECTION_TYPE,
1636 stats_fw->fw_stats_req_mapping.as_u64,
1637 FALSE /* don't release sp mem*/);
1638
1639 /* Prepare the FW stats ramrod request structure (can do this just once) */
1640 lm_stats_prep_fw_stats_req(pdev);
1641 }
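/* Hedged sketch: with the slow-path entry prefilled once above, the
 * periodic flow (e.g. lm_stats_on_timer) is assumed to just stamp the
 * current driver counter into the prebuilt request and repost the same
 * entry each cycle, along the lines of:
 *
 *     stats_fw->fw_stats_req->hdr.drv_stats_counter =
 *         mm_cpu_to_le16(stats_fw->drv_counter);
 *     lm_sq_post_entry(pdev, &stats_fw->stats_sp_list_command,
 *                      CMD_PRIORITY_HIGH);
 *
 * lm_sq_post_entry() and CMD_PRIORITY_HIGH are assumptions here; the
 * authoritative repost logic lives in the timer path, not in this
 * setup code.
 */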
1642 /*
1643 *------------------------------------------------------------------------
1644 * lm_stats_fw_check_update_done -
1645 *
1646 * check done flags and update flags
1647 *
1648 *------------------------------------------------------------------------
1649 */
1650 void lm_stats_fw_check_update_done( struct _lm_device_t *pdev, OUT u32_t* ptr_stats_flags_done )
1651 {
1652 if CHK_NULL( ptr_stats_flags_done )
1653 {
1654 DbgBreakIf(!ptr_stats_flags_done) ;
1655 return;
1656 }
1657
1658 if (IS_VFDEV(pdev)) {
1659 SET_FLAGS(*ptr_stats_flags_done,LM_STATS_FLAGS_ALL);
1660 return;
1661 }
1662 // For each storm that isn't done yet, check its counter; if it has completed,
1663 // set its flag so we won't need to check it again next time
1664
1665 // eth xstorm
1666 if( 0 == GET_FLAGS(*ptr_stats_flags_done, LM_STATS_FLAG_XSTORM ) )
1667 {
1668 if( LM_STATS_VERIFY_COUNTER( pdev, fw_stats_data->storm_counters.xstats_counter ) )
1669 {
1670 SET_FLAGS(*ptr_stats_flags_done,LM_STATS_FLAG_XSTORM ) ;
1671 }
1672 }
1673
1674 // eth tstorm
1675 if( 0 == GET_FLAGS(*ptr_stats_flags_done, LM_STATS_FLAG_TSTORM ) )
1676 {
1677 if( LM_STATS_VERIFY_COUNTER( pdev, fw_stats_data->storm_counters.tstats_counter ) )
1678 {
1679 SET_FLAGS(*ptr_stats_flags_done,LM_STATS_FLAG_TSTORM ) ;
1680 }
1681 }
1682
1683 // eth ustorm
1684 if( 0 == GET_FLAGS(*ptr_stats_flags_done, LM_STATS_FLAG_USTORM ) )
1685 {
1686 if( LM_STATS_VERIFY_COUNTER( pdev, fw_stats_data->storm_counters.ustats_counter ) )
1687 {
1688 SET_FLAGS(*ptr_stats_flags_done,LM_STATS_FLAG_USTORM ) ;
1689 }
1690 }
1691
1692 // eth cstorm
1693 if( 0 == GET_FLAGS(*ptr_stats_flags_done, LM_STATS_FLAG_CSTORM ) )
1694 {
1695 if( LM_STATS_VERIFY_COUNTER( pdev, fw_stats_data->storm_counters.cstats_counter ) )
1696 {
1697 SET_FLAGS(*ptr_stats_flags_done,LM_STATS_FLAG_CSTORM ) ;
1698 }
1699 }
1700
1701 }
1702
1703 /**
1704 * @Description: Checks whether the FW completed the last statistics
1705 * update; if it did, assigns the statistics
1706 *
1707 * @param pdev
1708 *
1709 * @return lm_status_t LM_STATUS_SUCCESS if FW has completed
1710 * LM_STATUS_BUSY if it hasn't yet completed
1711 */
1712 lm_status_t lm_stats_fw_complete( struct _lm_device_t *pdev )
1713 {
1714 u32_t stats_flags_done = 0 ; // bitwise flags: which storms are done
1715 u32_t stats_flags_assigned = 0 ; // bitwise flags: which storms' values were already assigned
1716 lm_status_t lm_status = LM_STATUS_SUCCESS;
1717
1718 if CHK_NULL( pdev )
1719 {
1720 DbgBreakIf( !pdev ) ;
1721 return LM_STATUS_INVALID_PARAMETER;
1722 }
1723
1724 /* First check if the ramrod has completed; if it hasn't, don't bother checking
1725 * dma completion yet, we need both of them to complete before sending another
1726 * ramrod. */
1727 if (IS_PFDEV(pdev) && (FALSE == pdev->vars.stats.stats_collect.stats_fw.b_ramrod_completed))
1728 {
1729 lm_status = LM_STATUS_BUSY;
1730 }
1731 else if (FALSE == pdev->vars.stats.stats_collect.stats_fw.b_completion_done)
1732 {
1733
1734 // check done flags and update the flag if there was a change
1735 lm_stats_fw_check_update_done( pdev, &stats_flags_done ) ;
1736
1737 // Check if we can assign any of the storms
1738 if ( LM_STATS_DO_ASSIGN_ANY( stats_flags_done, stats_flags_assigned) )
1739 {
1740 // assign stats that are ready
1741 lm_stats_fw_assign( pdev, stats_flags_done, &stats_flags_assigned ) ;
1742 #ifdef VF_INVOLVED
1743 #ifndef __LINUX
1744 if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
1745 u32_t vf_stats_flags_assigned = 0;
1746 MM_ACQUIRE_VFS_STATS_LOCK_DPC(pdev);
1747 lm_pf_stats_vf_fw_assign( pdev, stats_flags_done, &vf_stats_flags_assigned);
1748 MM_RELEASE_VFS_STATS_LOCK_DPC(pdev);
1749 }
1750 #endif
1751 #endif
1752 }
1753
1754 // were all storms assigned?
1755 if ERR_IF( LM_STATS_FLAGS_ALL != stats_flags_assigned )
1756 {
1757 lm_status = LM_STATUS_BUSY;
1758 }
1759 else
1760 {
1761 #ifdef VF_INVOLVED
1762 #ifndef __LINUX
1763 if (IS_CHANNEL_VIRT_MODE_MASTER_PFDEV(pdev)) {
1764 u8_t vf_idx;
1765 lm_vf_info_t *vf_info;
1766 MM_ACQUIRE_VFS_STATS_LOCK_DPC(pdev);
1767 for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
1768 vf_info = &pdev->vfs_set.vfs_array[vf_idx];
1769 if (vf_info->vf_stats.vf_stats_state == VF_STATS_REQ_IN_PROCESSING) {
1770 if (vf_info->vf_stats.stop_collect_stats || vf_info->was_flred) {
1771 vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_READY;
1772 } else {
1773 vf_info->vf_stats.vf_stats_state = VF_STATS_REQ_SUBMITTED;
1774 }
1775 }
1776 }
1777 MM_RELEASE_VFS_STATS_LOCK_DPC(pdev);
1778 }
1779 #endif
1780 #endif
1781 ++pdev->vars.stats.stats_collect.stats_fw.drv_counter ;
1782
1783 // The barrier (for IA64) ensures that the counter is incremented BEFORE
1784 // the completion_done flag is set to TRUE, so that a correct drv_counter
1785 // is sent to the FW in lm_stats_on_timer (CQ48772).
1786
1787 if (IS_PFDEV(pdev))
1788 {
1789 mm_write_barrier();
1790 }
1791 // now we can notify timer that cb is done!
1792 pdev->vars.stats.stats_collect.stats_fw.b_completion_done = TRUE ;
1793 lm_status = LM_STATUS_SUCCESS;
1794 }
1795 }
1796 return lm_status;
1797 }
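/* Hedged usage sketch for the contract documented above: the caller
 * (assumed to be the statistics timer/DPC path) gates the next
 * STAT_QUERY ramrod on the return value, e.g.:
 *
 *     if (LM_STATUS_SUCCESS == lm_stats_fw_complete(pdev))
 *     {
 *         // ramrod + storm DMA both done - safe to post the next request
 *     }
 *     else
 *     {
 *         // LM_STATUS_BUSY - poll again on the next tick, don't repost
 *     }
 */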
1798
1799 void
1800 lm_stats_fw_assign_fcoe_xstorm(IN const struct fcoe_statistics_params* collect,
1801 OUT lm_fcoe_stats_t* mirror)
1802 {
1803 //Tx
1804 LM_SIGN_EXTEND_VALUE_32(collect->tx_stat.fcoe_tx_byte_cnt, mirror->fcoe_tx_byte_cnt);
1805 LM_SIGN_EXTEND_VALUE_32(collect->tx_stat.fcoe_tx_pkt_cnt, mirror->fcoe_tx_pkt_cnt);
1806 LM_SIGN_EXTEND_VALUE_32(collect->tx_stat.fcp_tx_pkt_cnt, mirror->fcp_tx_pkt_cnt);
1807 }
1808
1809
1810 void
1811 lm_stats_fw_assign_fcoe_tstorm(IN const struct fcoe_statistics_params* collect,
1812 OUT lm_fcoe_stats_t* mirror)
1813 {
1814 //Section 0
1815 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat0.fcoe_rx_byte_cnt, mirror->fcoe_rx_byte_cnt);
1816 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat0.fcoe_rx_pkt_cnt, mirror->fcoe_rx_pkt_cnt);
1817
1818 //Section 1
1819 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat1.fcoe_rx_drop_pkt_cnt, mirror->fcoe_rx_drop_pkt_cnt_tstorm);
1820 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat1.fcoe_ver_cnt, mirror->fcoe_ver_cnt);
1821 }
1822
1823 void
1824 lm_stats_fw_assign_fcoe_ustorm(IN const struct fcoe_statistics_params* collect,
1825 OUT lm_fcoe_stats_t* mirror)
1826 {
1827 //Section 2
1828 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.drop_seq_cnt, mirror->drop_seq_cnt);
1829 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.eofa_del_cnt, mirror->eofa_del_cnt);
1830 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.fc_crc_cnt, mirror->fc_crc_cnt);
1831 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.fcoe_rx_drop_pkt_cnt, mirror->fcoe_rx_drop_pkt_cnt_ustorm);
1832 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.fcp_rx_pkt_cnt, mirror->fcp_rx_pkt_cnt);
1833 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.miss_frame_cnt, mirror->miss_frame_cnt);
1834 LM_SIGN_EXTEND_VALUE_32(collect->rx_stat2.seq_timeout_cnt, mirror->seq_timeout_cnt);
1835 }
1836
1837 /*
1838 *------------------------------------------------------------------------
1839 * lm_stats_fw_assign -
1840 *
1841 * assign values from fw shared memory to the lm structs
1842 *
1843 *------------------------------------------------------------------------
1844 */
1845 void lm_stats_fw_assign( struct _lm_device_t *pdev, u32_t stats_flags_done, u32_t* ptr_stats_flags_assigned )
1846 {
1847 const u8_t cli_id = LM_CLI_IDX_NDIS ;
1848 int arr_cnt = 0 ;
1849 u8_t i = 0 ;
1850
1851 if CHK_NULL( ptr_stats_flags_assigned )
1852 {
1853 DbgBreakIf(!ptr_stats_flags_assigned) ;
1854 return;
1855 }
1856
1857 // assign reg_pair fw collected into fw mirror
1858 #define LM_STATS_FW_ASSIGN_TOE_REGPAIR(field_name) \
1859 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.toe_##field_name, \
1860 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->toe.field_name ) ;
1861
1862 // assign u32 fw collected into fw mirror + do sign extension
1863 #define LM_STATS_FW_ASSIGN_TOE_U32(field_name) \
1864 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->toe.field_name, \
1865 pdev->vars.stats.stats_mirror.stats_fw.toe_##field_name ) ;
1866
1867
1868 // eth xstorm
1869 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) )
1870 {
1871 // regpairs
1872 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent,
1873 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.ucast_bytes_sent);
1874 // regpairs
1875 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent,
1876 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.mcast_bytes_sent);
1877
1878 // regpairs
1879 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent,
1880 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.bcast_bytes_sent);
1881
1882 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].total_sent_bytes =
1883 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent +
1884 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent +
1885 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent;
1886
1887 // non regpairs
1888 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.ucast_pkts_sent,
1889 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent );
1890
1891 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.mcast_pkts_sent,
1892 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent );
1893
1894 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.bcast_pkts_sent,
1895 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent );
1896
1897 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.xstorm_queue_statistics.error_drop_pkts,
1898 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].error_drop_pkts );
1899
1900 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].total_sent_pkts =
1901 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent+
1902 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent +
1903 pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent;
1904
1905
1906
1907 /* TOE Stats for Xstorm */
1908 arr_cnt = ARRSIZE(pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics) ;
1909 for ( i = 0; i < arr_cnt; i++)
1910 {
1911 LM_STATS_FW_ASSIGN_TOE_U32(xstorm_toe.statistics[i].tcp_out_segments) ;
1912 LM_STATS_FW_ASSIGN_TOE_U32(xstorm_toe.statistics[i].tcp_retransmitted_segments) ;
1913 LM_STATS_FW_ASSIGN_TOE_REGPAIR(xstorm_toe.statistics[i].ip_out_octets ) ;
1914 LM_STATS_FW_ASSIGN_TOE_U32(xstorm_toe.statistics[i].ip_out_requests) ;
1915 }
1916
1917 if( !CHIP_IS_E1x(pdev) )
1918 {
1919 lm_stats_fw_assign_fcoe_xstorm(&pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->fcoe,
1920 &pdev->vars.stats.stats_mirror.stats_fw.fcoe);
1921 }
1922
1923 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) ;
1924 }
1925
1926 // eth tstorm
1927 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) )
1928 {
1929 // regpairs
1930 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_unicast_bytes,
1931 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_ucast_bytes );
1932
1933 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_bytes,
1934 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_bcast_bytes );
1935
1936 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_multicast_bytes,
1937 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_mcast_bytes );
1938
1939 // FIXME REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_error_bytes,
1940 // pdev->vars.stats.stats_collect.stats_fw.addr_eth_stats_query->tstorm_common.client_statistics[cnt_id].rcv_error_bytes );
1941
1942 // eth tstorm - non regpairs
1943 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.checksum_discard,
1944 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].checksum_discard );
1945
1946 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.pkts_too_big_discard,
1947 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].packets_too_big_discard );
1948
1949 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_ucast_pkts,
1950 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_unicast_pkts );
1951
1952 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_bcast_pkts,
1953 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_pkts );
1954
1955 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.rcv_mcast_pkts,
1956 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_multicast_pkts );
1957
1958 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.no_buff_discard,
1959 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].no_buff_discard );
1960
1961 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.tstorm_queue_statistics.ttl0_discard,
1962 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].ttl0_discard );
1963
1964
1965
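// note: the port-level mf_tag_discard counter below is written into the
// client's ttl0_discard mirror field - the same field updated from the
// queue-level ttl0_discard counter just above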
1966 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mf_tag_discard,
1967 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].ttl0_discard );
1968
1969
1970 /* Port Statistics */
1971 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mac_filter_discard, \
1972 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_filter_discard ) ;
1973 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.brb_truncate_discard, \
1974 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.brb_truncate_discard ) ;
1975 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->port.tstorm_port_statistics.mac_discard, \
1976 pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_discard ) ;
1977
1978 // toe tstorm
1979 arr_cnt = ARRSIZE(pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics) ;
1980 for ( i = 0; i < arr_cnt; i++)
1981 {
1982 LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_receives) ;
1983 LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_delivers) ;
1984 LM_STATS_FW_ASSIGN_TOE_REGPAIR(tstorm_toe.statistics[i].ip_in_octets) ;
1985 LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].tcp_in_errors) ;
1986 LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_header_errors) ;
1987 LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_discards) ;
1988 LM_STATS_FW_ASSIGN_TOE_U32(tstorm_toe.statistics[i].ip_in_truncated_packets) ;
1989 }
1990
1991 if( !CHIP_IS_E1x(pdev) )
1992 {
1993 lm_stats_fw_assign_fcoe_tstorm(&pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->fcoe,
1994 &pdev->vars.stats.stats_mirror.stats_fw.fcoe);
1995 }
1996
1997 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) ;
1998 }
1999
2000 // eth ustorm
2001 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) )
2002 {
2003 // regpairs
2004 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_bytes,
2005 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.ucast_no_buff_bytes );
2006
2007 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_bytes,
2008 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.mcast_no_buff_bytes );
2009
2010 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_bytes,
2011 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.bcast_no_buff_bytes );
2012
2013 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_bytes,
2014 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_bytes );
2015
2016 // non regpairs
2017 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.ucast_no_buff_pkts,
2018 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_pkts );
2019
2020 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.mcast_no_buff_pkts,
2021 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_pkts );
2022
2023 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.bcast_no_buff_pkts,
2024 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_pkts );
2025
2026 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_pkts,
2027 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_pkts );
2028
2029 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_events,
2030 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_events );
2031
2032 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_aborts,
2033 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_aborts );
2034
2035 if( !CHIP_IS_E1x(pdev) )
2036 {
2037 lm_stats_fw_assign_fcoe_ustorm(&pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->fcoe,
2038 &pdev->vars.stats.stats_mirror.stats_fw.fcoe);
2039 }
2040
2041 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) ;
2042 }
2043
2044 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) )
2045 {
2046 // toe cstorm
2047
2048 LM_STATS_FW_ASSIGN_TOE_U32(cstorm_toe.no_tx_cqes) ;
2049 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) ;
2050
2051 }
2052 }
2053
2054 #ifdef VF_INVOLVED
2055 void lm_pf_stats_vf_fw_assign(struct _lm_device_t *pdev, u32_t stats_flags_done, u32_t* ptr_stats_flags_assigned)
2056 {
2057 lm_stats_fw_t *mirror_stats_fw;
2058 struct per_queue_stats *queue_stats;
2059 const u8_t cli_id = LM_CLI_IDX_NDIS ;
2060 u8_t vf_idx;
2061
2062 if CHK_NULL( ptr_stats_flags_assigned )
2063 {
2064 DbgBreakIf(!ptr_stats_flags_assigned) ;
2065 return;
2066 }
2067
2068 // eth xstorm
2069 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) )
2070 {
2071 for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
2072 mirror_stats_fw = pdev->vfs_set.vfs_array[vf_idx].vf_stats.mirror_stats_fw;
2073 queue_stats = pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_virt_data;
2074 // regpairs
2075 REGPAIR_TO_U64(mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent,
2076 queue_stats->xstorm_queue_statistics.ucast_bytes_sent);
2077 // regpairs
2078 REGPAIR_TO_U64(mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent,
2079 queue_stats->xstorm_queue_statistics.mcast_bytes_sent);
2080
2081 // regpairs
2082 REGPAIR_TO_U64(mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent,
2083 queue_stats->xstorm_queue_statistics.bcast_bytes_sent);
2084
2085 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].total_sent_bytes =
2086 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_bytes_sent +
2087 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_bytes_sent +
2088 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_bytes_sent;
2089
2090 // non regpairs
2091 LM_SIGN_EXTEND_VALUE_32( queue_stats->xstorm_queue_statistics.ucast_pkts_sent,
2092 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent );
2093
2094 LM_SIGN_EXTEND_VALUE_32( queue_stats->xstorm_queue_statistics.mcast_pkts_sent,
2095 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent );
2096
2097 LM_SIGN_EXTEND_VALUE_32( queue_stats->xstorm_queue_statistics.bcast_pkts_sent,
2098 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent );
2099
2100 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].total_sent_pkts =
2101 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].unicast_pkts_sent+
2102 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].multicast_pkts_sent +
2103 mirror_stats_fw->eth_xstorm_common.client_statistics[cli_id].broadcast_pkts_sent;
2104
2105
2106 }
2107 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_XSTORM ) ;
2108 }
2109
2110 // eth tstorm
2111 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) )
2112 {
2113 for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
2114 mirror_stats_fw = pdev->vfs_set.vfs_array[vf_idx].vf_stats.mirror_stats_fw;
2115 queue_stats = pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_virt_data;
2116 // regpairs
2117 REGPAIR_TO_U64(mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_unicast_bytes,
2118 queue_stats->tstorm_queue_statistics.rcv_ucast_bytes );
2119
2120 REGPAIR_TO_U64(mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_bytes,
2121 queue_stats->tstorm_queue_statistics.rcv_bcast_bytes );
2122
2123 REGPAIR_TO_U64(mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_multicast_bytes,
2124 queue_stats->tstorm_queue_statistics.rcv_mcast_bytes );
2125
2126 // FIXME REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[cli_id].rcv_error_bytes,
2127 // pdev->vars.stats.stats_collect.stats_fw.addr_eth_stats_query->tstorm_common.client_statistics[cnt_id].rcv_error_bytes );
2128
2129 // eth tstorm - non regpairs
2130 LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.checksum_discard,
2131 mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].checksum_discard );
2132 LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.pkts_too_big_discard,
2133 mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].packets_too_big_discard );
2134
2135 LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.rcv_ucast_pkts,
2136 mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_unicast_pkts );
2137
2138 LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.rcv_bcast_pkts,
2139 mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_broadcast_pkts );
2140
2141 LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.rcv_mcast_pkts,
2142 mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].rcv_multicast_pkts );
2143 LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.no_buff_discard,
2144 mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].no_buff_discard );
2145 LM_SIGN_EXTEND_VALUE_32( queue_stats->tstorm_queue_statistics.ttl0_discard,
2146 mirror_stats_fw->eth_tstorm_common.client_statistics[cli_id].ttl0_discard );
2147
2148 }
2149 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_TSTORM ) ;
2150 }
2151
2152 // eth ustorm
2153 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) )
2154 {
2155 for (vf_idx = 0; vf_idx < pdev->vfs_set.number_of_enabled_vfs; vf_idx++) {
2156 mirror_stats_fw = pdev->vfs_set.vfs_array[vf_idx].vf_stats.mirror_stats_fw;
2157 queue_stats = pdev->vfs_set.vfs_array[vf_idx].vf_stats.pf_fw_stats_virt_data;
2158 // regpairs
2159 REGPAIR_TO_U64(mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_bytes,
2160 queue_stats->ustorm_queue_statistics.ucast_no_buff_bytes );
2161
2162 REGPAIR_TO_U64(mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_bytes,
2163 queue_stats->ustorm_queue_statistics.mcast_no_buff_bytes );
2164
2165 REGPAIR_TO_U64(mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_bytes,
2166 queue_stats->ustorm_queue_statistics.bcast_no_buff_bytes );
2167
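// note: the coalesced_* counters below are taken from the PF's own stats
// block (pdev->vars.stats...), not from this VF's queue_stats, so every
// iteration of the VF loop updates the same PF mirror fields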
2168 REGPAIR_TO_U64(pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_bytes,
2169 pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_bytes );
2170
2171 // non regpairs
2172 LM_SIGN_EXTEND_VALUE_32( queue_stats->ustorm_queue_statistics.ucast_no_buff_pkts,
2173 mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].ucast_no_buff_pkts );
2174
2175 LM_SIGN_EXTEND_VALUE_32( queue_stats->ustorm_queue_statistics.mcast_no_buff_pkts,
2176 mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].mcast_no_buff_pkts );
2177
2178 LM_SIGN_EXTEND_VALUE_32( queue_stats->ustorm_queue_statistics.bcast_no_buff_pkts,
2179 mirror_stats_fw->eth_ustorm_common.client_statistics[cli_id].bcast_no_buff_pkts );
2180
2181 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_pkts,
2182 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_pkts );
2183
2184 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_events,
2185 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_events );
2186
2187 LM_SIGN_EXTEND_VALUE_32( pdev->vars.stats.stats_collect.stats_fw.fw_stats_data->queue_stats.ustorm_queue_statistics.coalesced_aborts,
2188 pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[cli_id].coalesced_aborts );
2189 }
2190 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_USTORM ) ;
2191 }
2192
2193 if( LM_STATS_DO_ASSIGN( stats_flags_done, *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) )
2194 {
2195 SET_FLAGS( *ptr_stats_flags_assigned, LM_STATS_FLAG_CSTORM ) ;
2196 }
2197
2198 }
2199 #endif
2200
2201 /**lm_stats_hw_macs_assign
2202 *
2203 * THIS FUNCTION MUST BE CALLED INSIDE PHY LOCK
2204 *
2205 * The mirrored statistics store 2 copies of the MAC stats:
2206 * CURRENT and TOTAL. The reason for this is that each PF has
2207 * its own MAC, and when a PMF change occurs, the new PMF
2208 * would start with all MAC stats equal to 0. In this case
2209 * CURRENT would be zeroed on the next collection, but TOTAL
2210 * would still have the old stats.
2211 * Because of this, TOTAL is updated according to the difference
2212 * between the old value and the new value.
2213 *
2214 * The following function updates a field in the CURRENT block
2215 * and returns the value to be added to the TOTAL block (see the worked example after the function).
2216 *
2217 * @param bits the number of data bits in the field
2218 * @param field_collect_val the value collected from the HW
2219 * @param field_mirror_val a pointer to the relevant field in
2220 * the CURRENT block
2221 *
2222 * @return the difference between the new value and the old
2223 * value - this should be added to the relevant field in
2224 * the TOTAL block.
2225 *
2226 * @see stats_macs_idx_t , lm_stats_hw_t
2227 */
2228 static u64_t lm_stats_hw_macs_assign(IN lm_device_t* pdev,
2229 IN u8_t bits,
2230 IN u64_t field_collect_val,
2231 IN OUT u64_t *field_mirror_val)
2232 {
2233 /*MSTAT has no wraparound logic, and its stat values are zeroed on each read.
2234 This means that what we read is the difference in the stats since the last read,
2235 so we should just update the counters and exit.
2236 EMAC and BMAC stats have wraparound logic and are not zeroed on read, so we handle
2237 the wraparound if needed and return the difference between the old value and the
2238 new value.*/
2239 if(HAS_MSTAT(pdev))
2240 {
2241 *field_mirror_val += field_collect_val;
2242 return field_collect_val;
2243 }
2244 else
2245 {
2246 u64_t prev = *field_mirror_val;
2247 *field_mirror_val = lm_update_wraparound_if_needed(bits, field_collect_val, *field_mirror_val,FALSE/*no need to swap bytes on HW stats*/) ;
2248 return *field_mirror_val - prev;
2249 }
2250 }
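/* Worked example (assuming a 36-bit BMAC-style counter): if the CURRENT
 * mirror holds 0xFFFFFFFF0 and the HW now reads 0x10, the 36-bit data
 * has wrapped, so lm_update_wraparound_if_needed() yields
 * (1ull << 36) | 0x10 = 0x1000000010. The returned delta is
 * 0x1000000010 - 0xFFFFFFFF0 = 0x20, which is exactly what
 * LM_STATS_HW_MAC_ASSIGN below adds to the TOTAL block.
 */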
2251
2252 #define LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, field_width)\
2253 if (mac_query->field_collect != 0) { DbgMessage(pdev, INFORM, "assigning %s[=%x] to %s, width %d.\n", #field_collect, mac_query->field_collect, #field_mirror, field_width ); } \
2254 macs[STATS_MACS_IDX_TOTAL].field_mirror += lm_stats_hw_macs_assign( pdev, \
2255 field_width, \
2256 mac_query->field_collect, \
2257 &(macs[STATS_MACS_IDX_CURRENT].field_mirror) ) ;
2258
2259 #define LM_STATS_HW_MAC_ASSIGN_U32( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 32)
2260
2261 #define LM_STATS_HW_MAC_ASSIGN_U36( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 36)
2262
2263 #define LM_STATS_HW_MAC_ASSIGN_U42( field_collect, field_mirror ) LM_STATS_HW_MAC_ASSIGN(field_collect, field_mirror, 42)
2264
2265
2266 // assign a block (emac/bmac) uXX hw collected into hw mirror + do sign extension (width is XX)
2267 #define LM_STATS_HW_NIG_ASSIGN_UXX(bits, block_name,field_collect,field_mirror) \
2268 LM_SIGN_EXTEND_VALUE_##bits( pdev->vars.stats.stats_collect.stats_hw.addr_##block_name##_stats_query->field_collect, \
2269 pdev->vars.stats.stats_mirror.stats_hw.nig.field_mirror ) ;
2270
2271 #define LM_STATS_HW_NIG_ASSIGN_U32(block_name,field_collect,field_mirror) LM_STATS_HW_NIG_ASSIGN_UXX(32, block_name,field_collect,field_mirror)
2272
2273
2274 /* The code below is duplicated for bmac1, bmac2 and mstat, the structure mac_query differs between them and therefore
2275 * needs to be done this way (to avoid duplicating the code) */
2276 #define LM_STATS_NON_EMAC_ASSIGN_CODE(_field_width) \
2277 {\
2278 /* Maps bmac_query into macs struct */ \
2279 /* Spec .1-5 (N/A) */ \
2280 /* Spec .6 */ \
2281 if (!IS_MULTI_VNIC(pdev)) { \
2282 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtgca, stats_tx.tx_stat_ifhcoutucastpkts_bmac_bca, _field_width); \
2283 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtgca, stats_tx.tx_stat_ifhcoutbroadcastpkts, _field_width); \
2284 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtpkt, stats_tx.tx_stat_ifhcoutucastpkts_bmac_pkt , _field_width); \
2285 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtmca, stats_tx.tx_stat_ifhcoutucastpkts_bmac_mca , _field_width); \
2286 /* Spec .7 */ \
2287 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtmca, stats_tx.tx_stat_ifhcoutmulticastpkts , _field_width); \
2288 /* Spec .8 */ \
2289 } \
2290 /* Spec .9 */ \
2291 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grfcs, stats_rx.rx_stat_dot3statsfcserrors, _field_width); \
2292 /* Spec .10-11 (N/A) */ \
2293 /* Spec .12 */ \
2294 /* Spec .13 */ \
2295 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grovr, stats_rx.rx_stat_dot3statsframestoolong, _field_width); \
2296 /* Spec .14 (N/A) */ \
2297 /* Spec .15 */ \
2298 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_xoffpauseframesreceived, _field_width); \
2299 /* Spec .17 */ \
2300 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtxpf, stats_tx.tx_stat_outxoffsent, _field_width); \
2301 /* Spec .18-21 (N/A) */ \
2302 /* Spec .22 */ \
2303 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xpf, _field_width); \
2304 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxcf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf, _field_width); \
2305 /* Spec .23-29 (N/A) */ \
2306 /* Spec. 30 */ \
2307 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt64, stats_tx.tx_stat_etherstatspkts64octets, _field_width); \
2308 /* Spec. 31 */ \
2309 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt127, stats_tx.tx_stat_etherstatspkts65octetsto127octets, _field_width); \
2310 /* Spec. 32 */ \
2311 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt255, stats_tx.tx_stat_etherstatspkts128octetsto255octets, _field_width); \
2312 /* Spec. 33 */ \
2313 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt511, stats_tx.tx_stat_etherstatspkts256octetsto511octets, _field_width); \
2314 /* Spec. 34 */ \
2315 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt1023, stats_tx.tx_stat_etherstatspkts512octetsto1023octets, _field_width); \
2316 /* Spec. 35 */ \
2317 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt1518, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, _field_width); \
2318 /* Spec. 36 */ \
2319 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt2047, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047, _field_width); \
2320 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt4095, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095, _field_width); \
2321 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt9216, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216, _field_width); \
2322 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gt16383, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383, _field_width);\
2323 /* Spec. 38 */ \
2324 /* Spec. 39 */ \
2325 /* Spec. 40 (N/A) */ \
2326 /* Spec. 41 */ \
2327 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gterr, stats_tx.tx_stat_dot3statsinternalmactransmiterrors, _field_width); \
2328 /* Spec. 42 (N/A) */ \
2329 /* Spec. 43 */ \
2330 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtxpf, stats_tx.tx_stat_flowcontroldone, _field_width); \
2331 /* Spec. 44 */ \
2332 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpf, stats_rx.rx_stat_xoffstateentered, _field_width); \
2333 /* Spec. 45 */ \
2334 /* Spec. 46 (N/A) */ \
2335 /* Spec. 47 */ \
2336 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtufl, stats_tx.tx_stat_ifhcoutdiscards, _field_width); \
2337 }
2338
2339 //Assign the registers that do not exist in MSTAT or have a different size and therefore can't
2340 //be a part of LM_STATS_NON_EMAC_ASSIGN_CODE
2341 #define LM_STATS_BMAC_ASSIGN_CODE \
2342 { \
2343 LM_STATS_HW_MAC_ASSIGN_U42( stats_rx.rx_grund, stats_rx.rx_stat_etherstatsundersizepkts ) ; \
2344 LM_STATS_HW_MAC_ASSIGN_U36( stats_rx.rx_grjbr, stats_rx.rx_stat_etherstatsjabbers ) ; \
2345 LM_STATS_HW_MAC_ASSIGN_U42( stats_rx.rx_grfrg, stats_rx.rx_stat_etherstatsfragments ) ; \
2346 LM_STATS_HW_MAC_ASSIGN_U42( stats_rx.rx_grerb, stats_rx.rx_stat_ifhcinbadoctets ); \
2347 }
2348
2349 /* The code below is duplicated for bmac2 and mstat, the structure mac_query differs between them and therefore
2350 * needs to be done this way (to avoid duplicating the code) */
2351 #define LM_STATS_BMAC2_MSTAT_ASSIGN_CODE(_field_width) \
2352 {\
2353 LM_STATS_HW_MAC_ASSIGN( stats_tx.tx_gtxpp, stats_tx.tx_stat_pfcPacketCounter, _field_width); \
2354 /* Rx PFC Packet Counter*/ \
2355 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grxpp, stats_rx.rx_stat_pfcPacketCounter, _field_width); \
2356 }
2357 //Assign the registers that do not exist in BMAC1/BMAC2 or have a different size and therefore
2358 //can't be a part of LM_STATS_NON_EMAC_ASSIGN_CODE.
2359 //Also, some fields are read from EMAC stats on devices that have an EMAC block but must be read
2360 //from MSTAT on devices that don't have one.
2361 #define LM_STATS_MSTAT_ASSIGN_CODE \
2362 { \
2363 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grund, stats_rx.rx_stat_etherstatsundersizepkts, 39) ; \
2364 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grfrg, stats_rx.rx_stat_etherstatsfragments, 39) ; \
2365 LM_STATS_HW_MAC_ASSIGN( stats_rx.rx_grerb, stats_rx.rx_stat_ifhcinbadoctets, 45); \
2366 if (!IS_MULTI_VNIC(pdev)) {\
2367 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_grbyt, stats_rx.rx_stat_ifhcinoctets, 45);\
2368 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gruca, stats_rx.rx_stat_ifhcinucastpkts, 39);\
2369 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_grmca, stats_rx.rx_stat_ifhcinmulticastpkts, 39);\
2370 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_grbca, stats_rx.rx_stat_ifhcinbroadcastpkts, 39);\
2371 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr64, stats_rx.rx_stat_etherstatspkts64octets, 39);\
2372 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr127, stats_rx.rx_stat_etherstatspkts65octetsto127octets, 39);\
2373 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr255, stats_rx.rx_stat_etherstatspkts128octetsto255octets, 39);\
2374 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr511, stats_rx.rx_stat_etherstatspkts256octetsto511octets, 39);\
2375 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr1023, stats_rx.rx_stat_etherstatspkts512octetsto1023octets, 39);\
2376 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr1518, stats_rx.rx_stat_etherstatspkts1024octetsto1522octets, 39);\
2377 LM_STATS_HW_MAC_ASSIGN(stats_rx.rx_gr2047, stats_rx.rx_stat_etherstatspktsover1522octets, 39);\
2378 }\
2379 }
2380
2381 /**lm_stats_hw_bmac1_assign
2382 * Copy the stats data from the BMAC1 stats values to the
2383 * generic struct used by the driver. This function must be
2384 * called after lm_stats_hw_collect that copies the data from
2385 * the hardware registers to the host's memory.
2386 *
2387 *
2388 * @param pdev the device to use.
2389 */
2390 void lm_stats_hw_bmac1_assign( struct _lm_device_t *pdev)
2391 {
2392 /* Locals required by the macros used in this code */
2393 stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
2394 volatile struct _stats_bmac1_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac1_stats_query;
2395
2396 LM_STATS_NON_EMAC_ASSIGN_CODE(36)
2397 LM_STATS_BMAC_ASSIGN_CODE
2398 }
2399
2400 /**lm_stats_hw_bmac2_assign
2401 * Copy the stats data from the BMAC2 stats values to the
2402 * generic struct used by the driver. This function must be
2403 * called after lm_stats_hw_collect that copies the data from
2404 * the hardware registers to the host's memory.
2405 *
2406 *
2407 * @param pdev the device to use.
2408 */
2409 void lm_stats_hw_bmac2_assign( struct _lm_device_t *pdev)
2410 {
2411 stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
2412 volatile struct _stats_bmac2_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_bmac2_stats_query;
2413 const u8_t bmac2_field_width = 36;
2414
2415 DbgBreakIf(mac_query == NULL);
2416
2417 LM_STATS_NON_EMAC_ASSIGN_CODE(bmac2_field_width)
2418 LM_STATS_BMAC2_MSTAT_ASSIGN_CODE(bmac2_field_width)
2419 LM_STATS_BMAC_ASSIGN_CODE
2420 }
2421
2422 /**lm_stats_hw_mstat_assign
2423 * Copy the stats data from the MSTAT stats values to the
2424 * generic struct used by the driver. This function must be
2425 * called after lm_stats_hw_collect that copies the data from
2426 * the hardware registers to the host's memory.
2427 *
2428 *
2429 * @param pdev the device to use.
2430 */
2431 void lm_stats_hw_mstat_assign( lm_device_t* pdev)
2432 {
2433 stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
2434 volatile struct _stats_mstat_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.addr_mstat_stats_query;
2435 const u8_t mstat_field_width = 39;
2436 DbgBreakIf(mac_query == NULL);
2437
2438 DbgMessage(pdev, INFORM, "lm_stats_hw_mstat_assign: mac_query=%x\n", mac_query);
2439
2440 LM_STATS_NON_EMAC_ASSIGN_CODE(mstat_field_width)
2441 LM_STATS_BMAC2_MSTAT_ASSIGN_CODE(mstat_field_width)
2442 LM_STATS_MSTAT_ASSIGN_CODE
2443 }
2444
2445 /**lm_stats_hw_emac_assign
2446 * Copy the stats data from the EMAC stats values to the generic
2447 * struct used by the driver. This function must be called after
2448 * lm_stats_hw_collect that copies the data from the hardware
2449 * registers to the host's memory.
2450 *
2451 *
2452 * @param pdev the device to use.
2453 */
2454 void lm_stats_hw_emac_assign( struct _lm_device_t *pdev)
2455 {
2456 stats_macs_t *macs = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_CURRENT];
2457 volatile struct _stats_emac_query_t *mac_query = pdev->vars.stats.stats_collect.stats_hw.u.s.addr_emac_stats_query;
2458
2459 DbgBreakIf(mac_query == NULL);
2460
2461 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinbadoctets, stats_rx.rx_stat_ifhcinbadoctets ) ;
2462 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsfragments, stats_rx.rx_stat_etherstatsfragments ) ;
2463
2464 if (!IS_MULTI_VNIC(pdev)) {
2465 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinoctets, stats_rx.rx_stat_ifhcinoctets );
2466 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinucastpkts, stats_rx.rx_stat_ifhcinucastpkts );
2467 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinmulticastpkts, stats_rx.rx_stat_ifhcinmulticastpkts );
2468 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_ifhcinbroadcastpkts, stats_rx.rx_stat_ifhcinbroadcastpkts );
2469 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts64octets, stats_rx.rx_stat_etherstatspkts64octets );
2470 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts65octetsto127octets, stats_rx.rx_stat_etherstatspkts65octetsto127octets );
2471 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts128octetsto255octets, stats_rx.rx_stat_etherstatspkts128octetsto255octets );
2472 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts256octetsto511octets, stats_rx.rx_stat_etherstatspkts256octetsto511octets );
2473 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts512octetsto1023octets, stats_rx.rx_stat_etherstatspkts512octetsto1023octets);
2474 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspkts1024octetsto1522octets, stats_rx.rx_stat_etherstatspkts1024octetsto1522octets);
2475 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatspktsover1522octets, stats_rx.rx_stat_etherstatspktsover1522octets);
2476 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutoctets, stats_tx.tx_stat_ifhcoutoctets);
2477 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutucastpkts, stats_tx.tx_stat_ifhcoutucastpkts);
2478 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutmulticastpkts, stats_tx.tx_stat_ifhcoutmulticastpkts);
2479 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutbroadcastpkts, stats_tx.tx_stat_ifhcoutbroadcastpkts);
2480 }
2481
2482 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsfcserrors, stats_rx.rx_stat_dot3statsfcserrors ) ;
2483 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsalignmenterrors, stats_rx.rx_stat_dot3statsalignmenterrors ) ;
2484 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors ) ;
2485 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xonpauseframesreceived, stats_rx.rx_stat_xonpauseframesreceived ) ;
2486 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xoffpauseframesreceived, stats_rx.rx_stat_xoffpauseframesreceived ) ;
2487 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_maccontrolframesreceived, stats_rx.rx_stat_maccontrolframesreceived ) ;
2488 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_xoffstateentered, stats_rx.rx_stat_xoffstateentered ) ;
2489 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_dot3statsframestoolong, stats_rx.rx_stat_dot3statsframestoolong ) ;
2490 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsjabbers, stats_rx.rx_stat_etherstatsjabbers ) ;
2491 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx.rx_stat_etherstatsundersizepkts, stats_rx.rx_stat_etherstatsundersizepkts ) ;
2492
2493
2494 LM_STATS_HW_MAC_ASSIGN_U32(stats_rx_err.rx_stat_falsecarriererrors, stats_rx_err.rx_stat_falsecarriererrors ) ;
2495
2496
2497
2498 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_ifhcoutbadoctets, stats_tx.tx_stat_ifhcoutbadoctets ) ;
2499 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatscollisions, stats_tx.tx_stat_etherstatscollisions ) ;
2500 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_outxonsent, stats_tx.tx_stat_outxonsent ) ;
2501 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_outxoffsent, stats_tx.tx_stat_outxoffsent ) ;
2502 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_flowcontroldone, stats_tx.tx_stat_flowcontroldone ) ;
2503 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statssinglecollisionframes, stats_tx.tx_stat_dot3statssinglecollisionframes ) ;
2504 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsmultiplecollisionframes, stats_tx.tx_stat_dot3statsmultiplecollisionframes ) ;
2505 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsdeferredtransmissions, stats_tx.tx_stat_dot3statsdeferredtransmissions ) ;
2506 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsexcessivecollisions, stats_tx.tx_stat_dot3statsexcessivecollisions ) ;
2507 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statslatecollisions, stats_tx.tx_stat_dot3statslatecollisions ) ;
2508
2509
2510 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts64octets, stats_tx.tx_stat_etherstatspkts64octets ) ;
2511 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts65octetsto127octets, stats_tx.tx_stat_etherstatspkts65octetsto127octets ) ;
2512 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts128octetsto255octets, stats_tx.tx_stat_etherstatspkts128octetsto255octets ) ;
2513 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts256octetsto511octets, stats_tx.tx_stat_etherstatspkts256octetsto511octets ) ;
2514 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts512octetsto1023octets, stats_tx.tx_stat_etherstatspkts512octetsto1023octets ) ;
2515 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet ) ;
2516 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_etherstatspktsover1522octets, stats_tx.tx_stat_etherstatspktsover1522octets ) ;
2517 LM_STATS_HW_MAC_ASSIGN_U32(stats_tx.tx_stat_dot3statsinternalmactransmiterrors, stats_tx.tx_stat_dot3statsinternalmactransmiterrors ) ;
2518 }
2519
2520 void lm_stats_hw_assign( struct _lm_device_t *pdev )
2521 {
2522 if(HAS_MSTAT(pdev))
2523 {
2524 DbgMessage(pdev, INFORM, "lm_stats_hw_assign: device has MSTAT block.\n");
2525 lm_stats_hw_mstat_assign(pdev);
2526 }
2527 else if (CHIP_IS_E2(pdev) && (pdev->vars.mac_type == MAC_TYPE_BMAC))
2528 {
2529 lm_stats_hw_bmac2_assign(pdev);
2530 }
2531 else if (pdev->vars.mac_type == MAC_TYPE_BMAC)
2532 {
2533 lm_stats_hw_bmac1_assign(pdev);
2534 }
2535 else if(pdev->vars.mac_type == MAC_TYPE_EMAC)
2536 {
2537 lm_stats_hw_emac_assign(pdev);
2538 }
2539 else
2540 {
2541 DbgBreakIf((pdev->vars.mac_type != MAC_TYPE_EMAC) && (pdev->vars.mac_type != MAC_TYPE_BMAC) && !HAS_MSTAT(pdev) );
2542 }
2543
2544 //nig
2545 {
2546 LM_STATS_HW_NIG_ASSIGN_U32(nig, brb_discard, brb_discard ) ;
2547 if (!IS_MULTI_VNIC(pdev))
2548 {
2549 LM_STATS_HW_NIG_ASSIGN_U32(nig, brb_packet, brb_packet );
2550 LM_STATS_HW_NIG_ASSIGN_U32(nig, brb_truncate, brb_truncate );
2551 LM_STATS_HW_NIG_ASSIGN_U32(nig, flow_ctrl_discard, flow_ctrl_discard );
2552 LM_STATS_HW_NIG_ASSIGN_U32(nig, flow_ctrl_octets, flow_ctrl_octets );
2553 LM_STATS_HW_NIG_ASSIGN_U32(nig, flow_ctrl_packet, flow_ctrl_packet );
2554 LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_discard, mng_discard );
2555 LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_octet_inp, mng_octet_inp );
2556 LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_octet_out, mng_octet_out );
2557 LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_packet_inp, mng_packet_inp );
2558 LM_STATS_HW_NIG_ASSIGN_U32(nig, mng_packet_out, mng_packet_out );
2559 LM_STATS_HW_NIG_ASSIGN_U32(nig, pbf_octets, pbf_octets );
2560 LM_STATS_HW_NIG_ASSIGN_U32(nig, pbf_packet, pbf_packet );
2561 LM_STATS_HW_NIG_ASSIGN_U32(nig, safc_inp, safc_inp );
2562 }
2563 if(HAS_MSTAT(pdev))//E3 has no NIG-ex registers, so we use values from MSTAT instead.
2564 {
2565 //Note: this must occur after the other HW stats have been assigned.
2566 stats_macs_t* assigned_hw_stats = &pdev->vars.stats.stats_mirror.stats_hw.macs[STATS_MACS_IDX_TOTAL];
2567 struct _stats_nig_ex_t* nig_ex_stats = &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query;
2568 /*NIG pkt0 counts packets with sizes 1024-1522 bytes. MSTAT has an equivalent register.*/
2569 nig_ex_stats->egress_mac_pkt0 = assigned_hw_stats->stats_tx.tx_stat_etherstatspkts1024octetsto1522octet;
2570 /*NIG pkt1 counts packets of size 1523 and up. We sum the required MSTAT values to get the right result.
2571 Note that the field names are somewhat misleading, since they don't count sizes 1522-XXXX but [1522-2047],[2048-4095],[4096-9216],[9217-16383]
2572 (see MSTAT low level design document).
2573 */
2574 nig_ex_stats->egress_mac_pkt1 = assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047+
2575 assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095+
2576 assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216+
2577 assigned_hw_stats->stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383;
2578 }
2579 else
2580 {
2581 LM_SIGN_EXTEND_VALUE_36( pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt0, pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt0 ) ;
2582 LM_SIGN_EXTEND_VALUE_36( pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt1, pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt1 ) ;
2583 }
2584 }
2585 }
2586
2587 /*
2588 *Function Name: lm_drv_info_to_mfw_assign_eth
2589 *
2590 *Parameters:
2591 *
2592 *Description:
2593 * assign drv_info eth stats from different places in the pdev to "mirror" (vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats)
2594 *Returns:
2595 *
2596 */
2597 static void lm_drv_info_to_mfw_assign_eth( struct _lm_device_t *pdev )
2598 {
2599 const u8_t client_id = LM_CLI_CID(pdev, LM_CLI_IDX_NDIS );
2600 eth_stats_info_t* stats_eth = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats;
2601 lm_client_con_params_t* cli_params = NULL;
2602
2603 if( client_id >= ARRSIZE(pdev->params.l2_cli_con_params) )
2604 {
2605 DbgBreakIf( client_id >= ARRSIZE(pdev->params.l2_cli_con_params) );
2606 return;
2607 }
2608
2609 #define DRV_INFO_TO_MFW_NOT_SUPPORTED 0
2610
2611 cli_params = &pdev->params.l2_cli_con_params[client_id];
2612
2613 ASSERT_STATIC( sizeof(stats_eth->version) <= sizeof(pdev->ver_str) );
2614
2615 ASSERT_STATIC( sizeof(stats_eth->mac_local) <= sizeof( pdev->params.mac_addr ) );
2616
2617 mm_memcpy( stats_eth->version, pdev->ver_str, sizeof(stats_eth->version) );
2618
2619 /* Locally Admin Addr. BigEndian EUI48. Actual size is 6 bytes */
2620 /* Additional Programmed MAC Addr 1. 2*/
2621
2622 // stats_eth->mac_local, mac_add1, mac_add2 - NO NEED to update here since they are already updated in lm_eq_handle_classification_eqe
2623
2624 /* MTU Size. Note : Negotiated MTU */
2625 stats_eth->mtu_size = cli_params->mtu;
2626
2627 /* LSO MaxOffloadSize. */
2628 stats_eth->lso_max_size = DRV_INFO_TO_MFW_NOT_SUPPORTED; // we should acquire this from NDIS?
2629
2630 /* LSO MinSegmentCount. */
2631 stats_eth->lso_min_seg_cnt = DRV_INFO_TO_MFW_NOT_SUPPORTED; // we should acquire this from NDIS?
2632
2633 /* Num Offloaded Connections TCP_IPv4. */
2634 stats_eth->ipv4_ofld_cnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
2635
2636 /* Num Offloaded Connections TCP_IPv6. */
2637 stats_eth->ipv6_ofld_cnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
2638
2639 /* Promiscuous Mode. non-zero true */
2640 stats_eth->promiscuous_mode = ( 0 != GET_FLAGS( pdev->client_info[client_id].last_set_rx_mask, LM_RX_MASK_PROMISCUOUS_MODE ) );
2641
2642 /* TX Descriptors Queue Size */
2643 stats_eth->txq_size = cli_params->num_tx_desc;
2644
2645 /* RX Descriptors Queue Size */
2646 stats_eth->rxq_size = cli_params->num_rx_desc;//= pdev->params.l2_rx_desc_cnt[LM_CLI_IDX_NDIS];
2647
2648 /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
2649 stats_eth->txq_avg_depth = DRV_INFO_TO_MFW_NOT_SUPPORTED;
2650
2651 /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
2652 stats_eth->rxq_avg_depth = DRV_INFO_TO_MFW_NOT_SUPPORTED;
2653
2654 /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
2655 stats_eth->iov_offload = DRV_INFO_TO_MFW_NOT_SUPPORTED;
2656
2657 /* Num VF assigned to this PF. */
2658 stats_eth->vf_cnt = 0; // Once Win8 (T7.4) should be changed!
2659
2660 /* Number of NetQueue/VMQ Config'd. */
2661 stats_eth->netq_cnt = mm_get_vmq_cnt(pdev);
2662
2663 /* Feature_Flags. */
2664 stats_eth->feature_flags = mm_get_feature_flags(pdev);
2665 } /* lm_drv_info_to_mfw_assign_eth */
2666
2667
2668 /*
2669 *Function Name: lm_stats_drv_info_to_mfw_assign
2670 *
2671 *Parameters:
2672 *
2673 *Description:
2674 * Upon the opcode assign relevant stats from "mirror" to physical memory in "collect"
2675 * then, MFW will read this data.
2676 *Returns:
2677 *
2678 */
lm_stats_drv_info_to_mfw_assign(struct _lm_device_t * pdev,const enum drv_info_opcode drv_info_op)2679 lm_status_t lm_stats_drv_info_to_mfw_assign( struct _lm_device_t *pdev, const enum drv_info_opcode drv_info_op )
2680 {
2681 lm_status_t lm_status = LM_STATUS_SUCCESS;
2682 void* dest = (void*)pdev->vars.stats.stats_collect.drv_info_to_mfw.addr.eth_stats; // this is a union so doesn't matter if etc/iscsi/fcoe
2683 void* src = NULL;
2684 u32_t size = 0;
2685
2686 if CHK_NULL(dest)
2687 {
2688 // dest might be NULL if we got here in chip id < E3
2689 DbgBreakIf(!dest);
2690 return LM_STATUS_FAILURE;
2691 }
2692
2693 switch(drv_info_op)
2694 {
2695 case ETH_STATS_OPCODE:
2696 // We gather eth stats from already known data
2697 lm_drv_info_to_mfw_assign_eth(pdev);
2698
2699 src = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats;
2700 size = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.eth_stats);
2701 break;
2702
2703 case ISCSI_STATS_OPCODE:
2704 // storage data is set by miniport
2705 src = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.iscsi_stats;
2706 size = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.iscsi_stats);
2707 break;
2708
2709 case FCOE_STATS_OPCODE:
2710 // storage data is set by miniport
2711 src = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.fcoe_stats;
2712 size = sizeof(pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.fcoe_stats);
2713 break;
2714
2715 default:
2716 lm_status = LM_STATUS_INVALID_PARAMETER;
2717 break;
2718 }
2719
2720 if( LM_STATUS_SUCCESS == lm_status)
2721 {
2722 // Zero buffer
2723 mm_mem_zero( dest, size );
2724
2725 // Copy relevant field
2726 mm_memcpy( dest, src, size );
2727 }
2728
2729 return lm_status;
2730 } /* lm_stats_drv_info_to_mfw_assign */
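
/*
 * Illustrative sketch (not compiled): how a management-event handler might
 * drive the assignment above. The handler name and the way the opcode arrives
 * are hypothetical; the opcode values and the assign routine are the ones
 * used in this file.
 */
#if 0
static void lm_drv_info_event_sketch( struct _lm_device_t *pdev, enum drv_info_opcode op )
{
    // Populate the DMA-able "collect" buffer that the MFW will read.
    if( LM_STATUS_SUCCESS != lm_stats_drv_info_to_mfw_assign( pdev, op ) )
    {
        // An unknown opcode (or a pre-E3 chip with no buffer) is reported as a failure.
        DbgMessage(pdev, WARNstat, "drv_info: assign failed for op=%d\n", op);
    }
}
#endif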

// resets mirror fw statistics
void lm_stats_fw_reset( struct _lm_device_t* pdev )
{
    if CHK_NULL( pdev )
    {
        DbgBreakIf(!pdev) ;
    }
    mm_memset( &pdev->vars.stats.stats_mirror.stats_fw, 0, sizeof(pdev->vars.stats.stats_mirror.stats_fw) ) ;
}

void lm_stats_get_dcb_stats( lm_device_t* pdev, lm_dcbx_stat *stats )
{
    stats->pfc_frames_sent = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_pfcPacketCounter ) );
    stats->pfc_frames_received = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_pfcPacketCounter ) );
}

void lm_stats_get_driver_stats( struct _lm_device_t* pdev, b10_driver_statistics_t *stats )
{
    stats->ver_num = DRIVER_STATISTISTCS_VER_NUM;
    stats->tx_lso_frames = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_lso_frames ;
    stats->tx_aborted = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_aborted ;
    stats->tx_no_bd = 0 ;
    stats->tx_no_desc = 0 ;
    stats->tx_no_coalesce_buf = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_no_coalesce_buf ;
    stats->tx_no_map_reg = 0 ;
    stats->rx_aborted = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_aborted ;
    stats->rx_err = 0 ;
    stats->rx_crc = 0 ;
    stats->rx_phy_err = 0 ;
    stats->rx_alignment = 0;
    stats->rx_short_packet = 0 ;
    stats->rx_giant_packet = 0 ;
}

void lm_stats_get_l2_driver_stats( struct _lm_device_t* pdev, b10_l2_driver_statistics_t *stats )
{
    stats->ver_num = L2_DRIVER_STATISTISTCS_VER_NUM;
    stats->RxIPv4FragCount = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_ipv4_frag_count ;
    stats->RxIpCsErrorCount = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_ip_cs_error_count ;
    stats->RxTcpCsErrorCount = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_tcp_cs_error_count ;
    stats->RxLlcSnapCount = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_llc_snap_count ;
    stats->RxPhyErrorCount = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_phy_error_count ;
    stats->RxIpv6ExtCount = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.rx_ipv6_ext_count ;
    stats->TxNoL2Bd = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_no_l2_bd ;
    stats->TxNoSqWqe = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_no_sq_wqe ;
    stats->TxL2AssemblyBufUse = pdev->vars.stats.stats_mirror.stats_drv.drv_eth.tx_l2_assembly_buf_use ;
}

void lm_stats_get_l4_driver_stats( struct _lm_device_t* pdev, b10_l4_driver_statistics_t *stats )
{
    u8_t idx = 0 ;

    stats->ver_num = L4_DRIVER_STATISTISTCS_VER_NUM;

    idx = STATS_IP_4_IDX ;
    stats->CurrentlyIpv4Established = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].currently_established ;
    stats->OutIpv4Resets = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].out_resets ;
    stats->OutIpv4Fin = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].out_fin ;
    stats->InIpv4Reset = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].in_reset ;
    stats->InIpv4Fin = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].in_fin ;

    idx = STATS_IP_6_IDX ;
    stats->CurrentlyIpv6Established = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].currently_established ;
    stats->OutIpv6Resets = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].out_resets ;
    stats->OutIpv6Fin = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].out_fin ;
    stats->InIpv6Reset = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].in_reset ;
    stats->InIpv6Fin = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[idx].in_fin ;

    stats->RxIndicateReturnPendingCnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_indicate_return_pending_cnt ;
    stats->RxIndicateReturnDoneCnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_indicate_return_done_cnt ;
    stats->RxActiveGenBufCnt = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.rx_active_gen_buf_cnt ;
    stats->TxNoL4Bd = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.tx_no_l4_bd ;
    stats->TxL4AssemblyBufUse = pdev->vars.stats.stats_mirror.stats_drv.drv_toe.tx_l4_assembly_buf_use ;
}

void lm_stats_get_l2_chip_stats( struct _lm_device_t* pdev, void *buf, u8_t version )
{
    u32_t idx = LM_CLI_IDX_NDIS ;
    b10_l2_chip_statistics_t *stats = buf;

    stats->ver_num = version ;

    // TODO - change IOCTL structure to be per client

    stats->IfHCInOctets = pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_bytes +
                          pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_bytes +
                          pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_bytes ;
    stats->IfHCInBadOctets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_ifhcinbadoctets ) );
    stats->IfHCOutOctets = pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].total_sent_bytes ;
    stats->IfHCOutBadOctets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_ifhcoutbadoctets ) );
    stats->IfHCInUcastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_pkts ) ;
    stats->IfHCInMulticastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_pkts ) ;
    stats->IfHCInBroadcastPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_pkts ) ;
    stats->IfHCInUcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_bytes ) ;
    stats->IfHCInMulticastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_bytes ) ;
    stats->IfHCInBroadcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_bytes ) ;

    stats->IfHCOutUcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].unicast_bytes_sent ) ;
    stats->IfHCOutMulticastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].multicast_bytes_sent ) ;
    stats->IfHCOutBroadcastOctets = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].broadcast_bytes_sent ) ;
    stats->IfHCOutPkts = (pdev->vars.stats.stats_mirror.stats_fw.eth_xstorm_common.client_statistics[idx].total_sent_pkts ) ;


    lm_get_stats( pdev, LM_STATS_UNICAST_FRAMES_XMIT, &stats->IfHCOutUcastPkts
#ifdef VF_INVOLVED
                  ,NULL
#endif
                  ) ;
    lm_get_stats( pdev, LM_STATS_MULTICAST_FRAMES_XMIT, &stats->IfHCOutMulticastPkts
#ifdef VF_INVOLVED
                  ,NULL
#endif
                  ) ;
    lm_get_stats( pdev, LM_STATS_BROADCAST_FRAMES_XMIT, &stats->IfHCOutBroadcastPkts
#ifdef VF_INVOLVED
                  ,NULL
#endif
                  ) ;

    stats->IfHCInPkts = pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_broadcast_pkts +
                        pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_multicast_pkts +
                        pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].rcv_unicast_pkts ;

    stats->IfHCOutDiscards = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_ifhcoutdiscards ) );
    stats->IfHCInFalseCarrierErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx_err.rx_stat_falsecarriererrors ) );

    stats->Dot3StatsInternalMacTransmitErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsinternalmactransmiterrors )) ;
    stats->Dot3StatsCarrierSenseErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statscarriersenseerrors )) ;
    stats->Dot3StatsFCSErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsfcserrors )) ;
    stats->Dot3StatsAlignmentErrors = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsalignmenterrors )) ;
    stats->Dot3StatsSingleCollisionFrames = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statssinglecollisionframes )) ;
    stats->Dot3StatsMultipleCollisionFrames = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsmultiplecollisionframes )) ;
    stats->Dot3StatsDeferredTransmissions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsdeferredtransmissions )) ;
    stats->Dot3StatsExcessiveCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statsexcessivecollisions )) ;
    stats->Dot3StatsLateCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_dot3statslatecollisions )) ;
    stats->EtherStatsCollisions = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatscollisions )) ;
    stats->EtherStatsFragments = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsfragments )) ;
    stats->EtherStatsJabbers = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsjabbers )) ;


    stats->EtherStatsUndersizePkts = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_etherstatsundersizepkts )) ;
    stats->EtherStatsOverrsizePkts = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_dot3statsframestoolong )) ;

    stats->EtherStatsPktsTx64Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts64octets )) ;
    stats->EtherStatsPktsTx65Octetsto127Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts65octetsto127octets )) ;
    stats->EtherStatsPktsTx128Octetsto255Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts128octetsto255octets )) ;
    stats->EtherStatsPktsTx256Octetsto511Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts256octetsto511octets )) ;
    stats->EtherStatsPktsTx512Octetsto1023Octets = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_etherstatspkts512octetsto1023octets)) ;
    stats->EtherStatsPktsTx1024Octetsto1522Octets = (pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt0) ;
    stats->EtherStatsPktsTxOver1522Octets = (pdev->vars.stats.stats_mirror.stats_hw.nig_ex.egress_mac_pkt1) ;

    stats->XonPauseFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xonpauseframesreceived )) ;
    stats->XoffPauseFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xoffpauseframesreceived )) ;
    stats->OutXonSent = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_outxonsent )) ;

    stats->OutXoffSent = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_outxoffsent )) ;

    stats->FlowControlDone = (LM_STATS_HW_GET_MACS_U64( pdev, stats_tx.tx_stat_flowcontroldone )) ;

    stats->MacControlFramesReceived = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_maccontrolframesreceived )) ;
    stats->MacControlFramesReceived += (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf )) ;

    stats->XoffStateEntered = (LM_STATS_HW_GET_MACS_U64( pdev, stats_rx.rx_stat_xoffstateentered )) ;
    lm_get_stats( pdev, LM_STATS_ERRORED_RECEIVE_CNT, &stats->IfInErrors
#ifdef VF_INVOLVED
                  ,NULL
#endif
                  ) ;
    // TBD - IfInErrorsOctets - naming and support
    stats->IfInErrorsOctets = 0;

    stats->IfInNoBrbBuffer = (pdev->vars.stats.stats_mirror.stats_hw.nig.brb_discard) ;
    stats->IfInFramesL2FilterDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.mac_filter_discard) ;
    stats->IfInTTL0Discards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].ttl0_discard) ;
    stats->IfInxxOverflowDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.port_statistics.xxoverflow_discard) ;

    stats->IfInMBUFDiscards = (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[idx].no_buff_discard );
    stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].ucast_no_buff_pkts );
    stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].mcast_no_buff_pkts );
    stats->IfInMBUFDiscards += (pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].bcast_no_buff_pkts );

    stats->Nig_brb_packet = (pdev->vars.stats.stats_mirror.stats_hw.nig.brb_packet) ;
    stats->Nig_brb_truncate = (pdev->vars.stats.stats_mirror.stats_hw.nig.brb_truncate) ;
    stats->Nig_flow_ctrl_discard = (pdev->vars.stats.stats_mirror.stats_hw.nig.flow_ctrl_discard) ;
    stats->Nig_flow_ctrl_octets = (pdev->vars.stats.stats_mirror.stats_hw.nig.flow_ctrl_octets) ;
    stats->Nig_flow_ctrl_packet = (pdev->vars.stats.stats_mirror.stats_hw.nig.flow_ctrl_packet) ;
    stats->Nig_mng_discard = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_discard) ;
    stats->Nig_mng_octet_inp = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_octet_inp) ;
    stats->Nig_mng_octet_out = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_octet_out) ;
    stats->Nig_mng_packet_inp = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_packet_inp) ;
    stats->Nig_mng_packet_out = (pdev->vars.stats.stats_mirror.stats_hw.nig.mng_packet_out) ;
    stats->Nig_pbf_octets = (pdev->vars.stats.stats_mirror.stats_hw.nig.pbf_octets) ;
    stats->Nig_pbf_packet = (pdev->vars.stats.stats_mirror.stats_hw.nig.pbf_packet) ;
    stats->Nig_safc_inp = (pdev->vars.stats.stats_mirror.stats_hw.nig.safc_inp) ;

    if (version > L2_CHIP_STATISTICS_VER_NUM_1)
    {
        /* v2 statistics */

        b10_l2_chip_statistics_v2_t *stats_v2 = buf;

        stats_v2->v2.Tx_lpi_count = pdev->vars.stats.stats_mirror.stats_hw.misc.tx_lpi_count;
    }

    if (version > L2_CHIP_STATISTICS_VER_NUM_2)
    {
        b10_l2_chip_statistics_v3_t *stats_v3 = buf;
        stats_v3->v3.coalesced_pkts = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_pkts;
        stats_v3->v3.coalesced_bytes = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_bytes;
        stats_v3->v3.coalesced_events = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_events;
        stats_v3->v3.coalesced_aborts = pdev->vars.stats.stats_mirror.stats_fw.eth_ustorm_common.client_statistics[idx].coalesced_aborts;
    }
}
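
/*
 * Illustrative sketch (not compiled): the version parameter above selects how
 * much of the caller's buffer is populated, so a caller asking for v3 fields
 * must pass a buffer with the v3 layout. L2_CHIP_STATISTICS_VER_NUM_3 is
 * assumed to exist alongside the _1/_2 constants tested above; the local
 * variable and helper name are hypothetical.
 */
#if 0
static void lm_stats_l2_chip_stats_usage_sketch( struct _lm_device_t *pdev )
{
    b10_l2_chip_statistics_v3_t l2_stats_v3 = {0};

    // v1 fields are always filled; v2/v3 fields only when version allows it.
    lm_stats_get_l2_chip_stats( pdev, &l2_stats_v3, L2_CHIP_STATISTICS_VER_NUM_3 );
}
#endif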

void lm_stats_get_l4_chip_stats( struct _lm_device_t* pdev, b10_l4_chip_statistics_t *stats )
{
    u8_t idx = 0 ;

    stats->ver_num = L4_CHIP_STATISTISTCS_VER_NUM ;

    stats->NoTxCqes = pdev->vars.stats.stats_mirror.stats_fw.toe_cstorm_toe.no_tx_cqes ;

    // IP4
    idx = STATS_IP_4_IDX ;

    stats->InTCP4Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
    stats->OutTCP4Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_out_segments ;
    stats->RetransmittedTCP4Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_retransmitted_segments ;
    stats->InTCP4Errors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].tcp_in_errors ;
    stats->InIP4Receives = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
    stats->InIP4HeaderErrors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_header_errors ;
    stats->InIP4Discards = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_discards ;
    stats->InIP4Delivers = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_delivers ;
    stats->InIP4Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_octets ;
    stats->OutIP4Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].ip_out_octets ;
    stats->InIP4TruncatedPackets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_truncated_packets ;

    // IP6
    idx = STATS_IP_6_IDX ;

    stats->InTCP6Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
    stats->OutTCP6Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_out_segments ;
    stats->RetransmittedTCP6Segments = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].tcp_retransmitted_segments ;
    stats->InTCP6Errors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].tcp_in_errors ;
    stats->InIP6Receives = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_receives ;
    stats->InIP6HeaderErrors = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_header_errors ;
    stats->InIP6Discards = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_discards ;
    stats->InIP6Delivers = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_delivers ;
    stats->InIP6Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_octets ;
    stats->OutIP6Octets = pdev->vars.stats.stats_mirror.stats_fw.toe_xstorm_toe.statistics[idx].ip_out_octets ;
    stats->InIP6TruncatedPackets = pdev->vars.stats.stats_mirror.stats_fw.toe_tstorm_toe.statistics[idx].ip_in_truncated_packets ;
}

void lm_stats_hw_config_stats( struct _lm_device_t* pdev, u8_t b_enabled )
{
    DbgMessage(pdev, WARNstat, "lm_stats_hw_config_stats: b_collect_enabled %s-->%s\n",
               pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled ? "TRUE":"FALSE",
               b_enabled ? "TRUE":"FALSE" );

    if (IS_PFDEV(pdev)) {
        pdev->vars.stats.stats_collect.stats_hw.b_collect_enabled = b_enabled ;
    }
}

void lm_stats_fw_config_stats( struct _lm_device_t* pdev, u8_t b_enabled )
{
    DbgMessage(pdev, VERBOSEstat, "lm_stats_fw_config_stats: b_collect_enabled %s-->%s\n",
               pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled ? "TRUE":"FALSE",
               b_enabled ? "TRUE":"FALSE" );
    if (IS_PFDEV(pdev) || IS_CHANNEL_VFDEV(pdev)) {
        pdev->vars.stats.stats_collect.stats_fw.b_collect_enabled = b_enabled ;
    }
}

/*
 *------------------------------------------------------------------------
 * lm_stats_mgmt_assign_func
 *
 * assigns values from the different 'mirror' structures into the
 * host_func_stats_t structure that will later be sent to mgmt
 * NOTE: function must be called under PHY_LOCK (since it uses the REG_WR_DMAE interface)
 *------------------------------------------------------------------------
 */
STATIC void lm_stats_mgmt_assign_func( IN struct _lm_device_t* pdev )
{
    u64_t val = 0 ;
    u64_t val_base = 0 ;
    lm_status_t lm_status = LM_STATUS_SUCCESS ;
    lm_stats_t stats_type = 0 ;
    host_func_stats_t* mcp_func = NULL ;
    host_func_stats_t* mcp_func_base = NULL ;

    if CHK_NULL(pdev)
    {
        return;
    }

    if ( GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ) )
    {
        return;
    }

    mcp_func = &pdev->vars.stats.stats_mirror.stats_mcp_func ;
    mcp_func_base = &pdev->vars.stats.stats_mirror.stats_mcp_func_base ;

    stats_type = LM_STATS_BYTES_RCV ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        // calculate 'total' rcv (total+discards)
        val += (pdev->vars.stats.stats_mirror.stats_fw.eth_tstorm_common.client_statistics[LM_CLI_IDX_NDIS].rcv_error_bytes) ;

        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_bytes_received, val_base);
        mcp_func->total_bytes_received_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_bytes_received_lo = (u32_t)U64_LO( val ) ;
    }

    stats_type = LM_STATS_BYTES_XMIT ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_bytes_transmitted, val_base);
        mcp_func->total_bytes_transmitted_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_bytes_transmitted_lo = (u32_t)U64_LO( val ) ;
    }

    stats_type = LM_STATS_UNICAST_FRAMES_RCV ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_unicast_packets_received, val_base);
        mcp_func->total_unicast_packets_received_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_unicast_packets_received_lo = (u32_t)U64_LO( val ) ;
    }

    stats_type = LM_STATS_MULTICAST_FRAMES_RCV ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_multicast_packets_received, val_base);
        mcp_func->total_multicast_packets_received_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_multicast_packets_received_lo = (u32_t)U64_LO( val ) ;
    }

    stats_type = LM_STATS_BROADCAST_FRAMES_RCV ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_broadcast_packets_received, val_base);
        mcp_func->total_broadcast_packets_received_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_broadcast_packets_received_lo = (u32_t)U64_LO( val ) ;
    }

    stats_type = LM_STATS_UNICAST_FRAMES_XMIT ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_unicast_packets_transmitted, val_base);
        mcp_func->total_unicast_packets_transmitted_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_unicast_packets_transmitted_lo = (u32_t)U64_LO( val ) ;
    }

    stats_type = LM_STATS_MULTICAST_FRAMES_XMIT ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_multicast_packets_transmitted, val_base);
        mcp_func->total_multicast_packets_transmitted_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_multicast_packets_transmitted_lo = (u32_t)U64_LO( val ) ;
    }

    stats_type = LM_STATS_BROADCAST_FRAMES_XMIT ;
    lm_status = lm_get_stats( pdev, stats_type, &val
#ifdef VF_INVOLVED
                              ,NULL
#endif
                              ) ;
    if ERR_IF( LM_STATUS_SUCCESS != lm_status )
    {
        DbgMessage(pdev, WARNstat, "lm_stats_mcp_assign: lm_get_stats type=0x%X failed. lm_status=0x%X", stats_type, lm_status ) ;
    }
    else
    {
        val += LM_STATS_HI_LO_TO_64(mcp_func_base->total_broadcast_packets_transmitted, val_base);
        mcp_func->total_broadcast_packets_transmitted_hi = (u32_t)U64_HI( val ) ;
        mcp_func->total_broadcast_packets_transmitted_lo = (u32_t)U64_LO( val ) ;
    }

    // Calculate the size to be written through DMAE
    val = sizeof(pdev->vars.stats.stats_mirror.stats_mcp_func) ;
    val = val/sizeof(u32_t) ;
    mcp_func->host_func_stats_end = ++mcp_func->host_func_stats_start ;

    // This code section must be under phy lock!
    REG_WR_DMAE_LEN(pdev,
                    pdev->vars.fw_func_stats_ptr,
                    mcp_func,
                    (u16_t)val ) ;

} // lm_stats_mgmt_assign_func
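
/*
 * Illustrative sketch (not compiled): the base+delta accumulation pattern
 * repeated above, isolated for one field. A freshly collected 64-bit driver
 * counter is added to the PMF-base value kept as a hi/lo pair, and the sum is
 * split back into the hi/lo pair the MFW expects. The field is a real
 * host_func_stats_t member; the helper name is hypothetical.
 */
#if 0
static void lm_stats_accumulate_sketch( host_func_stats_t* mcp_func,
                                        host_func_stats_t* mcp_func_base,
                                        u64_t val )
{
    u64_t val_base = 0;

    // base (hi/lo pair) -> 64-bit value, added to the collected counter...
    val += LM_STATS_HI_LO_TO_64( mcp_func_base->total_bytes_received, val_base );

    // ...then split back into the hi/lo pair for the MFW.
    mcp_func->total_bytes_received_hi = (u32_t)U64_HI( val );
    mcp_func->total_bytes_received_lo = (u32_t)U64_LO( val );
}
#endif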

/*
 *------------------------------------------------------------------------
 * lm_stats_mgmt_read_func_base -
 *
 * read values from the mgmt structures into the host_func_stats_t base structure;
 * this is used as a base value that is added in when the function reports statistics
 * NOTE: function must be called under PHY_LOCK (since it uses the REG_RD_DMAE interface)
 *------------------------------------------------------------------------
 */
static void lm_stats_mgmt_read_func_base( IN struct _lm_device_t* pdev )
{
    u64_t val = 0 ;
    host_func_stats_t* mcp_func_base = NULL ;

    if CHK_NULL(pdev)
    {
        return;
    }

    if( 0 == pdev->vars.fw_func_stats_ptr )
    {
        return;
    }

    if (GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ))
    {
        return;
    }

    mcp_func_base = &pdev->vars.stats.stats_mirror.stats_mcp_func_base ;

    val = sizeof(pdev->vars.stats.stats_mirror.stats_mcp_func_base) ;
    val = val/sizeof(u32_t) ;

    // This code section must be under phy lock!
    REG_RD_DMAE_LEN(pdev,
                    pdev->vars.fw_func_stats_ptr,
                    mcp_func_base,
                    (u16_t)val ) ;

} // lm_stats_mgmt_read_func_base


/*
 *------------------------------------------------------------------------
 * lm_stats_mgmt_clear_all_func -
 *
 * clear mgmt statistics for all functions
 * should be called on the init port part; the first function should clear all
 * other functions' mailboxes
 * NOTE: function must be called under PHY_LOCK (since it uses the REG_WR_DMAE interface)
 *------------------------------------------------------------------------
 */
static void lm_stats_mgmt_clear_all_func( IN struct _lm_device_t* pdev )
{
    u64_t val = 0 ;
    u8_t func = 0;
    u32_t fw_func_stats_ptr = 0;

    // use the current pdev stats_mcp_func for all functions - (zeroed buffer)
    val = sizeof(pdev->vars.stats.stats_mirror.stats_mcp_func);
    mm_mem_zero(&pdev->vars.stats.stats_mirror.stats_mcp_func, (u32_t)val );

    val = val/sizeof(u32_t) ;

    LM_FOREACH_FUNC_MAILBOX_IN_PORT(pdev,func)
    {
        lm_setup_read_mgmt_stats_ptr(pdev, func, NULL, &fw_func_stats_ptr );

        if( 0 != fw_func_stats_ptr )
        {

            // This code section must be under phy lock!
            // writes zeros
            REG_WR_DMAE_LEN(pdev,
                            fw_func_stats_ptr,
                            &pdev->vars.stats.stats_mirror.stats_mcp_func,
                            (u16_t)val ) ;
        }
        if(CHIP_IS_E1(pdev) || (!CHIP_IS_E1x(pdev) && (CHIP_PORT_MODE(pdev) == LM_CHIP_PORT_MODE_4)))
        {
            // only one iteration for E1 and for 4-port mode!
            break;
        }
    }
} // lm_stats_mgmt_clear_all_func

/*
 *Function Name:lm_stats_port_to_from
 *
 *Parameters:
 *    b_is_to - determines whether the operation is to/from the MCP
 *    b_is_to TRUE  - to the MCP
 *    b_is_to FALSE - from the MCP
 *Description:
 *    Helper function to copy stats between the MCP and the driver host when switching PMFs
 *
 *Returns:
 *
 */
void lm_stats_port_to_from( IN OUT struct _lm_device_t* pdev, u8_t b_is_to )
{
    host_port_stats_t* mcp_port = NULL ;
    lm_stats_hw_t* stats_hw = NULL ;
    stats_macs_idx_t stats_macs_idx = STATS_MACS_IDX_MAX ;
    u8_t i = 0 ;

    mcp_port = &pdev->vars.stats.stats_mirror.stats_mcp_port ;
    stats_hw = &pdev->vars.stats.stats_mirror.stats_hw ;

    ASSERT_STATIC( STATS_MACS_IDX_MAX == MAC_STX_IDX_MAX );
    ASSERT_STATIC( STATS_MACS_IDX_CURRENT < STATS_MACS_IDX_TOTAL );


    // B/EMAC is up:
    //   OLD PMF:
    //     copy all EMAC 'reset' to 'total'
    //
    //   NEW PMF:
    //     copy all EMAC 'total' to 'reset'
    //
    // NONE is up:
    //   copy only 'reset' to 'total'

    switch( pdev->vars.mac_type )
    {
    case MAC_TYPE_EMAC:
    case MAC_TYPE_BMAC:
    case MAC_TYPE_UMAC:
    case MAC_TYPE_XMAC:
        stats_macs_idx = STATS_MACS_IDX_CURRENT ;
        break;

    case MAC_TYPE_NONE:
        stats_macs_idx = STATS_MACS_IDX_TOTAL ;
        break;

    default:
        DbgBreakMsg( "mac_type not acceptable" ) ;
        return;
    }

#define LM_STATS_PMF_TO_FROM( _mcp_field, _hw_field, _b_is_to ) \
    if( _b_is_to )\
    { \
        LM_STATS_64_TO_HI_LO( stats_hw->macs[i]._hw_field, mcp_port->mac_stx[i]._mcp_field );\
    } \
    else \
    { \
        LM_STATS_HI_LO_TO_64( mcp_port->mac_stx[i]._mcp_field, stats_hw->macs[i]._hw_field ) ;\
    }


    for( i = stats_macs_idx; i < STATS_MACS_IDX_MAX; i++ )
    {
        LM_STATS_PMF_TO_FROM( rx_stat_dot3statsfcserrors, stats_rx.rx_stat_dot3statsfcserrors, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( rx_stat_dot3statsalignmenterrors, stats_rx.rx_stat_dot3statsalignmenterrors, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( rx_stat_etherstatsundersizepkts, stats_rx.rx_stat_etherstatsundersizepkts, b_is_to ) ;

        // Exception - don't migrate this parameter (mandatory NDIS parameter)
        //LM_STATS_PMF_TO_FROM( rx_stat_dot3statsframestoolong, stats_rx.rx_stat_dot3statsframestoolong, b_is_to ) ;

        LM_STATS_PMF_TO_FROM( rx_stat_xonpauseframesreceived, stats_rx.rx_stat_xonpauseframesreceived, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( rx_stat_xoffpauseframesreceived, stats_rx.rx_stat_xoffpauseframesreceived, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_outxonsent, stats_tx.tx_stat_outxonsent, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( tx_stat_outxoffsent, stats_tx.tx_stat_outxoffsent, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_dot3statssinglecollisionframes, stats_tx.tx_stat_dot3statssinglecollisionframes, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( tx_stat_dot3statsmultiplecollisionframes, stats_tx.tx_stat_dot3statsmultiplecollisionframes, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( tx_stat_dot3statslatecollisions, stats_tx.tx_stat_dot3statslatecollisions, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( tx_stat_dot3statsexcessivecollisions, stats_tx.tx_stat_dot3statsexcessivecollisions, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( rx_stat_maccontrolframesreceived, stats_rx.rx_stat_maccontrolframesreceived, b_is_to ) ;

        LM_STATS_PMF_TO_FROM( rx_stat_mac_xpf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xpf, b_is_to ) ; // EMAC 0 BMAC only
        LM_STATS_PMF_TO_FROM( rx_stat_mac_xcf, stats_rx.rx_stat_maccontrolframesreceived_bmac_xcf, b_is_to ) ; // EMAC 0 BMAC only

        LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts64octets, stats_tx.tx_stat_etherstatspkts64octets, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts65octetsto127octets, stats_tx.tx_stat_etherstatspkts65octetsto127octets, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts128octetsto255octets, stats_tx.tx_stat_etherstatspkts128octetsto255octets, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts256octetsto511octets, stats_tx.tx_stat_etherstatspkts256octetsto511octets, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts512octetsto1023octets, stats_tx.tx_stat_etherstatspkts512octetsto1023octets, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_etherstatspkts1024octetsto1522octets, stats_tx.tx_stat_etherstatspkts1024octetsto1522octet, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_etherstatspktsover1522octets, stats_tx.tx_stat_etherstatspktsover1522octets, b_is_to ) ;


        LM_STATS_PMF_TO_FROM( tx_stat_mac_2047, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_2047, b_is_to ) ; // EMAC 0 BMAC only
        LM_STATS_PMF_TO_FROM( tx_stat_mac_4095, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_4095, b_is_to ) ; // EMAC 0 BMAC only
        LM_STATS_PMF_TO_FROM( tx_stat_mac_9216, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_9216, b_is_to ) ; // EMAC 0 BMAC only
        LM_STATS_PMF_TO_FROM( tx_stat_mac_16383, stats_tx.tx_stat_etherstatspktsover1522octets_bmac_16383, b_is_to ) ; // EMAC 0 BMAC only

        LM_STATS_PMF_TO_FROM( rx_stat_etherstatsfragments, stats_rx.rx_stat_etherstatsfragments, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( rx_stat_etherstatsjabbers, stats_rx.rx_stat_etherstatsjabbers, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_dot3statsdeferredtransmissions, stats_tx.tx_stat_dot3statsdeferredtransmissions, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( tx_stat_dot3statsinternalmactransmiterrors, stats_tx.tx_stat_dot3statsinternalmactransmiterrors, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_etherstatscollisions, stats_tx.tx_stat_etherstatscollisions, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( tx_stat_flowcontroldone, stats_tx.tx_stat_flowcontroldone, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( rx_stat_xoffstateentered, stats_rx.rx_stat_xoffstateentered, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( rx_stat_ifhcinbadoctets, stats_rx.rx_stat_ifhcinbadoctets, b_is_to ) ;
        LM_STATS_PMF_TO_FROM( tx_stat_ifhcoutbadoctets, stats_tx.tx_stat_ifhcoutbadoctets, b_is_to ) ; // BMAC 0
        LM_STATS_PMF_TO_FROM( tx_stat_mac_ufl, stats_tx.tx_stat_ifhcoutdiscards, b_is_to ) ; // EMAC 0
        LM_STATS_PMF_TO_FROM( rx_stat_dot3statscarriersenseerrors, stats_rx.rx_stat_dot3statscarriersenseerrors, b_is_to ) ; // BMAC 0
    }

    // NIG, MSTAT and EEE
    if( b_is_to )
    {
        LM_STATS_64_TO_HI_LO( stats_hw->nig.brb_discard, mcp_port->brb_drop ) ;

        LM_STATS_64_TO_HI_LO( stats_hw->macs->stats_tx.tx_stat_pfcPacketCounter, mcp_port->pfc_frames_tx );
        LM_STATS_64_TO_HI_LO( stats_hw->macs->stats_rx.rx_stat_pfcPacketCounter, mcp_port->pfc_frames_rx );

        LM_STATS_64_TO_HI_LO( stats_hw->misc.tx_lpi_count, mcp_port->eee_lpi_count);
    }
    else
    {
        LM_STATS_HI_LO_TO_64( mcp_port->brb_drop, stats_hw->nig.brb_discard ) ;

        LM_STATS_HI_LO_TO_64( mcp_port->pfc_frames_tx, stats_hw->macs->stats_tx.tx_stat_pfcPacketCounter );
        LM_STATS_HI_LO_TO_64( mcp_port->pfc_frames_rx, stats_hw->macs->stats_rx.rx_stat_pfcPacketCounter );

        LM_STATS_HI_LO_TO_64( mcp_port->eee_lpi_count, stats_hw->misc.tx_lpi_count);
    }

}
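
/*
 * Illustrative sketch (not compiled): what one LM_STATS_PMF_TO_FROM invocation
 * from the loop above expands to, written out for a single field. This is just
 * the macro body with the arguments substituted, shown for readability; the
 * names are the ones used in the loop.
 */
#if 0
    // LM_STATS_PMF_TO_FROM( rx_stat_dot3statsfcserrors, stats_rx.rx_stat_dot3statsfcserrors, b_is_to ) ;
    if( b_is_to )
    {   // save: driver 64-bit mirror -> MCP hi/lo pair
        LM_STATS_64_TO_HI_LO( stats_hw->macs[i].stats_rx.rx_stat_dot3statsfcserrors, mcp_port->mac_stx[i].rx_stat_dot3statsfcserrors );
    }
    else
    {   // load: MCP hi/lo pair -> driver 64-bit mirror
        LM_STATS_HI_LO_TO_64( mcp_port->mac_stx[i].rx_stat_dot3statsfcserrors, stats_hw->macs[i].stats_rx.rx_stat_dot3statsfcserrors ) ;
    }
#endif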

/*
 * \brief Calculate the MCP port stats size
 *
 * Calculate the size to be written.
 *
 * This logic is required as b10_l2_chip_statistics_t may increase in size
 * (due to a driver change), while the reserved MCP area does not follow suit
 * (as is the case, for example, when the driver and MFW versions do not
 * match).
 *
 * This logic calculates the size available based on the MFW version, plus an
 * additional shmem item added specifically to report the available size, thus
 * making it proof against future changes to the statistics MCP size.
 *
 */

STATIC u16_t lm_stats_port_size(IN struct _lm_device_t *pdev)
{
    const u32_t bc_rev_major = LM_GET_BC_REV_MAJOR(pdev);
    const u8_t b_bc_pfc_support = bc_rev_major >= REQ_BC_VER_4_PFC_STATS_SUPPORTED;
    size_t sizeof_port_stats = 0;
    u32_t sizeof_port_stats_shmem = 0;

    if (LM_SHMEM2_HAS(pdev,sizeof_port_stats))
    {
        LM_SHMEM2_READ(pdev,OFFSETOF(struct shmem2_region, sizeof_port_stats), &sizeof_port_stats_shmem);

        sizeof_port_stats = min((size_t)sizeof_port_stats_shmem, sizeof(pdev->vars.stats.stats_mirror.stats_mcp_port));
    }
    else
    {
        if (b_bc_pfc_support)
        {
            // "pfc_frames_rx_lo" is the last member of host_port_stats_t for that MFW version.

            sizeof_port_stats = OFFSETOF(host_port_stats_t, pfc_frames_rx_lo) +
                                sizeof(pdev->vars.stats.stats_mirror.stats_mcp_port.pfc_frames_rx_lo);
        }
        else
        {
            // "not_used" is the last member of host_port_stats_t for that MFW version.

            sizeof_port_stats = OFFSETOF(host_port_stats_t, not_used ) +
                                sizeof(pdev->vars.stats.stats_mirror.stats_mcp_port.not_used);
        }
    }

    sizeof_port_stats /= sizeof(u32_t) ;

    /*
     * we are returning only 16 bits of the size calculated. Check (CHK version only) if the size
     * is too big to be held in 16 bits, which indicates either an error wrt size, or that DMAE is
     * about to be provided with a task too big.
     */

    DbgBreakIf( sizeof_port_stats >= 1u<<(sizeof(u16_t)*8) );

    return (u16_t)sizeof_port_stats;
}
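
/*
 * Illustrative sketch (not compiled): the fallback arithmetic above, written
 * out. With no shmem2 size hint, the write size is cut at the last member the
 * running MFW knows about, then converted to 32-bit words for DMAE. The field
 * names are the ones used above; the helper is hypothetical, and the trailing
 * member is assumed here to be a u32.
 */
#if 0
static u16_t lm_stats_port_size_fallback_sketch( void )
{
    size_t sz;

    // e.g. a PFC-aware MFW: everything up to and including pfc_frames_rx_lo
    sz = OFFSETOF(host_port_stats_t, pfc_frames_rx_lo) + sizeof(u32_t);

    sz /= sizeof(u32_t); // DMAE length is counted in 32-bit words

    return (u16_t)sz;
}
#endif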

/*
 *Function Name:lm_stats_port_zero
 *
 *Parameters:
 *
 *Description:
 *    This function should be called by the first function on the port (PMF) - it zeros the MCP scratch pad
 *Returns:
 *
 */
lm_status_t lm_stats_port_zero( IN struct _lm_device_t* pdev )
{
    u16_t size = 0 ;
    lm_status_t lm_status = LM_STATUS_SUCCESS ;

    if( 0 == pdev->vars.fw_port_stats_ptr )
    {
        /* This could happen and therefore is not considered an error */
        return LM_STATUS_SUCCESS;
    }

    // Calculate the size to be written through DMAE
    size = lm_stats_port_size(pdev);

    // This code section must be under phy lock!
    REG_WR_DMAE_LEN_ZERO(pdev,
                         pdev->vars.fw_port_stats_ptr,
                         size ) ;

    return lm_status ;
}

/*
 *Function Name:lm_stats_port_save
 *
 *Parameters:
 *
 *Description:
 *    This function should be called before the PMF is unloaded in order to preserve statistics for the next PMF
 *    ASSUMPTION: function must be called under PHY_LOCK (since it uses the REG_WR_DMAE interface)
 *    ASSUMPTION: the link cannot change at this point and until the PMF is down
 *Returns:
 *
 */
lm_status_t lm_stats_port_save( IN struct _lm_device_t* pdev )
{
    u16_t size = 0 ;
    lm_status_t lm_status = LM_STATUS_SUCCESS ;
    host_port_stats_t* mcp_port = NULL ;

    if( 0 == pdev->vars.fw_port_stats_ptr )
    {
        /* This could happen and therefore is not considered an error */
        return LM_STATUS_SUCCESS;
    }

    lm_stats_port_to_from( pdev, TRUE ) ;

    // Calculate the size to be written through DMAE
    size = lm_stats_port_size(pdev);

    mcp_port = &pdev->vars.stats.stats_mirror.stats_mcp_port ;
    mcp_port->not_used = ++mcp_port->host_port_stats_counter ;

    // This code section must be under phy lock!
    REG_WR_DMAE_LEN(pdev,
                    pdev->vars.fw_port_stats_ptr,
                    mcp_port,
                    size ) ;

    return lm_status ;
}

/*
 *Function Name:lm_stats_port_load
 *
 *Parameters:
 *
 *Description:
 *    This function should be called before a new PMF is loaded in order to restore statistics from the previous PMF
 *    vars.is_pmf should be set to TRUE only after this function completes!
 *    ASSUMPTION: function must be called under PHY_LOCK (since it uses the REG_RD_DMAE interface)
 *    ASSUMPTION: the link cannot change at this point and until the PMF is up
 *Returns:
 *
 */
lm_status_t lm_stats_port_load( IN struct _lm_device_t* pdev )
{
    u16_t size = 0 ;
    lm_status_t lm_status = LM_STATUS_SUCCESS ;
    host_port_stats_t* mcp_port = NULL ;

    if( 0 == pdev->vars.fw_port_stats_ptr )
    {
        /* This could happen and therefore is not considered an error */
        return LM_STATUS_SUCCESS;
    }

    // Calculate the size to be read through DMAE
    size = lm_stats_port_size(pdev);

    mcp_port = &pdev->vars.stats.stats_mirror.stats_mcp_port ;
    mcp_port->not_used = ++mcp_port->host_port_stats_counter ;

    // This code section must be under phy lock!
    REG_RD_DMAE_LEN(pdev,
                    pdev->vars.fw_port_stats_ptr,
                    mcp_port,
                    size ) ;

    lm_stats_port_to_from( pdev, FALSE ) ;

    return lm_status ;
}
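
/*
 * Illustrative sketch (not compiled): the PMF hand-off sequence implied by the
 * save/load pair above. Both calls must be made under the PHY lock; the
 * two-device flow shown here is schematic only, and the helper name is
 * hypothetical.
 */
#if 0
static void lm_stats_pmf_handoff_sketch( struct _lm_device_t *pdev_old_pmf,
                                         struct _lm_device_t *pdev_new_pmf )
{
    // Outgoing PMF (link state frozen): push its totals to the MCP scratch pad.
    lm_stats_port_save( pdev_old_pmf );

    // Incoming PMF (before its link status may change): pull the totals back
    // so port statistics continue from where the previous PMF stopped.
    lm_stats_port_load( pdev_new_pmf );
}
#endif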

/*
 *------------------------------------------------------------------------
 * lm_stats_mgmt_assign
 *
 * write values from the driver mirror into the mgmt func and port base structures
 * NOTE: function must be called under PHY_LOCK (since it uses the DMAE interface)
 *------------------------------------------------------------------------
 */
void lm_stats_mgmt_assign( IN struct _lm_device_t* pdev )
{
    if CHK_NULL(pdev)
    {
        return;
    }

    if ( GET_FLAGS(pdev->params.test_mode, TEST_MODE_NO_MCP ) )
    {
        return;
    }

    if( pdev->vars.fw_func_stats_ptr )
    {
        lm_stats_mgmt_assign_func(pdev);
    }
    if( pdev->vars.fw_port_stats_ptr )
    {
        // only the PMF should assign port statistics
        if( IS_PMF(pdev) )
        {
            lm_stats_port_save(pdev);
        }
    }
}

/*
 *Function Name:lm_stats_on_pmf_update
 *
 *Parameters:
 *    b_on:
 *    TRUE  - the device is now becoming a PMF
 *    FALSE - the device is now going down and transferring the PMF role to another device
 *Description:
 *    the function should be called under the PHY LOCK.
 *    TRUE:  when a device becomes a PMF, before the link status changes from the last state in which the previous PMF went down, after the call to mcp driver load
 *    FALSE: when a device goes down, after the link status is saved and can no longer change (interrupts are disabled), before the call to mcp driver unload
 *Returns:
 *
 */
lm_status_t lm_stats_on_pmf_update( struct _lm_device_t* pdev, IN u8_t b_on )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS ;

    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    if( b_on )
    {
        lm_status = lm_stats_port_load( pdev );
    }
    else
    {
        lm_status = lm_stats_on_update_state(pdev);

        // check for success, but link down is a valid situation!
        DbgBreakIf( ( LM_STATUS_SUCCESS != lm_status ) && ( LM_STATUS_LINK_DOWN != lm_status ) );

        // we need to save port stats only if the link is down;
        // if the link is up, it was already done in the call to lm_stats_on_update_state.
        if( LM_STATUS_LINK_DOWN == lm_status )
        {
            lm_status = lm_stats_port_save( pdev );
        }
    }
    return lm_status ;
}
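
/*
 * Illustrative sketch (not compiled): calling the PMF-update hook from the
 * load path. The lock acquire/release wrappers are hypothetical placeholders
 * for however the port PHY lock is taken in this driver.
 */
#if 0
static void lm_stats_pmf_update_usage_sketch( struct _lm_device_t *pdev )
{
    MM_ACQUIRE_PHY_LOCK(pdev);             // hypothetical lock wrapper
    lm_stats_on_pmf_update( pdev, TRUE );  // becoming PMF: reload port stats
    MM_RELEASE_PHY_LOCK(pdev);             // hypothetical unlock wrapper
}
#endif
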
/*
 *Function Name:lm_stats_on_pmf_init
 *
 *Parameters:
 *
 *Description:
 *    call this function under the PHY LOCK when the FIRST-ever PMF comes up
 *Returns:
 *
 */
lm_status_t lm_stats_on_pmf_init( struct _lm_device_t* pdev )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS ;
    if CHK_NULL(pdev)
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    lm_status = lm_stats_port_zero( pdev ) ;

    return lm_status ;

}

lm_status_t lm_stats_hw_collect( struct _lm_device_t* pdev )
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u8_t port = PORT_ID(pdev);
    const u32_t pkt0 = port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : NIG_REG_STAT0_EGRESS_MAC_PKT0 ;
    const u32_t pkt1 = port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 : NIG_REG_STAT0_EGRESS_MAC_PKT1 ;
    const u32_t eee = port ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1 : MISC_REG_CPMU_LP_SM_ENT_CNT_P0 ;

    // run the DMAE command sequence
    lm_status = lm_stats_dmae( pdev ) ;
    if( LM_STATUS_SUCCESS != lm_status )
    {
        return lm_status;
    }

    // read two more NIG registers in the regular way - these do not exist on E3!
    if (!CHIP_IS_E3(pdev))
    {
        REG_RD_DMAE( pdev, pkt0, &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt0 );
        REG_RD_DMAE( pdev, pkt1, &pdev->vars.stats.stats_collect.stats_hw.nig_ex_stats_query.egress_mac_pkt1 );
    }

    // EEE is only supported on the E3 chip
    if (CHIP_IS_E3(pdev))
    {
        pdev->vars.stats.stats_collect.stats_hw.misc_stats_query.tx_lpi_count = REG_RD(pdev, eee);
    }

    return lm_status ;
}
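
/*
 * Illustrative sketch (not compiled): the per-port register selection used
 * above, isolated. Each port has its own NIG egress counter block, so the
 * register address is picked by port id. The register names are the ones
 * referenced in this function; the helper name is hypothetical.
 */
#if 0
static u32_t lm_stats_nig_pkt0_reg_sketch( struct _lm_device_t *pdev )
{
    // Port 1 uses the STAT1 block, port 0 the STAT0 block.
    return PORT_ID(pdev) ? NIG_REG_STAT1_EGRESS_MAC_PKT0
                         : NIG_REG_STAT0_EGRESS_MAC_PKT0;
}
#endif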

/*
 *Function Name:lm_stats_init_port_part
 *
 *Parameters:
 *
 *Description:
 *    call this function under the PHY LOCK on port init
 *Returns:
 *
 */
void lm_stats_init_port_part( struct _lm_device_t* pdev )
{
    lm_stats_mgmt_clear_all_func(pdev);
}

/*
 *Function Name:lm_stats_init_func_part
 *
 *Parameters:
 *
 *Description:
 *    call this function under the PHY LOCK on function init
 *Returns:
 *
 */
void lm_stats_init_func_part( struct _lm_device_t* pdev )
{
    if (IS_PMF(pdev) && IS_MULTI_VNIC(pdev))
    {
        lm_stats_on_pmf_init(pdev);
    }
    lm_stats_mgmt_read_func_base(pdev);
}