1 /*******************************************************************************
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * Copyright 2014 QLogic Corporation
22 * The contents of this file are subject to the terms of the
23 * QLogic End User License (the "License").
24 * You may not use this file except in compliance with the License.
25 *
26 * You can obtain a copy of the License at
27 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
28 * QLogic_End_User_Software_License.txt
29 * See the License for the specific language governing permissions
30 * and limitations under the License.
31 *
32 *
33 * Module Description:
34 *
35 *
36 * History:
37 * 11/29/10 Alon Elhanani Inception.
38 ******************************************************************************/
39
40 #include "lm5710.h"
41 #include "mcp_shmem.h"
42 #include "mac_stats.h"
43
/**lm_niv_set_loopback_mode_imp
 * Post a function-update ramrod that enables or disables NIV
 * loopback mode, carrying the current NIV default VLAN and
 * allowed priorities.
 *
 * @param pdev     the device to use
 * @param b_enable TRUE to enable loopback mode, FALSE to disable it.
 */
static void lm_niv_set_loopback_mode_imp(struct _lm_device_t *pdev, IN const u8_t b_enable )
{
    lm_status_t                  lm_status     = LM_STATUS_SUCCESS;
    struct function_update_data* data          = LM_SLOWPATH(pdev, niv_function_update_data);
    const lm_address_t           data_phys     = LM_SLOWPATH_PHYS(pdev, niv_function_update_data);
    const niv_ramrod_state_t     initial_state = b_enable ? NIV_RAMROD_SET_LOOPBACK_POSTED : NIV_RAMROD_CLEAR_LOOPBACK_POSTED;

    data->vif_id_change_flg             = FALSE;
    data->afex_default_vlan_change_flg  = TRUE;
    data->afex_default_vlan             = mm_cpu_to_le16(NIV_DEFAULT_VLAN(pdev));
    data->allowed_priorities_change_flg = TRUE;
    data->allowed_priorities            = NIV_ALLOWED_PRIORITIES(pdev);
    data->network_cos_mode_change_flg   = FALSE;

    data->lb_mode_en                    = b_enable;
    data->lb_mode_en_change_flg         = 1;
    data->echo                          = FUNC_UPDATE_RAMROD_SOURCE_NIV;

    lm_status = lm_niv_post_command(pdev, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, data_phys.as_u64, initial_state);

    /* The status used to be computed and silently discarded; surface a
     * posting failure in checked builds, consistent with the other NIV
     * flows in this file. */
    DbgBreakIf(LM_STATUS_SUCCESS != lm_status);
}
64
65 /**lm_niv_cli_update
66 * Update each client with new NIV default VLAN
67 *
68 * @param pdev the device to use
69 *
70 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
71 * failure code on failure.
72 */
lm_niv_clients_update(IN lm_device_t * pdev)73 static lm_status_t lm_niv_clients_update(IN lm_device_t *pdev)
74 {
75 lm_status_t lm_status = LM_STATUS_FAILURE;
76 u16_t silent_vlan_value = NIV_DEFAULT_VLAN(pdev);
77 u16_t silent_vlan_mask = ETHERNET_VLAN_ID_MASK;
78 u8_t cid = 0;
79 u8_t client_id = 0;
80
81 if(FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE == AFEX_VLAN_MODE(pdev))
82 {
83 // In this mode FW should remove all VLANS
84 silent_vlan_value = 0;
85 silent_vlan_mask = 0;
86 }
87
88 /* init l2 client conn param with default mtu values */
89 for (cid = 0; cid < (LM_SB_CNT(pdev) + MAX_NON_RSS_CHAINS); cid++) //pdev->params.l2_cli_con_params
90 {
91 /* We want only Ethernet clients. For ethernet cid == client_id, we base the following check on that */
92 if((OOO_CID(pdev) != cid) && //For OOO_CID we don't want to strip the VLAN
93 (FWD_CID(pdev) != cid)) //The FWD_CID is TX only In T7.4 we should enable only for RX clients.
94 {
95 client_id = cid; // TODO: For ethernet client_id == cid... extra parameter added for terminology clearness incase this changes in the future.
96 lm_status = lm_update_eth_client(pdev, client_id, silent_vlan_value, silent_vlan_mask, 1, 1);
97
98 if((LM_STATUS_ABORTED != lm_status) &&
99 (LM_STATUS_SUCCESS != lm_status))
100 {
101 return lm_status;
102 }
103 }
104 }
105
106 return LM_STATUS_SUCCESS;
107 }
108
lm_niv_set_loopback_mode_enable(struct _lm_device_t * pdev)109 static void lm_niv_set_loopback_mode_enable(struct _lm_device_t *pdev)
110 {
111 lm_status_t lm_status = LM_STATUS_FAILURE;
112
113 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info;
114
115 // loopback tests will use default vlan 0x1 must be a value diffrent from zero,
116 // TODO : ask Barak that DIAG test will change the value in SHMEM.
117 mf_info->default_vlan = 0x1;
118 mf_info->niv_allowed_priorities = 0xff;
119
120 lm_niv_set_loopback_mode_imp(pdev, TRUE);
121
122 lm_status = lm_niv_clients_update(pdev);
123
124 if (LM_STATUS_SUCCESS != lm_status)
125 {
126 DbgBreakMsg("lm_niv_cli_update failed ");
127 }
128 }
129
lm_niv_set_loopback_mode_disable(struct _lm_device_t * pdev)130 static void lm_niv_set_loopback_mode_disable(struct _lm_device_t *pdev)
131 {
132 lm_hardware_mf_info_t *mf_info = &pdev->hw_info.mf_info;
133
134 // loopback tests revert values (has no real effect except debugging)
135 mf_info->default_vlan = 0;
136 mf_info->niv_allowed_priorities = 0;
137
138 lm_niv_set_loopback_mode_imp(pdev, FALSE);
139 }
140
/**lm_niv_set_loopback_mode
 * Enable or disable NIV loopback mode. In EDIAG builds the change is
 * applied synchronously; otherwise the actual work is deferred to an
 * LPME work item via MM_REGISTER_LPME.
 *
 * @param pdev     the device to use
 * @param b_enable TRUE to enable loopback mode, FALSE to disable it.
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success (EDIAG builds always
 *         succeed); otherwise the MM_REGISTER_LPME result.
 */
lm_status_t lm_niv_set_loopback_mode(struct _lm_device_t *pdev, IN const u8_t b_enable)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    if (b_enable)
    {
#ifdef EDIAG
        lm_niv_set_loopback_mode_enable(pdev);
#else
        lm_status = MM_REGISTER_LPME(pdev, lm_niv_set_loopback_mode_enable, TRUE, FALSE);
#endif
    }
    else
    {
#ifdef EDIAG
        lm_niv_set_loopback_mode_disable(pdev);
#else
        lm_status = MM_REGISTER_LPME(pdev, lm_niv_set_loopback_mode_disable, TRUE, FALSE);
#endif
    }

    return lm_status;
}
164
/**lm_niv_vif_enable
 * enable current function or change its parameters. This
 * function must be run in PASSIVE IRQL.
 *
 * @param pdev the device to use
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         failure code on failure.
 */
static lm_status_t lm_niv_vif_enable(lm_device_t *pdev)
{
    lm_status_t lm_status = LM_STATUS_FAILURE;
    u16_t vif_id = 0;
    u16_t default_vlan = 0;
    u8_t allowed_priorities = 0;
    // 13 = bit position of the PCP/priority field in an 802.1Q VLAN tag
    const u32_t VLAN_PRIORITY_SHIFT = 13;

    ///Refresh MF CFG values
    lm_status = lm_get_shmem_mf_cfg_info_niv(pdev);

    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    //Reconfigure rate-limit (must hold the PHY lock)
    MM_ACQUIRE_PHY_LOCK(pdev);
    lm_reload_link_and_cmng(pdev);
    MM_RELEASE_PHY_LOCK(pdev);

    ///Send function-update ramrod and wait for completion.
    ///default_vlan combines the NIV VLAN id with the NIV COS in the PCP bits.
    vif_id = VIF_ID(pdev);
    default_vlan = NIV_DEFAULT_VLAN(pdev) | (NIV_DEFAULT_COS(pdev) << VLAN_PRIORITY_SHIFT);
    allowed_priorities = NIV_ALLOWED_PRIORITIES(pdev);

    lm_status = lm_niv_vif_update(pdev,vif_id, default_vlan, allowed_priorities);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    /* init l2 client conn param with default mtu values */
    lm_status = lm_niv_clients_update(pdev);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("lm_niv_cli_update failed ");
        return lm_status;
    }

    ///notify "link-up" to miniport
    MM_ACQUIRE_PHY_LOCK(pdev);
    // cq64469 - verify that the link is up before reporting it as active to the miniport
    if (pdev->vars.link.link_up)
    {
        pdev->vars.link_status = LM_STATUS_LINK_ACTIVE;
    }
    mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
    MM_RELEASE_PHY_LOCK(pdev);

    return lm_status;
}
227
228 /** lm_niv_vif_disable
229 * disable current function. This function must be run in
230 * PASSIVE IRQL.
231 *
232 * @param pdev the device to use
233 *
234 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
235 * failure code on failure.
236 */
lm_niv_vif_disable(lm_device_t * pdev)237 static lm_status_t lm_niv_vif_disable(lm_device_t *pdev)
238 {
239 lm_status_t lm_status = LM_STATUS_FAILURE;
240
241 ///indicate "link-down"
242 MM_ACQUIRE_PHY_LOCK(pdev);
243
244 pdev->vars.link_status = LM_STATUS_LINK_DOWN;
245 mm_indicate_link(pdev, pdev->vars.link_status, pdev->vars.medium);
246
247 MM_RELEASE_PHY_LOCK(pdev);
248
249 ///Send function-update ramrod with vif_id=0xFFFF and wait for completion
250 lm_status = lm_niv_vif_update(pdev,INVALID_VIF_ID, 0, 0);
251 if (LM_STATUS_SUCCESS != lm_status)
252 {
253 return lm_status;
254 }
255
256 return lm_status;
257 }
258
259 /**lm_niv_vif_delete
260 * Delete current function. . This function must be run in
261 * PASSIVE IRQL.
262 *
263 * @param pdev the device to use
264 *
265 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
266 * failure code on failure.
267 */
lm_niv_vif_delete(lm_device_t * pdev)268 static lm_status_t lm_niv_vif_delete(lm_device_t *pdev)
269 {
270 lm_status_t lm_status = LM_STATUS_FAILURE;
271
272 ///Send a vif-list ramrod with VIF_LIST_RULE_CLEAR_FUNC opcode and wait for completion
273 lm_status = lm_niv_vif_list_update(pdev, VIF_LIST_RULE_CLEAR_FUNC, 0/*list_index*/, 0/*func_bit_map*/ ,ABS_FUNC_ID(pdev)/*func_to_clear*/);
274 if (LM_STATUS_SUCCESS != lm_status)
275 {
276 DbgBreakMsg("Failed to clear VIF lists on VIF delete.\n");
277 return lm_status;
278 }
279
280 lm_status = lm_niv_vif_disable(pdev);
281 if (LM_STATUS_SUCCESS != lm_status)
282 {
283 DbgBreakMsg("Failed to disable VIF on VIF delete.\n");
284 return lm_status;
285 }
286
287 return lm_status;
288 }
289
/**NIV_STATS_ASSIGN_HI_LO
 * Split a 64-bit value into the _hi/_lo u32 field pair of an
 * afex_stats member (token-pastes _hi/_lo onto _field).
 * Wrapped in do/while(0) so the macro acts as a single statement
 * and is safe inside an unbraced if/else (CERT PRE10-C).
 */
#define NIV_STATS_ASSIGN_HI_LO(_field, _val)   \
    do {                                       \
        _field##_hi = U64_HI((_val));          \
        _field##_lo = U64_LO((_val));          \
    } while (0)
/**lm_chip_stats_to_niv_stats
 * Copy relevant fields from driver statistics to the format
 * written to the SHMEM for NIV stats.
 *
 * @param pdev the device to take the stats from
 * @param p_afex_stats the SHMEM structure
 */
static void lm_niv_chip_stats_to_niv_stats(lm_device_t* pdev, OUT struct afex_stats* p_afex_stats)
{
    b10_l2_chip_statistics_t stats = {0};
    lm_stats_fw_t *fw_stats = &pdev->vars.stats.stats_mirror.stats_fw;
    fcoe_stats_info_t *fcoe_stats_mfw = &pdev->vars.stats.stats_mirror.stats_drv.drv_info_to_mfw.fcoe_stats;
    u64_t sum_64 = 0;

    lm_stats_get_l2_chip_stats(pdev, &stats, L2_CHIP_STATISTICS_VER_NUM_1);

    // TX unicast = L2 chip counters + FW FCoE counters + MFW-mirrored FCoE counters.
    sum_64 = stats.IfHCOutUcastPkts + fw_stats->fcoe.fcoe_tx_pkt_cnt + (HILO_U64(fcoe_stats_mfw->tx_frames_hi, fcoe_stats_mfw->tx_frames_lo ));
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_unicast_frames, sum_64 );

    sum_64 = stats.IfHCOutUcastOctets + fw_stats->fcoe.fcoe_tx_byte_cnt + (HILO_U64(fcoe_stats_mfw->tx_bytes_hi, fcoe_stats_mfw->tx_bytes_lo ));
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_unicast_bytes, sum_64 );

    // TX multicast/broadcast come straight from the L2 chip counters.
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_multicast_frames, stats.IfHCOutMulticastPkts );
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_multicast_bytes, stats.IfHCOutMulticastOctets );

    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_broadcast_frames, stats.IfHCOutBroadcastPkts );
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_broadcast_bytes, stats.IfHCOutBroadcastOctets );

    // No discard counter is tracked for TX; report zero.
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_frames_discarded, 0 );

    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->tx_frames_dropped, fw_stats->eth_xstorm_common.client_statistics[LM_CLI_IDX_NDIS].error_drop_pkts);

    // RX unicast = L2 chip counters + FW FCoE counters + MFW-mirrored FCoE counters.
    sum_64 = stats.IfHCInUcastPkts + fw_stats->fcoe.fcoe_rx_pkt_cnt + (HILO_U64( fcoe_stats_mfw->rx_frames_hi, fcoe_stats_mfw->rx_frames_lo ));
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_unicast_frames, sum_64 );

    sum_64 = stats.IfHCInUcastOctets + fw_stats->fcoe.fcoe_rx_byte_cnt + (HILO_U64( fcoe_stats_mfw->rx_bytes_hi, fcoe_stats_mfw->rx_bytes_lo ));
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_unicast_bytes, sum_64 );

    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_multicast_frames, stats.IfHCInMulticastPkts );
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_multicast_bytes, stats.IfHCInMulticastOctets );

    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_broadcast_frames, stats.IfHCInBroadcastPkts );
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_broadcast_bytes, stats.IfHCInBroadcastOctets );

    // RX discards aggregate TTL0, oversize and checksum discards.
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_frames_discarded, stats.IfInTTL0Discards +
                                                              stats.EtherStatsOverrsizePkts +
                                                              fw_stats->eth_tstorm_common.client_statistics[LM_CLI_IDX_NDIS].checksum_discard);

    // RX drops aggregate MBUF discards and FCoE tstorm/ustorm drops.
    NIV_STATS_ASSIGN_HI_LO(p_afex_stats->rx_frames_dropped, stats.IfInMBUFDiscards+
                                                            fw_stats->fcoe.fcoe_rx_drop_pkt_cnt_tstorm +
                                                            fw_stats->fcoe.fcoe_rx_drop_pkt_cnt_ustorm );
}
344
/**lm_niv_stats_get
 * Update NIV statistics in SHMEM. This function runs in PASSIVE
 * IRQL as an LPME.
 *
 * @param pdev the device to use
 */
static void lm_niv_stats_get(lm_device_t *pdev)
{
    u32_t mcp_resp = 0;
    u32_t output_offset = 0;
    u32_t *field_ptr = NULL;
    int bytes_written = 0;
    const u32_t func_mailbox_id = FUNC_MAILBOX_ID(pdev);
    const u32_t offset = OFFSETOF(shmem2_region_t, afex_scratchpad_addr_to_write[func_mailbox_id]);
    struct afex_stats afex_stats_var = {0};

    // verify that change in struct afex_stats won't corrupt our small stack
    // NOTE(review): the comment above describes a stack guard, but the
    // assertion enforces a *minimum* size (>= 100), not a maximum --
    // confirm whether an upper bound was intended.
    ASSERT_STATIC( sizeof(afex_stats_var) >= 100 );

    lm_niv_chip_stats_to_niv_stats(pdev, &afex_stats_var);

    ///Read from SHMEM2 the address where the response should be placed
    LM_SHMEM2_READ(pdev, offset, &output_offset);

    ///Write the response to the scratchpad field by field, one u32 at a time.
    field_ptr = (u32_t*)&afex_stats_var;
    for (bytes_written = 0; bytes_written < sizeof(afex_stats_var); bytes_written += sizeof(u32_t))
    {
        REG_WR(pdev, output_offset + bytes_written, *field_ptr);
        ++field_ptr;
    }
    ///ACK the MCP message
    lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
    DbgBreakIf(mcp_resp != FW_MSG_CODE_AFEX_STATSGET_ACK);
}
380
381 /**lm_niv_vif_list_set
382 * Modify local information about VIF lists. This function runs
383 * in PASSIVE IRQL as an LPME. (PMF only)
384 *
385 * @param pdev the device to use
386 */
lm_niv_vif_list_set(lm_device_t * pdev)387 static void lm_niv_vif_list_set(lm_device_t *pdev)
388 {
389 lm_status_t lm_status = LM_STATUS_FAILURE;
390 u32_t list_idx = 0;
391 u32_t list_bitmap = 0;
392 u32_t mcp_resp = 0;
393 const u32_t func_mailbox_id = FUNC_MAILBOX_ID(pdev);
394 u32_t offset = 0;
395
396 ///Read VIF list id+bitfield from SHMEM2
397 offset = OFFSETOF(struct shmem2_region, afex_param1_to_driver[func_mailbox_id]);
398 LM_SHMEM2_READ(pdev, offset, &list_idx);
399 DbgBreakIf(list_idx > 0xFFFF);
400
401 offset = OFFSETOF(struct shmem2_region, afex_param2_to_driver[func_mailbox_id]);
402 LM_SHMEM2_READ(pdev, offset, &list_bitmap);
403 DbgBreakIf(list_bitmap > 0xFF);
404
405 ///Send a vif-list ramrod with VIF_LIST_RULE_SET opcode and wait for completion
406 lm_status = lm_niv_vif_list_update(pdev, VIF_LIST_RULE_SET,(u16_t)list_idx, (u8_t)list_bitmap,0);
407 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
408
409 ///ACK the MCP message
410 lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
411 DbgBreakIf(mcp_resp != FW_MSG_CODE_AFEX_LISTSET_ACK);
412 }
413
414 /**lm_niv_vif_list_get
415 * Update NIV statistics in SHMEM. This function runs in PASSIVE
416 * IRQL as an LPME.
417 *
418 * @param pdev the device to use
419 *
420 */
lm_niv_vif_list_get(lm_device_t * pdev)421 static void lm_niv_vif_list_get(lm_device_t *pdev)
422 {
423 lm_status_t lm_status = LM_STATUS_FAILURE;
424 u32_t list_idx = 0;
425 u32_t mcp_resp = 0;
426 const u32_t func_mailbox_id = FUNC_MAILBOX_ID(pdev);
427 const u32_t offset = OFFSETOF(struct shmem2_region, afex_param1_to_driver[func_mailbox_id]);
428
429 ///Read list ID from SHMEM2
430 LM_SHMEM2_READ(pdev, offset, &list_idx);
431 DbgBreakIf(list_idx > 0xFFFF);
432
433 ///Send a vif-list ramrod with VIF_LIST_RULE_GET opcode and wait for completion
434 lm_status = lm_niv_vif_list_update(pdev, VIF_LIST_RULE_GET, (u16_t)list_idx, 0, 0);
435 DbgBreakIf (LM_STATUS_SUCCESS != lm_status);
436
437 ///Write response to SHMEM and ACK the MCP message
438 lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_AFEX_LISTGET_ACK, pdev->slowpath_info.last_vif_list_bitmap, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
439 DbgBreakIf(mcp_resp != FW_MSG_CODE_AFEX_LISTGET_ACK);
440 }
441
442 /**lm_niv_vif_set
443 * Handle a VIF-SET command. This function runs in PASSIVE IRQL
444 * as an LPME.
445 *
446 * @param pdev the device to use
447 */
lm_niv_vif_set(lm_device_t * pdev)448 static void lm_niv_vif_set(lm_device_t *pdev)
449 {
450 //lm_status_t lm_status = LM_STATUS_FAILURE;
451 u32_t func_mf_config = 0;
452 u32_t mcp_resp = 0;
453 u32_t val = 0;
454 const u32_t abs_func_id = ABS_FUNC_ID(pdev);
455 const u32_t offset = OFFSETOF(mf_cfg_t, func_mf_config[abs_func_id].config);
456
457 ///read FUNC-DISABLED and FUNC-DELETED from func_mf_cfg
458 LM_MFCFG_READ(pdev, offset, &func_mf_config);
459
460 pdev->hw_info.mf_info.func_mf_cfg = func_mf_config ;
461
462 ///if it's enable, call lm_niv_vif_enable
463 ///if it's disable, call lm_niv_vif_disable
464 ///if it's delete, call lm_niv_vif_delete
465 val = GET_FLAGS(func_mf_config, FUNC_MF_CFG_FUNC_DISABLED|FUNC_MF_CFG_FUNC_DELETED);
466 switch(val)
467 {
468 case FUNC_MF_CFG_FUNC_DISABLED:
469 {
470 lm_niv_vif_disable(pdev);
471 }
472 break;
473
474 case FUNC_MF_CFG_FUNC_DELETED|FUNC_MF_CFG_FUNC_DISABLED:
475 {
476 lm_niv_vif_delete(pdev);
477 }
478 break;
479
480 case 0: //neither=enabled
481 {
482 lm_niv_vif_enable(pdev);
483 }
484 break;
485
486 default:
487 {
488 DbgBreakIf(1);//invalid value - FUNC_DELETED without FUNC_DISABLED
489 }
490 break;
491 }
492
493 ///ACK the MCP message
494 lm_mcp_cmd_send_recieve(pdev, lm_mcp_mb_header, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0, MCP_CMD_DEFAULT_TIMEOUT, &mcp_resp);
495 DbgBreakIf(mcp_resp != FW_MSG_CODE_AFEX_VIFSET_ACK);
496 }
497
/* Maps a NIV-related DRIVER_STATUS flag to the handler that services
 * it; used by lm_niv_event to schedule handlers as LPME work items. */
typedef struct _lm_niv_event_function_t
{
    u32_t niv_event_flag;           // a DRV_STATUS_AFEX_* request flag
    void (*function)(lm_device_t*); // handler registered via MM_REGISTER_LPME
} lm_niv_event_function_t;
503
504 /**lm_niv_event
505 * handle a NIV-related MCP general attention by scheduling the
506 * appropriate work item.
507 *
508 * @param pdev the device to use
509 * @param niv_event the DRIVER_STATUS flags that the MCP sent.
510 * It's assumed that only NIV-related flags are
511 * set.
512 *
513 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
514 * failure code on failure.
515 */
lm_niv_event(lm_device_t * pdev,const u32_t niv_event)516 lm_status_t lm_niv_event(lm_device_t *pdev, const u32_t niv_event)
517 {
518 lm_status_t lm_status = LM_STATUS_FAILURE;
519 u32_t event_idx = 0;
520 u32_t handled_events = 0;
521 u32_t cur_event = 0;
522 static const lm_niv_event_function_t event_functions_arr[] = { {DRV_STATUS_AFEX_VIFSET_REQ, lm_niv_vif_set},
523 {DRV_STATUS_AFEX_LISTGET_REQ, lm_niv_vif_list_get},
524 {DRV_STATUS_AFEX_LISTSET_REQ, lm_niv_vif_list_set},
525 {DRV_STATUS_AFEX_STATSGET_REQ, lm_niv_stats_get},
526 };
527
528 //for every possible flag: if it's set, schedule a WI with the associated function and set the same flag in handled_events
529 for (event_idx = 0; event_idx < ARRSIZE(event_functions_arr); ++event_idx)
530 {
531 cur_event = event_functions_arr[event_idx].niv_event_flag;
532
533 if (GET_FLAGS(niv_event, cur_event))
534 {
535 lm_status = MM_REGISTER_LPME(pdev, event_functions_arr[event_idx].function, TRUE, TRUE);
536 if (lm_status != LM_STATUS_SUCCESS)
537 {
538 DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
539 return lm_status;
540 }
541 SET_FLAGS(handled_events, cur_event);
542 }
543 }
544
545 //make sure there we no unknown events set.
546 if (handled_events != niv_event)
547 {
548 DbgBreakIf(handled_events != niv_event);
549 return LM_STATUS_INVALID_PARAMETER;
550 }
551
552 return lm_status;
553 }
554