// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <linux/iopoll.h>

#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl2_utils.h"
#include "hw_atl2_llh.h"
#include "hw_atl2_internal.h"

#define AQ_A2_FW_READ_TRY_MAX 1000

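/* Accessors for the host/firmware shared buffer. All transfers are done in
 * 32-bit words; the BUILD_BUG_ON_MSG() checks below reject, at compile time,
 * any structure member that is not 32-bit aligned or not a whole number of
 * 32-bit words long.
 */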
#define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \
{\
        BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \
                         sizeof(u32)) != 0,\
                         "Unaligned write " # ITEM);\
        BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\
                         "Unaligned write length " # ITEM);\
        hw_atl2_mif_shared_buf_write(HW,\
                (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
                (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
}

#define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \
{\
        BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \
                         sizeof(u32)) != 0,\
                         "Unaligned get " # ITEM);\
        BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\
                         "Unaligned get length " # ITEM);\
        hw_atl2_mif_shared_buf_get(HW, \
                (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
                (u32 *)&(VARIABLE), \
                sizeof(VARIABLE) / sizeof(u32));\
}

/* This should never be used on non-atomic fields; treat any read wider
 * than u32 as non-atomic.
 */
#define hw_atl2_shared_buffer_read(HW, ITEM, VARIABLE) \
{\
        BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
                         sizeof(u32)) != 0,\
                         "Unaligned read " # ITEM);\
        BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\
                         "Unaligned read length " # ITEM);\
        BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\
                         "Non atomic read " # ITEM);\
        hw_atl2_mif_shared_buf_read(HW, \
                (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
                (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
}

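/* Torn-read-safe accessor for multi-word items of the firmware output area;
 * it delegates to hw_atl2_shared_buffer_read_block() below, which uses the
 * firmware transaction counter to detect concurrent updates.
 */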
#define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \
({\
        BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
                         sizeof(u32)) != 0,\
                         "Unaligned read_safe " # ITEM);\
        BUILD_BUG_ON_MSG((sizeof(((struct fw_interface_out *)0)->ITEM) % \
                         sizeof(u32)) != 0,\
                         "Unaligned read_safe length " # ITEM);\
        hw_atl2_shared_buffer_read_block((HW), \
                (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
                sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\
                (DATA));\
})

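/* Read @dwords 32-bit words at @offset from the shared output buffer.
 * The transaction counter is sampled before and after the copy: the data is
 * only accepted once both counter halves match and the counter did not move
 * while the block was being read. Gives up with -ETIME after
 * AQ_A2_FW_READ_TRY_MAX attempts.
 */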
static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
                                            u32 offset, u32 dwords, void *data)
{
        struct transaction_counter_s tid1, tid2;
        int cnt = 0;

        do {
                do {
                        hw_atl2_shared_buffer_read(self, transaction_id, tid1);
                        cnt++;
                        if (cnt > AQ_A2_FW_READ_TRY_MAX)
                                return -ETIME;
                        if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
                                mdelay(1);
                } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);

                hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);

                hw_atl2_shared_buffer_read(self, transaction_id, tid2);

                cnt++;
                if (cnt > AQ_A2_FW_READ_TRY_MAX)
                        return -ETIME;
        } while (tid2.transaction_cnt_a != tid2.transaction_cnt_b ||
                 tid1.transaction_cnt_a != tid2.transaction_cnt_a);

        return 0;
}

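/* Signal the firmware that the host has finished writing its request, then
 * poll until the MCP clears its "finished" flag, i.e. the request has been
 * consumed.
 */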
static inline int hw_atl2_shared_buffer_finish_ack(struct aq_hw_s *self)
{
        u32 val;
        int err;

        hw_atl2_mif_host_finished_write_set(self, 1U);
        err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
                                        self, val, val == 0U,
                                        100, 100000U);
        WARN(err, "hw_atl2_shared_buffer_finish_ack");

        return err;
}

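/* Host-side firmware init: switch the firmware to active host mode and
 * program the jumbo MTU, then wait (up to 5 s here, rather than the usual
 * 100 ms ack timeout) for the firmware to pick up the request.
 */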
static int aq_a2_fw_init(struct aq_hw_s *self)
{
        struct link_control_s link_control;
        u32 mtu;
        u32 val;
        int err;

        hw_atl2_shared_buffer_get(self, link_control, link_control);
        link_control.mode = AQ_HOST_MODE_ACTIVE;
        hw_atl2_shared_buffer_write(self, link_control, link_control);

        hw_atl2_shared_buffer_get(self, mtu, mtu);
        mtu = HW_ATL2_MTU_JUMBO;
        hw_atl2_shared_buffer_write(self, mtu, mtu);

        hw_atl2_mif_host_finished_write_set(self, 1U);
        err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get,
                                        self, val, val == 0U,
                                        100, 5000000U);
        WARN(err, "hw_atl2_shared_buffer_finish_ack");

        return err;
}

static int aq_a2_fw_deinit(struct aq_hw_s *self)
{
        struct link_control_s link_control;

        hw_atl2_shared_buffer_get(self, link_control, link_control);
        link_control.mode = AQ_HOST_MODE_SHUTDOWN;
        hw_atl2_shared_buffer_write(self, link_control, link_control);

        return hw_atl2_shared_buffer_finish_ack(self);
}

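/* Translate an AQ_NIC_RATE_* bitmask into the per-rate bits of the firmware
 * link_options structure. The "N" variants (rate_N5G, rate_N2P5G) simply
 * mirror their base rates.
 */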
static void a2_link_speed_mask2fw(u32 speed,
                                  struct link_options_s *link_options)
{
        link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
        link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
        link_options->rate_N5G = link_options->rate_5G;
        link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
        link_options->rate_N2P5G = link_options->rate_2P5G;
        link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
        link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
        link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M);

        link_options->rate_1G_hd = !!(speed & AQ_NIC_RATE_1G_HALF);
        link_options->rate_100M_hd = !!(speed & AQ_NIC_RATE_100M_HALF);
        link_options->rate_10M_hd = !!(speed & AQ_NIC_RATE_10M_HALF);
}

static u32 a2_fw_dev_to_eee_mask(struct device_link_caps_s *device_link_caps)
{
        u32 rate = 0;

        if (device_link_caps->eee_10G)
                rate |= AQ_NIC_RATE_EEE_10G;
        if (device_link_caps->eee_5G)
                rate |= AQ_NIC_RATE_EEE_5G;
        if (device_link_caps->eee_2P5G)
                rate |= AQ_NIC_RATE_EEE_2G5;
        if (device_link_caps->eee_1G)
                rate |= AQ_NIC_RATE_EEE_1G;
        if (device_link_caps->eee_100M)
                rate |= AQ_NIC_RATE_EEE_100M;

        return rate;
}

static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
{
        u32 rate = 0;

        if (lkp_link_caps->rate_10G)
                rate |= AQ_NIC_RATE_10G;
        if (lkp_link_caps->rate_5G)
                rate |= AQ_NIC_RATE_5G;
        if (lkp_link_caps->rate_2P5G)
                rate |= AQ_NIC_RATE_2G5;
        if (lkp_link_caps->rate_1G)
                rate |= AQ_NIC_RATE_1G;
        if (lkp_link_caps->rate_1G_hd)
                rate |= AQ_NIC_RATE_1G_HALF;
        if (lkp_link_caps->rate_100M)
                rate |= AQ_NIC_RATE_100M;
        if (lkp_link_caps->rate_100M_hd)
                rate |= AQ_NIC_RATE_100M_HALF;
        if (lkp_link_caps->rate_10M)
                rate |= AQ_NIC_RATE_10M;
        if (lkp_link_caps->rate_10M_hd)
                rate |= AQ_NIC_RATE_10M_HALF;

        if (lkp_link_caps->eee_10G)
                rate |= AQ_NIC_RATE_EEE_10G;
        if (lkp_link_caps->eee_5G)
                rate |= AQ_NIC_RATE_EEE_5G;
        if (lkp_link_caps->eee_2P5G)
                rate |= AQ_NIC_RATE_EEE_2G5;
        if (lkp_link_caps->eee_1G)
                rate |= AQ_NIC_RATE_EEE_1G;
        if (lkp_link_caps->eee_100M)
                rate |= AQ_NIC_RATE_EEE_100M;

        return rate;
}

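/* Advertise a new rate mask to the firmware with the link requested up, then
 * wait for the firmware to acknowledge the updated link options.
 */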
static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
{
        struct link_options_s link_options;

        hw_atl2_shared_buffer_get(self, link_options, link_options);
        link_options.link_up = 1U;
        a2_link_speed_mask2fw(speed, &link_options);
        hw_atl2_shared_buffer_write(self, link_options, link_options);

        return hw_atl2_shared_buffer_finish_ack(self);
}

static void aq_a2_fw_set_mpi_flow_control(struct aq_hw_s *self,
                                          struct link_options_s *link_options)
{
        u32 flow_control = self->aq_nic_cfg->fc.req;

        link_options->pause_rx = !!(flow_control & AQ_NIC_FC_RX);
        link_options->pause_tx = !!(flow_control & AQ_NIC_FC_TX);
}

static void aq_a2_fw_upd_eee_rate_bits(struct aq_hw_s *self,
                                       struct link_options_s *link_options,
                                       u32 eee_speeds)
{
        link_options->eee_10G = !!(eee_speeds & AQ_NIC_RATE_EEE_10G);
        link_options->eee_5G = !!(eee_speeds & AQ_NIC_RATE_EEE_5G);
        link_options->eee_2P5G = !!(eee_speeds & AQ_NIC_RATE_EEE_2G5);
        link_options->eee_1G = !!(eee_speeds & AQ_NIC_RATE_EEE_1G);
        link_options->eee_100M = !!(eee_speeds & AQ_NIC_RATE_EEE_100M);
}

static int aq_a2_fw_set_state(struct aq_hw_s *self,
                              enum hal_atl_utils_fw_state_e state)
{
        struct link_options_s link_options;

        hw_atl2_shared_buffer_get(self, link_options, link_options);

        switch (state) {
        case MPI_INIT:
                link_options.link_up = 1U;
                aq_a2_fw_upd_eee_rate_bits(self, &link_options,
                                           self->aq_nic_cfg->eee_speeds);
                aq_a2_fw_set_mpi_flow_control(self, &link_options);
                break;
        case MPI_DEINIT:
                link_options.link_up = 0U;
                break;
        case MPI_RESET:
        case MPI_POWER:
                /* No actions */
                break;
        }

        hw_atl2_shared_buffer_write(self, link_options, link_options);

        return hw_atl2_shared_buffer_finish_ack(self);
}

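/* Convert the firmware-reported link rate into Mbps and capture the link
 * partner's advertised rates and pause settings.
 */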
static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
{
        struct lkp_link_caps_s lkp_link_caps;
        struct link_status_s link_status;

        hw_atl2_shared_buffer_read(self, link_status, link_status);

        switch (link_status.link_rate) {
        case AQ_A2_FW_LINK_RATE_10G:
                self->aq_link_status.mbps = 10000;
                break;
        case AQ_A2_FW_LINK_RATE_5G:
                self->aq_link_status.mbps = 5000;
                break;
        case AQ_A2_FW_LINK_RATE_2G5:
                self->aq_link_status.mbps = 2500;
                break;
        case AQ_A2_FW_LINK_RATE_1G:
                self->aq_link_status.mbps = 1000;
                break;
        case AQ_A2_FW_LINK_RATE_100M:
                self->aq_link_status.mbps = 100;
                break;
        case AQ_A2_FW_LINK_RATE_10M:
                self->aq_link_status.mbps = 10;
                break;
        default:
                self->aq_link_status.mbps = 0;
        }
        self->aq_link_status.full_duplex = link_status.duplex;

        hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps);

        self->aq_link_status.lp_link_speed_msk =
                a2_fw_lkp_to_mask(&lkp_link_caps);
        self->aq_link_status.lp_flow_control =
                ((lkp_link_caps.pause_rx) ? AQ_NIC_FC_RX : 0) |
                ((lkp_link_caps.pause_tx) ? AQ_NIC_FC_TX : 0);

        return 0;
}

static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
{
        struct mac_address_aligned_s mac_address;

        hw_atl2_shared_buffer_get(self, mac_address, mac_address);
        ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address);

        return 0;
}

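/* The A0 and B0 firmware interfaces expose different statistics layouts.
 * Both helpers below accumulate the delta against the previously stored
 * snapshot; if any counter appears to have gone backwards, the whole update
 * is treated as corrupted and discarded.
 */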
static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
                                struct statistics_s *stats)
{
        struct hw_atl2_priv *priv = self->priv;
        struct aq_stats_s *cs = &self->curr_stats;
        struct aq_stats_s curr_stats = *cs;
        bool corrupted_stats = false;

#define AQ_SDELTA(_N, _F) \
do { \
        if (!corrupted_stats && \
            ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
                curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
        else \
                corrupted_stats = true; \
} while (0)

        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc, rx_unicast_frames);
                AQ_SDELTA(mprc, rx_multicast_frames);
                AQ_SDELTA(bprc, rx_broadcast_frames);
                AQ_SDELTA(erpr, rx_error_frames);

                AQ_SDELTA(uptc, tx_unicast_frames);
                AQ_SDELTA(mptc, tx_multicast_frames);
                AQ_SDELTA(bptc, tx_broadcast_frames);
                AQ_SDELTA(erpt, tx_errors);

                AQ_SDELTA(ubrc, rx_unicast_octets);
                AQ_SDELTA(ubtc, tx_unicast_octets);
                AQ_SDELTA(mbrc, rx_multicast_octets);
                AQ_SDELTA(mbtc, tx_multicast_octets);
                AQ_SDELTA(bbrc, rx_broadcast_octets);
                AQ_SDELTA(bbtc, tx_broadcast_octets);

                if (!corrupted_stats)
                        *cs = curr_stats;
        }
#undef AQ_SDELTA
}

static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
                                struct statistics_s *stats)
{
        struct hw_atl2_priv *priv = self->priv;
        struct aq_stats_s *cs = &self->curr_stats;
        struct aq_stats_s curr_stats = *cs;
        bool corrupted_stats = false;

#define AQ_SDELTA(_N, _F) \
do { \
        if (!corrupted_stats && \
            ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
                curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
        else \
                corrupted_stats = true; \
} while (0)

        if (self->aq_link_status.mbps) {
                AQ_SDELTA(uprc, rx_unicast_frames);
                AQ_SDELTA(mprc, rx_multicast_frames);
                AQ_SDELTA(bprc, rx_broadcast_frames);
                AQ_SDELTA(erpr, rx_errors);
                AQ_SDELTA(brc, rx_good_octets);

                AQ_SDELTA(uptc, tx_unicast_frames);
                AQ_SDELTA(mptc, tx_multicast_frames);
                AQ_SDELTA(bptc, tx_broadcast_frames);
                AQ_SDELTA(erpt, tx_errors);
                AQ_SDELTA(btc, tx_good_octets);

                if (!corrupted_stats)
                        *cs = curr_stats;
        }
#undef AQ_SDELTA
}

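/* Read the firmware interface version to decide which statistics layout to
 * parse, then complement the firmware counters with the DMA good packet,
 * good octet and drop counters read directly from hardware registers.
 */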
static int aq_a2_fw_update_stats(struct aq_hw_s *self)
{
        struct aq_stats_s *cs = &self->curr_stats;
        struct hw_atl2_priv *priv = self->priv;
        struct statistics_s stats;
        struct version_s version;
        int err;

        err = hw_atl2_shared_buffer_read_safe(self, version, &version);
        if (err)
                return err;

        err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
        if (err)
                return err;

        if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
                aq_a2_fill_a0_stats(self, &stats);
        else
                aq_a2_fill_b0_stats(self, &stats);

        cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
        cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
        cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
        cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
        cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);

        memcpy(&priv->last_stats, &stats, sizeof(stats));

        return 0;
}

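/* The firmware reports the temperature as a signed byte in degrees Celsius;
 * scale it to millidegrees for the caller.
 */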
static int aq_a2_fw_get_phy_temp(struct aq_hw_s *self, int *temp)
{
        struct phy_health_monitor_s phy_health_monitor;

        hw_atl2_shared_buffer_read_safe(self, phy_health_monitor,
                                        &phy_health_monitor);

        *temp = (int8_t)phy_health_monitor.phy_temperature * 1000;
        return 0;
}

static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
{
        /* There's only one temperature sensor on A2, use it for
         * both MAC and PHY.
         */
        return aq_a2_fw_get_phy_temp(self, temp);
}

static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
{
        struct link_options_s link_options;

        hw_atl2_shared_buffer_get(self, link_options, link_options);

        aq_a2_fw_upd_eee_rate_bits(self, &link_options, speed);

        hw_atl2_shared_buffer_write(self, link_options, link_options);

        return hw_atl2_shared_buffer_finish_ack(self);
}

static int aq_a2_fw_get_eee_rate(struct aq_hw_s *self, u32 *rate,
                                 u32 *supported_rates)
{
        struct device_link_caps_s device_link_caps;
        struct lkp_link_caps_s lkp_link_caps;

        hw_atl2_shared_buffer_read(self, device_link_caps, device_link_caps);
        hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps);

        *supported_rates = a2_fw_dev_to_eee_mask(&device_link_caps);
        *rate = a2_fw_lkp_to_mask(&lkp_link_caps);

        return 0;
}

static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
{
        struct link_options_s link_options;
        int err;

        hw_atl2_shared_buffer_get(self, link_options, link_options);
        link_options.link_renegotiate = 1U;
        hw_atl2_shared_buffer_write(self, link_options, link_options);

        err = hw_atl2_shared_buffer_finish_ack(self);

        /* The renegotiate flag must be put back to zero once the command
         * has completed.
         */
        link_options.link_renegotiate = 0U;
        hw_atl2_shared_buffer_write(self, link_options, link_options);

        return err;
}

static int aq_a2_fw_set_flow_control(struct aq_hw_s *self)
{
        struct link_options_s link_options;

        hw_atl2_shared_buffer_get(self, link_options, link_options);

        aq_a2_fw_set_mpi_flow_control(self, &link_options);

        hw_atl2_shared_buffer_write(self, link_options, link_options);

        return hw_atl2_shared_buffer_finish_ack(self);
}

static u32 aq_a2_fw_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
{
        struct link_status_s link_status;

        hw_atl2_shared_buffer_read(self, link_status, link_status);

        *fcmode = ((link_status.pause_rx) ? AQ_NIC_FC_RX : 0) |
                  ((link_status.pause_tx) ? AQ_NIC_FC_TX : 0);
        return 0;
}

static int aq_a2_fw_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
{
        struct link_options_s link_options;

        hw_atl2_shared_buffer_get(self, link_options, link_options);

        switch (mode) {
        case AQ_HW_LOOPBACK_PHYINT_SYS:
                link_options.internal_loopback = enable;
                break;
        case AQ_HW_LOOPBACK_PHYEXT_SYS:
                link_options.external_loopback = enable;
                break;
        default:
                return -EINVAL;
        }

        hw_atl2_shared_buffer_write(self, link_options, link_options);

        return hw_atl2_shared_buffer_finish_ack(self);
}

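/* Report the firmware bundle version in the driver's packed u32 format:
 * major << 24 | minor << 16 | build.
 */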
u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
{
        struct version_s version;

        hw_atl2_shared_buffer_read_safe(self, version, &version);

        /* A2 FW version is stored in reverse order */
        return version.bundle.major << 24 |
               version.bundle.minor << 16 |
               version.bundle.build;
}

int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
                                                u8 *base_index, u8 *count)
{
        struct filter_caps_s filter_caps;
        int err;

        err = hw_atl2_shared_buffer_read_safe(self, filter_caps, &filter_caps);
        if (err)
                return err;

        *base_index = filter_caps.rslv_tbl_base_index;
        *count = filter_caps.rslv_tbl_count;
        return 0;
}

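/* A non-zero retry counter enables downshift with that many retries;
 * zero disables it.
 */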
static int aq_a2_fw_set_downshift(struct aq_hw_s *self, u32 counter)
{
        struct link_options_s link_options;

        hw_atl2_shared_buffer_get(self, link_options, link_options);
        link_options.downshift = !!counter;
        link_options.downshift_retry = counter;
        hw_atl2_shared_buffer_write(self, link_options, link_options);

        return hw_atl2_shared_buffer_finish_ack(self);
}

const struct aq_fw_ops aq_a2_fw_ops = {
        .init               = aq_a2_fw_init,
        .deinit             = aq_a2_fw_deinit,
        .reset              = NULL,
        .renegotiate        = aq_a2_fw_renegotiate,
        .get_mac_permanent  = aq_a2_fw_get_mac_permanent,
        .set_link_speed     = aq_a2_fw_set_link_speed,
        .set_state          = aq_a2_fw_set_state,
        .update_link_status = aq_a2_fw_update_link_status,
        .update_stats       = aq_a2_fw_update_stats,
        .get_mac_temp       = aq_a2_fw_get_mac_temp,
        .get_phy_temp       = aq_a2_fw_get_phy_temp,
        .set_eee_rate       = aq_a2_fw_set_eee_rate,
        .get_eee_rate       = aq_a2_fw_get_eee_rate,
        .set_flow_control   = aq_a2_fw_set_flow_control,
        .get_flow_control   = aq_a2_fw_get_flow_control,
        .set_phyloopback    = aq_a2_fw_set_phyloopback,
        .set_downshift      = aq_a2_fw_set_downshift,
};