1 /******************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2025, Intel Corporation
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Redistributions in binary form must reproduce the above copyright
14 notice, this list of conditions and the following disclaimer in the
15 documentation and/or other materials provided with the distribution.
16
17 3. Neither the name of the Intel Corporation nor the names of its
18 contributors may be used to endorse or promote products derived from
19 this software without specific prior written permission.
20
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE.
32
33 ******************************************************************************/
34
35 #include "ixgbe_type.h"
36 #include "ixgbe_e610.h"
37 #include "ixgbe_x550.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 #include "ixgbe_api.h"
41
42 /**
43 * ixgbe_init_aci - initialization routine for Admin Command Interface
44 * @hw: pointer to the hardware structure
45 *
46 * Initialize the ACI lock.
47 */
ixgbe_init_aci(struct ixgbe_hw * hw)48 void ixgbe_init_aci(struct ixgbe_hw *hw)
49 {
50 ixgbe_init_lock(&hw->aci.lock);
51 }
52
53 /**
54 * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
55 * @hw: pointer to the hardware structure
56 *
57 * Destroy the ACI lock.
58 */
ixgbe_shutdown_aci(struct ixgbe_hw * hw)59 void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
60 {
61 ixgbe_destroy_lock(&hw->aci.lock);
62 }
63
64 /**
65 * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
66 * be resent
67 * @opcode: ACI opcode
68 *
69 * Check if ACI command should be sent again depending on the provided opcode.
70 *
71 * Return: true if the sending command routine should be repeated,
72 * otherwise false.
73 */
ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)74 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
75 {
76 switch (opcode) {
77 case ixgbe_aci_opc_disable_rxen:
78 case ixgbe_aci_opc_get_phy_caps:
79 case ixgbe_aci_opc_get_link_status:
80 case ixgbe_aci_opc_get_link_topo:
81 return true;
82 }
83
84 return false;
85 }
86
87 /**
88 * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
89 * Command Interface
90 * @hw: pointer to the HW struct
91 * @desc: descriptor describing the command
92 * @buf: buffer to use for indirect commands (NULL for direct commands)
93 * @buf_size: size of buffer for indirect commands (0 for direct commands)
94 *
95 * Admin Command is sent using CSR by setting descriptor and buffer in specific
96 * registers.
97 *
98 * Return: the exit code of the operation.
99 * * - IXGBE_SUCCESS - success.
100 * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
101 * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
102 * * - IXGBE_ERR_PARAM - buf_size is too big or
103 * invalid argument buf or buf_size.
104 * * - IXGBE_ERR_ACI_TIMEOUT - Admin Command X command timeout.
105 * * - IXGBE_ERR_ACI_ERROR - Admin Command X invalid state of HICR register or
106 * Admin Command failed because of bad opcode was returned or
107 * Admin Command failed with error Y.
108 */
109 static s32
ixgbe_aci_send_cmd_execute(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)110 ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
111 void *buf, u16 buf_size)
112 {
113 u32 hicr = 0, tmp_buf_size = 0, i = 0;
114 u32 *raw_desc = (u32 *)desc;
115 s32 status = IXGBE_SUCCESS;
116 bool valid_buf = false;
117 u32 *tmp_buf = NULL;
118 u16 opcode = 0;
119
120 do {
121 hw->aci.last_status = IXGBE_ACI_RC_OK;
122
123 /* It's necessary to check if mechanism is enabled */
124 hicr = IXGBE_READ_REG(hw, PF_HICR);
125 if (!(hicr & PF_HICR_EN)) {
126 status = IXGBE_ERR_ACI_DISABLED;
127 break;
128 }
129 if (hicr & PF_HICR_C) {
130 hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
131 status = IXGBE_ERR_ACI_BUSY;
132 break;
133 }
134 opcode = desc->opcode;
135
136 if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
137 status = IXGBE_ERR_PARAM;
138 break;
139 }
140
141 if (buf)
142 desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
143
144 /* Check if buf and buf_size are proper params */
145 if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
146 if ((buf && buf_size == 0) ||
147 (buf == NULL && buf_size)) {
148 status = IXGBE_ERR_PARAM;
149 break;
150 }
151 if (buf && buf_size)
152 valid_buf = true;
153 }
154
155 if (valid_buf == true) {
156 if (buf_size % 4 == 0)
157 tmp_buf_size = buf_size;
158 else
159 tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
160
161 tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
162 if (!tmp_buf)
163 return IXGBE_ERR_OUT_OF_MEM;
164
165 /* tmp_buf will be firstly filled with 0xFF and after
166 * that the content of buf will be written into it.
167 * This approach lets us use valid buf_size and
168 * prevents us from reading past buf area
169 * when buf_size mod 4 not equal to 0.
170 */
171 memset(tmp_buf, 0xFF, tmp_buf_size);
172 memcpy(tmp_buf, buf, buf_size);
173
174 if (tmp_buf_size > IXGBE_ACI_LG_BUF)
175 desc->flags |=
176 IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
177
178 desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
179
180 if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
181 for (i = 0; i < tmp_buf_size / 4; i++) {
182 IXGBE_WRITE_REG(hw, PF_HIBA(i),
183 IXGBE_LE32_TO_CPU(tmp_buf[i]));
184 }
185 }
186 }
187
188 /* Descriptor is written to specific registers */
189 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
190 IXGBE_WRITE_REG(hw, PF_HIDA(i),
191 IXGBE_LE32_TO_CPU(raw_desc[i]));
192
193 /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
194 * PF_HICR_EV
195 */
196 hicr = IXGBE_READ_REG(hw, PF_HICR);
197 hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
198 IXGBE_WRITE_REG(hw, PF_HICR, hicr);
199
200 /* Wait for sync Admin Command response */
201 for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
202 hicr = IXGBE_READ_REG(hw, PF_HICR);
203 if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
204 break;
205
206 msec_delay(1);
207 }
208
209 /* Wait for async Admin Command response */
210 if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
211 for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
212 i += 1) {
213 hicr = IXGBE_READ_REG(hw, PF_HICR);
214 if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
215 break;
216
217 msec_delay(1);
218 }
219 }
220
221 /* Read sync Admin Command response */
222 if ((hicr & PF_HICR_SV)) {
223 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
224 raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
225 raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
226 }
227 }
228
229 /* Read async Admin Command response */
230 if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
231 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
232 raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
233 raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
234 }
235 }
236
237 /* Handle timeout and invalid state of HICR register */
238 if (hicr & PF_HICR_C) {
239 status = IXGBE_ERR_ACI_TIMEOUT;
240 break;
241 } else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
242 status = IXGBE_ERR_ACI_ERROR;
243 break;
244 }
245
246 /* For every command other than 0x0014 treat opcode mismatch
247 * as an error. Response to 0x0014 command read from HIDA_2
248 * is a descriptor of an event which is expected to contain
249 * different opcode than the command.
250 */
251 if (desc->opcode != opcode &&
252 opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
253 status = IXGBE_ERR_ACI_ERROR;
254 break;
255 }
256
257 if (desc->retval != IXGBE_ACI_RC_OK) {
258 hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
259 status = IXGBE_ERR_ACI_ERROR;
260 break;
261 }
262
263 /* Write a response values to a buf */
264 if (valid_buf && (desc->flags &
265 IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
266 for (i = 0; i < tmp_buf_size / 4; i++) {
267 tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
268 tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
269 }
270 memcpy(buf, tmp_buf, buf_size);
271 }
272 } while (0);
273
274 if (tmp_buf)
275 ixgbe_free(hw, tmp_buf);
276
277 return status;
278 }
279
280 /**
281 * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
282 * @hw: pointer to the HW struct
283 * @desc: descriptor describing the command
284 * @buf: buffer to use for indirect commands (NULL for direct commands)
285 * @buf_size: size of buffer for indirect commands (0 for direct commands)
286 *
287 * Helper function to send FW Admin Commands to the FW Admin Command Interface.
288 *
289 * Retry sending the FW Admin Command multiple times to the FW ACI
290 * if the EBUSY Admin Command error is returned.
291 *
292 * Return: the exit code of the operation.
293 */
ixgbe_aci_send_cmd(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)294 s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
295 void *buf, u16 buf_size)
296 {
297 struct ixgbe_aci_desc desc_cpy;
298 enum ixgbe_aci_err last_status;
299 bool is_cmd_for_retry;
300 u8 *buf_cpy = NULL;
301 s32 status;
302 u16 opcode;
303 u8 idx = 0;
304
305 opcode = IXGBE_LE16_TO_CPU(desc->opcode);
306 is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
307 memset(&desc_cpy, 0, sizeof(desc_cpy));
308
309 if (is_cmd_for_retry) {
310 if (buf) {
311 buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
312 if (!buf_cpy)
313 return IXGBE_ERR_OUT_OF_MEM;
314 }
315 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
316 }
317
318 do {
319 ixgbe_acquire_lock(&hw->aci.lock);
320 status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
321 last_status = hw->aci.last_status;
322 ixgbe_release_lock(&hw->aci.lock);
323
324 if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
325 (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
326 break;
327
328 if (buf)
329 memcpy(buf, buf_cpy, buf_size);
330 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
331
332 msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
333 } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
334
335 if (buf_cpy)
336 ixgbe_free(hw, buf_cpy);
337
338 return status;
339 }
340
341 /**
342 * ixgbe_aci_check_event_pending - check if there are any pending events
343 * @hw: pointer to the HW struct
344 *
345 * Determine if there are any pending events.
346 *
347 * Return: true if there are any currently pending events
348 * otherwise false.
349 */
ixgbe_aci_check_event_pending(struct ixgbe_hw * hw)350 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
351 {
352 u32 ep_bit_mask;
353 u32 fwsts;
354
355 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
356
357 /* Check state of Event Pending (EP) bit */
358 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
359 return (fwsts & ep_bit_mask) ? true : false;
360 }
361
362 /**
363 * ixgbe_aci_get_event - get an event from ACI
364 * @hw: pointer to the HW struct
365 * @e: event information structure
366 * @pending: optional flag signaling that there are more pending events
367 *
368 * Obtain an event from ACI and return its content
369 * through 'e' using ACI command (0x0014).
370 * Provide information if there are more events
371 * to retrieve through 'pending'.
372 *
373 * Return: the exit code of the operation.
374 */
ixgbe_aci_get_event(struct ixgbe_hw * hw,struct ixgbe_aci_event * e,bool * pending)375 s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
376 bool *pending)
377 {
378 struct ixgbe_aci_desc desc;
379 s32 status;
380
381 if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
382 return IXGBE_ERR_PARAM;
383
384 ixgbe_acquire_lock(&hw->aci.lock);
385
386 /* Check if there are any events pending */
387 if (!ixgbe_aci_check_event_pending(hw)) {
388 status = IXGBE_ERR_ACI_NO_EVENTS;
389 goto aci_get_event_exit;
390 }
391
392 /* Obtain pending event */
393 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
394 status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
395 if (status)
396 goto aci_get_event_exit;
397
398 /* Returned 0x0014 opcode indicates that no event was obtained */
399 if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
400 status = IXGBE_ERR_ACI_NO_EVENTS;
401 goto aci_get_event_exit;
402 }
403
404 /* Determine size of event data */
405 e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
406 /* Write event descriptor to event info structure */
407 memcpy(&e->desc, &desc, sizeof(e->desc));
408
409 /* Check if there are any further events pending */
410 if (pending) {
411 *pending = ixgbe_aci_check_event_pending(hw);
412 }
413
414 aci_get_event_exit:
415 ixgbe_release_lock(&hw->aci.lock);
416
417 return status;
418 }
419
420 /**
421 * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
422 * @desc: pointer to the temp descriptor (non DMA mem)
423 * @opcode: the opcode can be used to decide which flags to turn off or on
424 *
425 * Helper function to fill the descriptor desc with default values
426 * and the provided opcode.
427 */
ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc * desc,u16 opcode)428 void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
429 {
430 /* zero out the desc */
431 memset(desc, 0, sizeof(*desc));
432 desc->opcode = IXGBE_CPU_TO_LE16(opcode);
433 desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
434 }
435
436 /**
437 * ixgbe_aci_get_fw_ver - get the firmware version
438 * @hw: pointer to the HW struct
439 *
440 * Get the firmware version using ACI command (0x0001).
441 *
442 * Return: the exit code of the operation.
443 */
ixgbe_aci_get_fw_ver(struct ixgbe_hw * hw)444 s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
445 {
446 struct ixgbe_aci_cmd_get_ver *resp;
447 struct ixgbe_aci_desc desc;
448 s32 status;
449
450 resp = &desc.params.get_ver;
451
452 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
453
454 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
455
456 if (!status) {
457 hw->fw_branch = resp->fw_branch;
458 hw->fw_maj_ver = resp->fw_major;
459 hw->fw_min_ver = resp->fw_minor;
460 hw->fw_patch = resp->fw_patch;
461 hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
462 hw->api_branch = resp->api_branch;
463 hw->api_maj_ver = resp->api_major;
464 hw->api_min_ver = resp->api_minor;
465 hw->api_patch = resp->api_patch;
466 }
467
468 return status;
469 }
470
471 /**
472 * ixgbe_aci_send_driver_ver - send the driver version to firmware
473 * @hw: pointer to the HW struct
474 * @dv: driver's major, minor version
475 *
476 * Send the driver version to the firmware
477 * using the ACI command (0x0002).
478 *
479 * Return: the exit code of the operation.
480 * Returns IXGBE_ERR_PARAM, if dv is NULL.
481 */
ixgbe_aci_send_driver_ver(struct ixgbe_hw * hw,struct ixgbe_driver_ver * dv)482 s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
483 {
484 struct ixgbe_aci_cmd_driver_ver *cmd;
485 struct ixgbe_aci_desc desc;
486 u16 len;
487
488 cmd = &desc.params.driver_ver;
489
490 if (!dv)
491 return IXGBE_ERR_PARAM;
492
493 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
494
495 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
496 cmd->major_ver = dv->major_ver;
497 cmd->minor_ver = dv->minor_ver;
498 cmd->build_ver = dv->build_ver;
499 cmd->subbuild_ver = dv->subbuild_ver;
500
501 len = 0;
502 while (len < sizeof(dv->driver_string) &&
503 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
504 len++;
505
506 return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
507 }
508
509 /**
510 * ixgbe_aci_req_res - request a common resource
511 * @hw: pointer to the HW struct
512 * @res: resource ID
513 * @access: access type
514 * @sdp_number: resource number
515 * @timeout: the maximum time in ms that the driver may hold the resource
516 *
517 * Requests a common resource using the ACI command (0x0008).
518 * Specifies the maximum time the driver may hold the resource.
519 * If the requested resource is currently occupied by some other driver,
520 * a busy return value is returned and the timeout field value indicates the
521 * maximum time the current owner has to free it.
522 *
523 * Return: the exit code of the operation.
524 */
525 static s32
ixgbe_aci_req_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u8 sdp_number,u32 * timeout)526 ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
527 enum ixgbe_aci_res_access_type access, u8 sdp_number,
528 u32 *timeout)
529 {
530 struct ixgbe_aci_cmd_req_res *cmd_resp;
531 struct ixgbe_aci_desc desc;
532 s32 status;
533
534 cmd_resp = &desc.params.res_owner;
535
536 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
537
538 cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
539 cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
540 cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
541 cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
542 *timeout = 0;
543
544 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
545
546 /* The completion specifies the maximum time in ms that the driver
547 * may hold the resource in the Timeout field.
548 * If the resource is held by some other driver, the command completes
549 * with a busy return value and the timeout field indicates the maximum
550 * time the current owner of the resource has to free it.
551 */
552 if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
553 *timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
554
555 return status;
556 }
557
558 /**
559 * ixgbe_aci_release_res - release a common resource using ACI
560 * @hw: pointer to the HW struct
561 * @res: resource ID
562 * @sdp_number: resource number
563 *
564 * Release a common resource using ACI command (0x0009).
565 *
566 * Return: the exit code of the operation.
567 */
568 static s32
ixgbe_aci_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,u8 sdp_number)569 ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
570 u8 sdp_number)
571 {
572 struct ixgbe_aci_cmd_req_res *cmd;
573 struct ixgbe_aci_desc desc;
574
575 cmd = &desc.params.res_owner;
576
577 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
578
579 cmd->res_id = IXGBE_CPU_TO_LE16(res);
580 cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
581
582 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
583 }
584
585 /**
586 * ixgbe_acquire_res - acquire the ownership of a resource
587 * @hw: pointer to the HW structure
588 * @res: resource ID
589 * @access: access type (read or write)
590 * @timeout: timeout in milliseconds
591 *
592 * Make an attempt to acquire the ownership of a resource using
593 * the ixgbe_aci_req_res to utilize ACI.
594 * In case if some other driver has previously acquired the resource and
595 * performed any necessary updates, the IXGBE_ERR_ACI_NO_WORK is returned,
596 * and the caller does not obtain the resource and has no further work to do.
597 * If needed, the function will poll until the current lock owner timeouts.
598 *
599 * Return: the exit code of the operation.
600 */
ixgbe_acquire_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u32 timeout)601 s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
602 enum ixgbe_aci_res_access_type access, u32 timeout)
603 {
604 #define IXGBE_RES_POLLING_DELAY_MS 10
605 u32 delay = IXGBE_RES_POLLING_DELAY_MS;
606 u32 res_timeout = timeout;
607 u32 retry_timeout = 0;
608 s32 status;
609
610 status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
611
612 /* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
613 * previously acquired the resource and performed any necessary updates;
614 * in this case the caller does not obtain the resource and has no
615 * further work to do.
616 */
617 if (status == IXGBE_ERR_ACI_NO_WORK)
618 goto ixgbe_acquire_res_exit;
619
620 /* If necessary, poll until the current lock owner timeouts.
621 * Set retry_timeout to the timeout value reported by the FW in the
622 * response to the "Request Resource Ownership" (0x0008) Admin Command
623 * as it indicates the maximum time the current owner of the resource
624 * is allowed to hold it.
625 */
626 retry_timeout = res_timeout;
627 while (status && retry_timeout && res_timeout) {
628 msec_delay(delay);
629 retry_timeout = (retry_timeout > delay) ?
630 retry_timeout - delay : 0;
631 status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
632
633 if (status == IXGBE_ERR_ACI_NO_WORK)
634 /* lock free, but no work to do */
635 break;
636
637 if (!status)
638 /* lock acquired */
639 break;
640 }
641
642 ixgbe_acquire_res_exit:
643 return status;
644 }
645
646 /**
647 * ixgbe_release_res - release a common resource
648 * @hw: pointer to the HW structure
649 * @res: resource ID
650 *
651 * Release a common resource using ixgbe_aci_release_res.
652 */
ixgbe_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res)653 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
654 {
655 u32 total_delay = 0;
656 s32 status;
657
658 status = ixgbe_aci_release_res(hw, res, 0);
659
660 /* There are some rare cases when trying to release the resource
661 * results in an admin command timeout, so handle them correctly.
662 */
663 while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
664 (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
665 msec_delay(1);
666 status = ixgbe_aci_release_res(hw, res, 0);
667 total_delay++;
668 }
669 }
670
671 /**
672 * ixgbe_parse_common_caps - Parse common device/function capabilities
673 * @hw: pointer to the HW struct
674 * @caps: pointer to common capabilities structure
675 * @elem: the capability element to parse
676 * @prefix: message prefix for tracing capabilities
677 *
678 * Given a capability element, extract relevant details into the common
679 * capability structure.
680 *
681 * Return: true if the capability matches one of the common capability ids,
682 * false otherwise.
683 */
684 static bool
ixgbe_parse_common_caps(struct ixgbe_hw * hw,struct ixgbe_hw_common_caps * caps,struct ixgbe_aci_cmd_list_caps_elem * elem,const char * prefix)685 ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
686 struct ixgbe_aci_cmd_list_caps_elem *elem,
687 const char *prefix)
688 {
689 u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
690 u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
691 u32 number = IXGBE_LE32_TO_CPU(elem->number);
692 u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
693 bool found = true;
694
695 UNREFERENCED_1PARAMETER(hw);
696
697 switch (cap) {
698 case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
699 caps->valid_functions = number;
700 break;
701 case IXGBE_ACI_CAPS_SRIOV:
702 caps->sr_iov_1_1 = (number == 1);
703 break;
704 case IXGBE_ACI_CAPS_VMDQ:
705 caps->vmdq = (number == 1);
706 break;
707 case IXGBE_ACI_CAPS_DCB:
708 caps->dcb = (number == 1);
709 caps->active_tc_bitmap = logical_id;
710 caps->maxtc = phys_id;
711 break;
712 case IXGBE_ACI_CAPS_RSS:
713 caps->rss_table_size = number;
714 caps->rss_table_entry_width = logical_id;
715 break;
716 case IXGBE_ACI_CAPS_RXQS:
717 caps->num_rxq = number;
718 caps->rxq_first_id = phys_id;
719 break;
720 case IXGBE_ACI_CAPS_TXQS:
721 caps->num_txq = number;
722 caps->txq_first_id = phys_id;
723 break;
724 case IXGBE_ACI_CAPS_MSIX:
725 caps->num_msix_vectors = number;
726 caps->msix_vector_first_id = phys_id;
727 break;
728 case IXGBE_ACI_CAPS_NVM_VER:
729 break;
730 case IXGBE_ACI_CAPS_NVM_MGMT:
731 caps->sec_rev_disabled =
732 (number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
733 true : false;
734 caps->update_disabled =
735 (number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
736 true : false;
737 caps->nvm_unified_update =
738 (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
739 true : false;
740 caps->netlist_auth =
741 (number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
742 true : false;
743 break;
744 case IXGBE_ACI_CAPS_MAX_MTU:
745 caps->max_mtu = number;
746 break;
747 case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
748 caps->pcie_reset_avoidance = (number > 0);
749 break;
750 case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
751 caps->reset_restrict_support = (number == 1);
752 break;
753 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
754 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
755 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
756 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
757 {
758 u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
759
760 caps->ext_topo_dev_img_ver_high[index] = number;
761 caps->ext_topo_dev_img_ver_low[index] = logical_id;
762 caps->ext_topo_dev_img_part_num[index] =
763 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
764 IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
765 caps->ext_topo_dev_img_load_en[index] =
766 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
767 caps->ext_topo_dev_img_prog_en[index] =
768 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
769 break;
770 }
771 case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
772 caps->orom_recovery_update = (number == 1);
773 break;
774 case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
775 caps->next_cluster_id_support = (number == 1);
776 DEBUGOUT2("%s: next_cluster_id_support = %d\n",
777 prefix, caps->next_cluster_id_support);
778 break;
779 default:
780 /* Not one of the recognized common capabilities */
781 found = false;
782 }
783
784 return found;
785 }
786
787 /**
788 * ixgbe_hweight8 - count set bits among the 8 lowest bits
789 * @w: variable storing set bits to count
790 *
791 * Return: the number of set bits among the 8 lowest bits in the provided value.
792 */
ixgbe_hweight8(u32 w)793 static u8 ixgbe_hweight8(u32 w)
794 {
795 u8 hweight = 0, i;
796
797 for (i = 0; i < 8; i++)
798 if (w & (1 << i))
799 hweight++;
800
801 return hweight;
802 }
803
804 /**
805 * ixgbe_hweight32 - count set bits among the 32 lowest bits
806 * @w: variable storing set bits to count
807 *
808 * Return: the number of set bits among the 32 lowest bits in the
809 * provided value.
810 */
ixgbe_hweight32(u32 w)811 static u8 ixgbe_hweight32(u32 w)
812 {
813 u32 bitMask = 0x1, i;
814 u8 bitCnt = 0;
815
816 for (i = 0; i < 32; i++)
817 {
818 if (w & bitMask)
819 bitCnt++;
820
821 bitMask = bitMask << 0x1;
822 }
823
824 return bitCnt;
825 }
826
827 /**
828 * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
829 * @hw: pointer to the HW struct
830 * @dev_p: pointer to device capabilities structure
831 * @cap: capability element to parse
832 *
833 * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
834 */
835 static void
ixgbe_parse_valid_functions_cap(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)836 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
837 struct ixgbe_hw_dev_caps *dev_p,
838 struct ixgbe_aci_cmd_list_caps_elem *cap)
839 {
840 u32 number = IXGBE_LE32_TO_CPU(cap->number);
841
842 UNREFERENCED_1PARAMETER(hw);
843
844 dev_p->num_funcs = ixgbe_hweight32(number);
845 }
846
847 /**
848 * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
849 * @hw: pointer to the HW struct
850 * @dev_p: pointer to device capabilities structure
851 * @cap: capability element to parse
852 *
853 * Parse IXGBE_ACI_CAPS_VF for device capabilities.
854 */
ixgbe_parse_vf_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)855 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
856 struct ixgbe_hw_dev_caps *dev_p,
857 struct ixgbe_aci_cmd_list_caps_elem *cap)
858 {
859 u32 number = IXGBE_LE32_TO_CPU(cap->number);
860
861 UNREFERENCED_1PARAMETER(hw);
862
863 dev_p->num_vfs_exposed = number;
864 }
865
866 /**
867 * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
868 * @hw: pointer to the HW struct
869 * @dev_p: pointer to device capabilities structure
870 * @cap: capability element to parse
871 *
872 * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
873 */
ixgbe_parse_vsi_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)874 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
875 struct ixgbe_hw_dev_caps *dev_p,
876 struct ixgbe_aci_cmd_list_caps_elem *cap)
877 {
878 u32 number = IXGBE_LE32_TO_CPU(cap->number);
879
880 UNREFERENCED_1PARAMETER(hw);
881
882 dev_p->num_vsi_allocd_to_host = number;
883 }
884
885 /**
886 * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
887 * @hw: pointer to the HW struct
888 * @dev_p: pointer to device capabilities structure
889 * @cap: capability element to parse
890 *
891 * Parse IXGBE_ACI_CAPS_FD for device capabilities.
892 */
ixgbe_parse_fdir_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)893 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
894 struct ixgbe_hw_dev_caps *dev_p,
895 struct ixgbe_aci_cmd_list_caps_elem *cap)
896 {
897 u32 number = IXGBE_LE32_TO_CPU(cap->number);
898
899 UNREFERENCED_1PARAMETER(hw);
900
901 dev_p->num_flow_director_fltr = number;
902 }
903
904 /**
905 * ixgbe_parse_dev_caps - Parse device capabilities
906 * @hw: pointer to the HW struct
907 * @dev_p: pointer to device capabilities structure
908 * @buf: buffer containing the device capability records
909 * @cap_count: the number of capabilities
910 *
911 * Helper device to parse device (0x000B) capabilities list. For
912 * capabilities shared between device and function, this relies on
913 * ixgbe_parse_common_caps.
914 *
915 * Loop through the list of provided capabilities and extract the relevant
916 * data into the device capabilities structured.
917 */
ixgbe_parse_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,void * buf,u32 cap_count)918 static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
919 struct ixgbe_hw_dev_caps *dev_p,
920 void *buf, u32 cap_count)
921 {
922 struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
923 u32 i;
924
925 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
926
927 memset(dev_p, 0, sizeof(*dev_p));
928
929 for (i = 0; i < cap_count; i++) {
930 u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
931 bool found;
932
933 found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
934 &cap_resp[i], "dev caps");
935
936 switch (cap) {
937 case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
938 ixgbe_parse_valid_functions_cap(hw, dev_p,
939 &cap_resp[i]);
940 break;
941 case IXGBE_ACI_CAPS_VF:
942 ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
943 break;
944 case IXGBE_ACI_CAPS_VSI:
945 ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
946 break;
947 case IXGBE_ACI_CAPS_FD:
948 ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
949 break;
950 default:
951 /* Don't list common capabilities as unknown */
952 if (!found)
953 break;
954 }
955 }
956
957 }
958
959 /**
960 * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
961 * @hw: pointer to the HW struct
962 * @func_p: pointer to function capabilities structure
963 * @cap: pointer to the capability element to parse
964 *
965 * Extract function capabilities for IXGBE_ACI_CAPS_VF.
966 */
ixgbe_parse_vf_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)967 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
968 struct ixgbe_hw_func_caps *func_p,
969 struct ixgbe_aci_cmd_list_caps_elem *cap)
970 {
971 u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
972 u32 number = IXGBE_LE32_TO_CPU(cap->number);
973
974 UNREFERENCED_1PARAMETER(hw);
975
976 func_p->num_allocd_vfs = number;
977 func_p->vf_base_id = logical_id;
978 }
979
980 /**
981 * ixgbe_get_num_per_func - determine number of resources per PF
982 * @hw: pointer to the HW structure
983 * @max: value to be evenly split between each PF
984 *
985 * Determine the number of valid functions by going through the bitmap returned
986 * from parsing capabilities and use this to calculate the number of resources
987 * per PF based on the max value passed in.
988 *
989 * Return: the number of resources per PF or 0, if no PH are available.
990 */
ixgbe_get_num_per_func(struct ixgbe_hw * hw,u32 max)991 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
992 {
993 u8 funcs;
994
995 #define IXGBE_CAPS_VALID_FUNCS_M 0xFF
996 funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
997 IXGBE_CAPS_VALID_FUNCS_M);
998
999 if (!funcs)
1000 return 0;
1001
1002 return max / funcs;
1003 }
1004
1005 /**
1006 * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
1007 * @hw: pointer to the HW struct
1008 * @func_p: pointer to function capabilities structure
1009 * @cap: pointer to the capability element to parse
1010 *
1011 * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
1012 */
ixgbe_parse_vsi_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)1013 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
1014 struct ixgbe_hw_func_caps *func_p,
1015 struct ixgbe_aci_cmd_list_caps_elem *cap)
1016 {
1017 func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
1018 }
1019
1020 /**
1021 * ixgbe_parse_func_caps - Parse function capabilities
1022 * @hw: pointer to the HW struct
1023 * @func_p: pointer to function capabilities structure
1024 * @buf: buffer containing the function capability records
1025 * @cap_count: the number of capabilities
1026 *
1027 * Helper function to parse function (0x000A) capabilities list. For
1028 * capabilities shared between device and function, this relies on
1029 * ixgbe_parse_common_caps.
1030 *
1031 * Loop through the list of provided capabilities and extract the relevant
1032 * data into the function capabilities structured.
1033 */
ixgbe_parse_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,void * buf,u32 cap_count)1034 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
1035 struct ixgbe_hw_func_caps *func_p,
1036 void *buf, u32 cap_count)
1037 {
1038 struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
1039 u32 i;
1040
1041 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
1042
1043 memset(func_p, 0, sizeof(*func_p));
1044
1045 for (i = 0; i < cap_count; i++) {
1046 u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
1047 ixgbe_parse_common_caps(hw, &func_p->common_cap,
1048 &cap_resp[i], "func caps");
1049
1050 switch (cap) {
1051 case IXGBE_ACI_CAPS_VF:
1052 ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
1053 break;
1054 case IXGBE_ACI_CAPS_VSI:
1055 ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
1056 break;
1057 default:
1058 /* Don't list common capabilities as unknown */
1059 break;
1060 }
1061 }
1062
1063 }
1064
1065 /**
1066 * ixgbe_aci_list_caps - query function/device capabilities
1067 * @hw: pointer to the HW struct
1068 * @buf: a buffer to hold the capabilities
1069 * @buf_size: size of the buffer
1070 * @cap_count: if not NULL, set to the number of capabilities reported
1071 * @opc: capabilities type to discover, device or function
1072 *
1073 * Get the function (0x000A) or device (0x000B) capabilities description from
1074 * firmware and store it in the buffer.
1075 *
1076 * If the cap_count pointer is not NULL, then it is set to the number of
1077 * capabilities firmware will report. Note that if the buffer size is too
1078 * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
1079 * cap_count will still be updated in this case. It is recommended that the
1080 * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
1081 * buffer that firmware could return) to avoid this.
1082 *
1083 * Return: the exit code of the operation.
1084 * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
1085 */
ixgbe_aci_list_caps(struct ixgbe_hw * hw,void * buf,u16 buf_size,u32 * cap_count,enum ixgbe_aci_opc opc)1086 s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
1087 u32 *cap_count, enum ixgbe_aci_opc opc)
1088 {
1089 struct ixgbe_aci_cmd_list_caps *cmd;
1090 struct ixgbe_aci_desc desc;
1091 s32 status;
1092
1093 cmd = &desc.params.get_cap;
1094
1095 if (opc != ixgbe_aci_opc_list_func_caps &&
1096 opc != ixgbe_aci_opc_list_dev_caps)
1097 return IXGBE_ERR_PARAM;
1098
1099 ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
1100 status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
1101
1102 if (cap_count)
1103 *cap_count = IXGBE_LE32_TO_CPU(cmd->count);
1104
1105 return status;
1106 }
1107
1108 /**
1109 * ixgbe_discover_dev_caps - Read and extract device capabilities
1110 * @hw: pointer to the hardware structure
1111 * @dev_caps: pointer to device capabilities structure
1112 *
1113 * Read the device capabilities and extract them into the dev_caps structure
1114 * for later use.
1115 *
1116 * Return: the exit code of the operation.
1117 */
ixgbe_discover_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_caps)1118 s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
1119 struct ixgbe_hw_dev_caps *dev_caps)
1120 {
1121 u32 status, cap_count = 0;
1122 u8 *cbuf = NULL;
1123
1124 cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1125 if (!cbuf)
1126 return IXGBE_ERR_OUT_OF_MEM;
1127 /* Although the driver doesn't know the number of capabilities the
1128 * device will return, we can simply send a 4KB buffer, the maximum
1129 * possible size that firmware can return.
1130 */
1131 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1132 sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1133
1134 status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1135 &cap_count,
1136 ixgbe_aci_opc_list_dev_caps);
1137 if (!status)
1138 ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
1139
1140 if (cbuf)
1141 ixgbe_free(hw, cbuf);
1142
1143 return status;
1144 }
1145
1146 /**
1147 * ixgbe_discover_func_caps - Read and extract function capabilities
1148 * @hw: pointer to the hardware structure
1149 * @func_caps: pointer to function capabilities structure
1150 *
1151 * Read the function capabilities and extract them into the func_caps structure
1152 * for later use.
1153 *
1154 * Return: the exit code of the operation.
1155 */
ixgbe_discover_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_caps)1156 s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
1157 struct ixgbe_hw_func_caps *func_caps)
1158 {
1159 u32 cap_count = 0;
1160 u8 *cbuf = NULL;
1161 s32 status;
1162
1163 cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1164 if(!cbuf)
1165 return IXGBE_ERR_OUT_OF_MEM;
1166 /* Although the driver doesn't know the number of capabilities the
1167 * device will return, we can simply send a 4KB buffer, the maximum
1168 * possible size that firmware can return.
1169 */
1170 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1171 sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1172
1173 status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1174 &cap_count,
1175 ixgbe_aci_opc_list_func_caps);
1176 if (!status)
1177 ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
1178
1179 if (cbuf)
1180 ixgbe_free(hw, cbuf);
1181
1182 return status;
1183 }
1184
1185 /**
1186 * ixgbe_get_caps - get info about the HW
1187 * @hw: pointer to the hardware structure
1188 *
1189 * Retrieve both device and function capabilities.
1190 *
1191 * Return: the exit code of the operation.
1192 */
ixgbe_get_caps(struct ixgbe_hw * hw)1193 s32 ixgbe_get_caps(struct ixgbe_hw *hw)
1194 {
1195 s32 status;
1196
1197 status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
1198 if (status)
1199 return status;
1200
1201 return ixgbe_discover_func_caps(hw, &hw->func_caps);
1202 }
1203
1204 /**
1205 * ixgbe_aci_disable_rxen - disable RX
1206 * @hw: pointer to the HW struct
1207 *
1208 * Request a safe disable of Receive Enable using ACI command (0x000C).
1209 *
1210 * Return: the exit code of the operation.
1211 */
ixgbe_aci_disable_rxen(struct ixgbe_hw * hw)1212 s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
1213 {
1214 struct ixgbe_aci_cmd_disable_rxen *cmd;
1215 struct ixgbe_aci_desc desc;
1216
1217 UNREFERENCED_1PARAMETER(hw);
1218
1219 cmd = &desc.params.disable_rxen;
1220
1221 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
1222
1223 cmd->lport_num = (u8)hw->bus.func;
1224
1225 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1226 }
1227
1228 /**
1229 * ixgbe_aci_get_phy_caps - returns PHY capabilities
1230 * @hw: pointer to the HW struct
1231 * @qual_mods: report qualified modules
1232 * @report_mode: report mode capabilities
1233 * @pcaps: structure for PHY capabilities to be filled
1234 *
1235 * Returns the various PHY capabilities supported on the Port
1236 * using ACI command (0x0600).
1237 *
1238 * Return: the exit code of the operation.
1239 */
ixgbe_aci_get_phy_caps(struct ixgbe_hw * hw,bool qual_mods,u8 report_mode,struct ixgbe_aci_cmd_get_phy_caps_data * pcaps)1240 s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
1241 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
1242 {
1243 struct ixgbe_aci_cmd_get_phy_caps *cmd;
1244 u16 pcaps_size = sizeof(*pcaps);
1245 struct ixgbe_aci_desc desc;
1246 s32 status;
1247
1248 cmd = &desc.params.get_phy;
1249
1250 if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
1251 return IXGBE_ERR_PARAM;
1252
1253 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
1254
1255 if (qual_mods)
1256 cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
1257
1258 cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
1259 status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
1260
1261 if (status == IXGBE_SUCCESS &&
1262 report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
1263 hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
1264 hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
1265 memcpy(hw->link.link_info.module_type, &pcaps->module_type,
1266 sizeof(hw->link.link_info.module_type));
1267 }
1268
1269 return status;
1270 }
1271
1272 /**
1273 * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
1274 * @phy_caps: PHY capabilities
1275 * @phy_cfg: PHY configuration
1276 *
1277 * Helper function to determine if PHY capabilities match PHY
1278 * configuration
1279 *
1280 * Return: true if PHY capabilities match PHY configuration.
1281 */
1282 bool
ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * phy_caps,struct ixgbe_aci_cmd_set_phy_cfg_data * phy_cfg)1283 ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
1284 struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
1285 {
1286 u8 caps_mask, cfg_mask;
1287
1288 if (!phy_caps || !phy_cfg)
1289 return false;
1290
1291 /* These bits are not common between capabilities and configuration.
1292 * Do not use them to determine equality.
1293 */
1294 caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
1295 IXGBE_ACI_PHY_EN_MOD_QUAL);
1296 cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
1297 ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
1298
1299 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
1300 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
1301 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
1302 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
1303 phy_caps->eee_cap != phy_cfg->eee_cap ||
1304 phy_caps->eeer_value != phy_cfg->eeer_value ||
1305 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
1306 return false;
1307
1308 return true;
1309 }
1310
1311 /**
1312 * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
1313 * @caps: PHY ability structure to copy data from
1314 * @cfg: PHY configuration structure to copy data to
1315 *
1316 * Helper function to copy data from PHY capabilities data structure
1317 * to PHY configuration data structure
1318 */
ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * caps,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1319 void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
1320 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1321 {
1322 if (!caps || !cfg)
1323 return;
1324
1325 memset(cfg, 0, sizeof(*cfg));
1326 cfg->phy_type_low = caps->phy_type_low;
1327 cfg->phy_type_high = caps->phy_type_high;
1328 cfg->caps = caps->caps;
1329 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
1330 cfg->eee_cap = caps->eee_cap;
1331 cfg->eeer_value = caps->eeer_value;
1332 cfg->link_fec_opt = caps->link_fec_options;
1333 cfg->module_compliance_enforcement =
1334 caps->module_compliance_enforcement;
1335 }
1336
1337 /**
1338 * ixgbe_aci_set_phy_cfg - set PHY configuration
1339 * @hw: pointer to the HW struct
1340 * @cfg: structure with PHY configuration data to be set
1341 *
1342 * Set the various PHY configuration parameters supported on the Port
1343 * using ACI command (0x0601).
1344 * One or more of the Set PHY config parameters may be ignored in an MFP
1345 * mode as the PF may not have the privilege to set some of the PHY Config
1346 * parameters.
1347 *
1348 * Return: the exit code of the operation.
1349 */
ixgbe_aci_set_phy_cfg(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1350 s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
1351 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1352 {
1353 struct ixgbe_aci_desc desc;
1354 s32 status;
1355
1356 if (!cfg)
1357 return IXGBE_ERR_PARAM;
1358
1359 /* Ensure that only valid bits of cfg->caps can be turned on. */
1360 if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK) {
1361 cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
1362 }
1363
1364 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
1365 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
1366
1367 status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
1368
1369 if (!status)
1370 hw->phy.curr_user_phy_cfg = *cfg;
1371
1372 return status;
1373 }
1374
1375 /**
1376 * ixgbe_aci_set_link_restart_an - set up link and restart AN
1377 * @hw: pointer to the HW struct
1378 * @ena_link: if true: enable link, if false: disable link
1379 *
1380 * Function sets up the link and restarts the Auto-Negotiation over the link.
1381 *
1382 * Return: the exit code of the operation.
1383 */
ixgbe_aci_set_link_restart_an(struct ixgbe_hw * hw,bool ena_link)1384 s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
1385 {
1386 struct ixgbe_aci_cmd_restart_an *cmd;
1387 struct ixgbe_aci_desc desc;
1388
1389 cmd = &desc.params.restart_an;
1390
1391 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
1392
1393 cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
1394 if (ena_link)
1395 cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1396 else
1397 cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1398
1399 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1400 }
1401
/**
 * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
 * @hw: pointer to the HW struct
 *
 * Try to identify the media type based on the phy type.
 * If more than one media type, the ixgbe_media_type_unknown is returned.
 * First, phy_type_low is checked, then phy_type_high.
 * If none are identified, the ixgbe_media_type_unknown is returned
 *
 * Return: type of a media based on phy type in form of enum.
 */
static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
{
	struct ixgbe_link_status *hw_link_info;

	if (!hw)
		return ixgbe_media_type_unknown;

	hw_link_info = &hw->link.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ixgbe_media_type_unknown;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ixgbe_media_type_da;

		/* Map the low PHY-type word onto a media class */
		switch (hw_link_info->phy_type_low) {
		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
			return ixgbe_media_type_fiber;
		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
			/* Active optical cables are treated as fiber */
			return ixgbe_media_type_fiber;
		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
			return ixgbe_media_type_copper;
		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
			return ixgbe_media_type_da;
		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
			return ixgbe_media_type_backplane;
		}
	} else {
		/* phy_type_low is clear; only the high word can match */
		switch (hw_link_info->phy_type_high) {
		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
			return ixgbe_media_type_copper;
		}
	}
	/* No recognized PHY type set in either word */
	return ixgbe_media_type_unknown;
}
1471
1472 /**
1473 * ixgbe_update_link_info - update status of the HW network link
1474 * @hw: pointer to the HW struct
1475 *
1476 * Update the status of the HW network link.
1477 *
1478 * Return: the exit code of the operation.
1479 */
ixgbe_update_link_info(struct ixgbe_hw * hw)1480 s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
1481 {
1482 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
1483 struct ixgbe_link_status *li;
1484 s32 status;
1485
1486 if (!hw)
1487 return IXGBE_ERR_PARAM;
1488
1489 li = &hw->link.link_info;
1490
1491 status = ixgbe_aci_get_link_info(hw, true, NULL);
1492 if (status)
1493 return status;
1494
1495 if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
1496 pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
1497 ixgbe_malloc(hw, sizeof(*pcaps));
1498 if (!pcaps)
1499 return IXGBE_ERR_OUT_OF_MEM;
1500
1501 status = ixgbe_aci_get_phy_caps(hw, false,
1502 IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1503 pcaps);
1504
1505 if (status == IXGBE_SUCCESS)
1506 memcpy(li->module_type, &pcaps->module_type,
1507 sizeof(li->module_type));
1508
1509 ixgbe_free(hw, pcaps);
1510 }
1511
1512 return status;
1513 }
1514
1515 /**
1516 * ixgbe_get_link_status - get status of the HW network link
1517 * @hw: pointer to the HW struct
1518 * @link_up: pointer to bool (true/false = linkup/linkdown)
1519 *
1520 * Variable link_up is true if link is up, false if link is down.
1521 * The variable link_up is invalid if status is non zero. As a
1522 * result of this call, link status reporting becomes enabled
1523 *
1524 * Return: the exit code of the operation.
1525 */
ixgbe_get_link_status(struct ixgbe_hw * hw,bool * link_up)1526 s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
1527 {
1528 s32 status = IXGBE_SUCCESS;
1529
1530 if (!hw || !link_up)
1531 return IXGBE_ERR_PARAM;
1532
1533 if (hw->link.get_link_info) {
1534 status = ixgbe_update_link_info(hw);
1535 if (status) {
1536 return status;
1537 }
1538 }
1539
1540 *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
1541
1542 return status;
1543 }
1544
/**
 * ixgbe_aci_get_link_info - get the link status
 * @hw: pointer to the HW struct
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 *
 * Get the current Link Status using ACI command (0x607).
 * The current link can be optionally provided to update
 * the status.
 *
 * Return: the link status of the adapter.
 */
s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
			    struct ixgbe_link_status *link)
{
	struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
	struct ixgbe_aci_cmd_get_link_status *resp;
	struct ixgbe_link_status *li_old, *li;
	struct ixgbe_fc_info *hw_fc_info;
	struct ixgbe_aci_desc desc;
	bool tx_pause, rx_pause;
	u8 cmd_flags;
	s32 status;

	if (!hw)
		return IXGBE_ERR_PARAM;

	li_old = &hw->link.link_info_old;
	li = &hw->link.link_info;
	hw_fc_info = &hw->fc;

	/* The same command both reads link status and sets LSE reporting */
	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cmd_flags;

	status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));

	if (status != IXGBE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
	/* FEC and pacing share the response's cfg byte */
	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
				      IXGBE_ACI_CFG_PACING_TYPE_M);

	/* update fc info: derive flow-control mode from the AN pause bits */
	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_full;
	else if (tx_pause)
		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
	else if (rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
	else
		hw_fc_info->current_mode = ixgbe_fc_none;

	/* firmware echoes whether LSE is now enabled in the descriptor */
	li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	hw->link.get_link_info = false;

	return IXGBE_SUCCESS;
}
1626
1627 /**
1628 * ixgbe_aci_set_event_mask - set event mask
1629 * @hw: pointer to the HW struct
1630 * @port_num: port number of the physical function
1631 * @mask: event mask to be set
1632 *
1633 * Set the event mask using ACI command (0x0613).
1634 *
1635 * Return: the exit code of the operation.
1636 */
ixgbe_aci_set_event_mask(struct ixgbe_hw * hw,u8 port_num,u16 mask)1637 s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
1638 {
1639 struct ixgbe_aci_cmd_set_event_mask *cmd;
1640 struct ixgbe_aci_desc desc;
1641
1642 cmd = &desc.params.set_event_mask;
1643
1644 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
1645
1646 cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
1647 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1648 }
1649
1650 /**
1651 * ixgbe_configure_lse - enable/disable link status events
1652 * @hw: pointer to the HW struct
1653 * @activate: bool value deciding if lse should be enabled nor disabled
1654 * @mask: event mask to be set; a set bit means deactivation of the
1655 * corresponding event
1656 *
1657 * Set the event mask and then enable or disable link status events
1658 *
1659 * Return: the exit code of the operation.
1660 */
ixgbe_configure_lse(struct ixgbe_hw * hw,bool activate,u16 mask)1661 s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
1662 {
1663 s32 rc;
1664
1665 rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
1666 if (rc) {
1667 return rc;
1668 }
1669
1670 /* Enabling link status events generation by fw */
1671 rc = ixgbe_aci_get_link_info(hw, activate, NULL);
1672 if (rc) {
1673 return rc;
1674 }
1675 return IXGBE_SUCCESS;
1676 }
1677
1678 /**
1679 * ixgbe_aci_get_netlist_node - get a node handle
1680 * @hw: pointer to the hw struct
1681 * @cmd: get_link_topo AQ structure
1682 * @node_part_number: output node part number if node found
1683 * @node_handle: output node handle parameter if node found
1684 *
1685 * Get the netlist node and assigns it to
1686 * the provided handle using ACI command (0x06E0).
1687 *
1688 * Return: the exit code of the operation.
1689 */
ixgbe_aci_get_netlist_node(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_get_link_topo * cmd,u8 * node_part_number,u16 * node_handle)1690 s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
1691 struct ixgbe_aci_cmd_get_link_topo *cmd,
1692 u8 *node_part_number, u16 *node_handle)
1693 {
1694 struct ixgbe_aci_desc desc;
1695
1696 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1697 desc.params.get_link_topo = *cmd;
1698
1699 if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
1700 return IXGBE_ERR_NOT_SUPPORTED;
1701
1702 if (node_handle)
1703 *node_handle =
1704 IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
1705 if (node_part_number)
1706 *node_part_number = desc.params.get_link_topo.node_part_num;
1707
1708 return IXGBE_SUCCESS;
1709 }
1710
1711 /**
1712 * ixgbe_find_netlist_node - find a node handle
1713 * @hw: pointer to the hw struct
1714 * @node_type_ctx: type of netlist node to look for
1715 * @node_part_number: node part number to look for
1716 * @node_handle: output parameter if node found - optional
1717 *
1718 * Find and return the node handle for a given node type and part number in the
1719 * netlist. When found IXGBE_SUCCESS is returned, IXGBE_ERR_NOT_SUPPORTED
1720 * otherwise. If @node_handle provided, it would be set to found node handle.
1721 *
1722 * Return: the exit code of the operation.
1723 */
ixgbe_find_netlist_node(struct ixgbe_hw * hw,u8 node_type_ctx,u8 node_part_number,u16 * node_handle)1724 s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
1725 u8 node_part_number, u16 *node_handle)
1726 {
1727 struct ixgbe_aci_cmd_get_link_topo cmd;
1728 u8 rec_node_part_number;
1729 u16 rec_node_handle;
1730 s32 status;
1731 u8 idx;
1732
1733 for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
1734 memset(&cmd, 0, sizeof(cmd));
1735
1736 cmd.addr.topo_params.node_type_ctx =
1737 (node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
1738 cmd.addr.topo_params.index = idx;
1739
1740 status = ixgbe_aci_get_netlist_node(hw, &cmd,
1741 &rec_node_part_number,
1742 &rec_node_handle);
1743 if (status)
1744 return status;
1745
1746 if (rec_node_part_number == node_part_number) {
1747 if (node_handle)
1748 *node_handle = rec_node_handle;
1749 return IXGBE_SUCCESS;
1750 }
1751 }
1752
1753 return IXGBE_ERR_NOT_SUPPORTED;
1754 }
1755
1756 /**
1757 * ixgbe_aci_read_i2c - read I2C register value
1758 * @hw: pointer to the hw struct
1759 * @topo_addr: topology address for a device to communicate with
1760 * @bus_addr: 7-bit I2C bus address
1761 * @addr: I2C memory address (I2C offset) with up to 16 bits
1762 * @params: I2C parameters: bit [7] - Repeated start,
1763 * bits [6:5] data offset size,
1764 * bit [4] - I2C address type, bits [3:0] - data size
1765 * to read (0-16 bytes)
1766 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
1767 *
1768 * Read the value of the I2C pin register using ACI command (0x06E2).
1769 *
1770 * Return: the exit code of the operation.
1771 */
ixgbe_aci_read_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1772 s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
1773 struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1774 u16 bus_addr, __le16 addr, u8 params, u8 *data)
1775 {
1776 struct ixgbe_aci_desc desc = { 0 };
1777 struct ixgbe_aci_cmd_i2c *cmd;
1778 u8 data_size;
1779 s32 status;
1780
1781 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
1782 cmd = &desc.params.read_write_i2c;
1783
1784 if (!data)
1785 return IXGBE_ERR_PARAM;
1786
1787 data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1788 IXGBE_ACI_I2C_DATA_SIZE_S;
1789
1790 cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1791 cmd->topo_addr = topo_addr;
1792 cmd->i2c_params = params;
1793 cmd->i2c_addr = addr;
1794
1795 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1796 if (!status) {
1797 struct ixgbe_aci_cmd_read_i2c_resp *resp;
1798 u8 i;
1799
1800 resp = &desc.params.read_i2c_resp;
1801 for (i = 0; i < data_size; i++) {
1802 *data = resp->i2c_data[i];
1803 data++;
1804 }
1805 }
1806
1807 return status;
1808 }
1809
1810 /**
1811 * ixgbe_aci_write_i2c - write a value to I2C register
1812 * @hw: pointer to the hw struct
1813 * @topo_addr: topology address for a device to communicate with
1814 * @bus_addr: 7-bit I2C bus address
1815 * @addr: I2C memory address (I2C offset) with up to 16 bits
1816 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
1817 * to write (0-7 bytes)
1818 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
1819 *
1820 * Write a value to the I2C pin register using ACI command (0x06E3).
1821 *
1822 * Return: the exit code of the operation.
1823 */
ixgbe_aci_write_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1824 s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
1825 struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1826 u16 bus_addr, __le16 addr, u8 params, u8 *data)
1827 {
1828 struct ixgbe_aci_desc desc = { 0 };
1829 struct ixgbe_aci_cmd_i2c *cmd;
1830 u8 i, data_size;
1831
1832 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
1833 cmd = &desc.params.read_write_i2c;
1834
1835 data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1836 IXGBE_ACI_I2C_DATA_SIZE_S;
1837
1838 /* data_size limited to 4 */
1839 if (data_size > 4)
1840 return IXGBE_ERR_PARAM;
1841
1842 cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1843 cmd->topo_addr = topo_addr;
1844 cmd->i2c_params = params;
1845 cmd->i2c_addr = addr;
1846
1847 for (i = 0; i < data_size; i++) {
1848 cmd->i2c_data[i] = *data;
1849 data++;
1850 }
1851
1852 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1853 }
1854
1855 /**
1856 * ixgbe_aci_set_port_id_led - set LED value for the given port
1857 * @hw: pointer to the HW struct
1858 * @orig_mode: set LED original mode
1859 *
1860 * Set LED value for the given port (0x06E9)
1861 *
1862 * Return: the exit code of the operation.
1863 */
ixgbe_aci_set_port_id_led(struct ixgbe_hw * hw,bool orig_mode)1864 s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
1865 {
1866 struct ixgbe_aci_cmd_set_port_id_led *cmd;
1867 struct ixgbe_aci_desc desc;
1868
1869 cmd = &desc.params.set_port_id_led;
1870
1871 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
1872
1873 cmd->lport_num = (u8)hw->bus.func;
1874 cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
1875
1876 if (orig_mode)
1877 cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
1878 else
1879 cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
1880
1881 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1882 }
1883
1884 /**
1885 * ixgbe_aci_set_gpio - set GPIO pin state
1886 * @hw: pointer to the hw struct
1887 * @gpio_ctrl_handle: GPIO controller node handle
1888 * @pin_idx: IO Number of the GPIO that needs to be set
1889 * @value: SW provide IO value to set in the LSB
1890 *
1891 * Set the GPIO pin state that is a part of the topology
1892 * using ACI command (0x06EC).
1893 *
1894 * Return: the exit code of the operation.
1895 */
ixgbe_aci_set_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool value)1896 s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1897 bool value)
1898 {
1899 struct ixgbe_aci_cmd_gpio *cmd;
1900 struct ixgbe_aci_desc desc;
1901
1902 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
1903 cmd = &desc.params.read_write_gpio;
1904 cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1905 cmd->gpio_num = pin_idx;
1906 cmd->gpio_val = value ? 1 : 0;
1907
1908 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1909 }
1910
1911 /**
1912 * ixgbe_aci_get_gpio - get GPIO pin state
1913 * @hw: pointer to the hw struct
1914 * @gpio_ctrl_handle: GPIO controller node handle
1915 * @pin_idx: IO Number of the GPIO that needs to be set
1916 * @value: IO value read
1917 *
1918 * Get the value of a GPIO signal which is part of the topology
1919 * using ACI command (0x06ED).
1920 *
1921 * Return: the exit code of the operation.
1922 */
ixgbe_aci_get_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool * value)1923 s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1924 bool *value)
1925 {
1926 struct ixgbe_aci_cmd_gpio *cmd;
1927 struct ixgbe_aci_desc desc;
1928 s32 status;
1929
1930 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
1931 cmd = &desc.params.read_write_gpio;
1932 cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1933 cmd->gpio_num = pin_idx;
1934
1935 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1936 if (status)
1937 return status;
1938
1939 *value = !!cmd->gpio_val;
1940 return IXGBE_SUCCESS;
1941 }
1942
1943 /**
1944 * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
1945 * @hw: pointer to the HW struct
1946 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
1947 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
1948 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
1949 * @page: QSFP page
1950 * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
1951 * @data: pointer to data buffer to be read/written to the I2C device.
1952 * @length: 1-16 for read, 1 for write.
1953 * @write: 0 read, 1 for write.
1954 *
1955 * Read/write SFF EEPROM using ACI command (0x06EE).
1956 *
1957 * Return: the exit code of the operation.
1958 */
ixgbe_aci_sff_eeprom(struct ixgbe_hw * hw,u16 lport,u8 bus_addr,u16 mem_addr,u8 page,u8 page_bank_ctrl,u8 * data,u8 length,bool write)1959 s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
1960 u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
1961 u8 length, bool write)
1962 {
1963 struct ixgbe_aci_cmd_sff_eeprom *cmd;
1964 struct ixgbe_aci_desc desc;
1965 s32 status;
1966
1967 if (!data || (mem_addr & 0xff00))
1968 return IXGBE_ERR_PARAM;
1969
1970 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
1971 cmd = &desc.params.read_write_sff_param;
1972 desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
1973 cmd->lport_num = (u8)(lport & 0xff);
1974 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
1975 cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
1976 IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
1977 ((page_bank_ctrl <<
1978 IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
1979 IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
1980 cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
1981 cmd->module_page = page;
1982 if (write)
1983 cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
1984
1985 status = ixgbe_aci_send_cmd(hw, &desc, data, length);
1986 return status;
1987 }
1988
1989 /**
1990 * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
1991 * @hw: pointer to the hardware structure
1992 * @topo_params: pointer to structure storing topology parameters for a device
1993 *
1994 * Program Topology Device NVM using ACI command (0x06F2).
1995 *
1996 * Return: the exit code of the operation.
1997 */
ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params)1998 s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
1999 struct ixgbe_aci_cmd_link_topo_params *topo_params)
2000 {
2001 struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
2002 struct ixgbe_aci_desc desc;
2003
2004 cmd = &desc.params.prog_topo_dev_nvm;
2005
2006 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
2007
2008 memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2009
2010 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2011 }
2012
2013 /**
2014 * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
2015 * @hw: pointer to the hardware structure
2016 * @topo_params: pointer to structure storing topology parameters for a device
2017 * @start_address: byte offset in the topology device NVM
2018 * @data: pointer to data buffer
2019 * @data_size: number of bytes to be read from the topology device NVM
2020 * Read Topology Device NVM (0x06F3)
2021 *
2022 * Read Topology of Device NVM using ACI command (0x06F3).
2023 *
2024 * Return: the exit code of the operation.
2025 */
ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params,u32 start_address,u8 * data,u8 data_size)2026 s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
2027 struct ixgbe_aci_cmd_link_topo_params *topo_params,
2028 u32 start_address, u8 *data, u8 data_size)
2029 {
2030 struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
2031 struct ixgbe_aci_desc desc;
2032 s32 status;
2033
2034 if (!data || data_size == 0 ||
2035 data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
2036 return IXGBE_ERR_PARAM;
2037
2038 cmd = &desc.params.read_topo_dev_nvm;
2039
2040 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
2041
2042 desc.datalen = IXGBE_CPU_TO_LE16(data_size);
2043 memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2044 cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
2045
2046 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2047 if (status)
2048 return status;
2049
2050 memcpy(data, cmd->data_read, data_size);
2051
2052 return IXGBE_SUCCESS;
2053 }
2054
2055 /**
2056 * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
2057 * @hw: pointer to the HW structure
2058 * @access: NVM access type (read or write)
2059 *
2060 * Request NVM ownership.
2061 *
2062 * Return: the exit code of the operation.
2063 */
ixgbe_acquire_nvm(struct ixgbe_hw * hw,enum ixgbe_aci_res_access_type access)2064 s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
2065 enum ixgbe_aci_res_access_type access)
2066 {
2067 u32 fla;
2068
2069 /* Skip if we are in blank NVM programming mode */
2070 fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2071 if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2072 return IXGBE_SUCCESS;
2073
2074 return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
2075 IXGBE_NVM_TIMEOUT);
2076 }
2077
2078 /**
2079 * ixgbe_release_nvm - Generic request for releasing the NVM ownership
2080 * @hw: pointer to the HW structure
2081 *
2082 * Release NVM ownership.
2083 */
ixgbe_release_nvm(struct ixgbe_hw * hw)2084 void ixgbe_release_nvm(struct ixgbe_hw *hw)
2085 {
2086 u32 fla;
2087
2088 /* Skip if we are in blank NVM programming mode */
2089 fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2090 if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2091 return;
2092
2093 ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
2094 }
2095
2096
2097 /**
2098 * ixgbe_aci_read_nvm - read NVM
2099 * @hw: pointer to the HW struct
2100 * @module_typeid: module pointer location in words from the NVM beginning
2101 * @offset: byte offset from the module beginning
2102 * @length: length of the section to be read (in bytes from the offset)
2103 * @data: command buffer (size [bytes] = length)
2104 * @last_command: tells if this is the last command in a series
2105 * @read_shadow_ram: tell if this is a shadow RAM read
2106 *
2107 * Read the NVM using ACI command (0x0701).
2108 *
2109 * Return: the exit code of the operation.
2110 */
ixgbe_aci_read_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,bool read_shadow_ram)2111 s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
2112 u16 length, void *data, bool last_command,
2113 bool read_shadow_ram)
2114 {
2115 struct ixgbe_aci_desc desc;
2116 struct ixgbe_aci_cmd_nvm *cmd;
2117
2118 cmd = &desc.params.nvm;
2119
2120 if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
2121 return IXGBE_ERR_PARAM;
2122
2123 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
2124
2125 if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
2126 cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
2127
2128 /* If this is the last command in a series, set the proper flag. */
2129 if (last_command)
2130 cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2131 cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2132 cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
2133 cmd->offset_high = (offset >> 16) & 0xFF;
2134 cmd->length = IXGBE_CPU_TO_LE16(length);
2135
2136 return ixgbe_aci_send_cmd(hw, &desc, data, length);
2137 }
2138
2139 /**
2140 * ixgbe_aci_erase_nvm - erase NVM sector
2141 * @hw: pointer to the HW struct
2142 * @module_typeid: module pointer location in words from the NVM beginning
2143 *
2144 * Erase the NVM sector using the ACI command (0x0702).
2145 *
2146 * Return: the exit code of the operation.
2147 */
ixgbe_aci_erase_nvm(struct ixgbe_hw * hw,u16 module_typeid)2148 s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
2149 {
2150 struct ixgbe_aci_desc desc;
2151 struct ixgbe_aci_cmd_nvm *cmd;
2152 s32 status;
2153 __le16 len;
2154
2155 /* read a length value from SR, so module_typeid is equal to 0 */
2156 /* calculate offset where module size is placed from bytes to words */
2157 /* set last command and read from SR values to true */
2158 status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
2159 true);
2160 if (status)
2161 return status;
2162
2163 cmd = &desc.params.nvm;
2164
2165 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
2166
2167 cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2168 cmd->length = len;
2169 cmd->offset_low = 0;
2170 cmd->offset_high = 0;
2171
2172 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2173 }
2174
2175 /**
2176 * ixgbe_aci_update_nvm - update NVM
2177 * @hw: pointer to the HW struct
2178 * @module_typeid: module pointer location in words from the NVM beginning
2179 * @offset: byte offset from the module beginning
2180 * @length: length of the section to be written (in bytes from the offset)
2181 * @data: command buffer (size [bytes] = length)
2182 * @last_command: tells if this is the last command in a series
2183 * @command_flags: command parameters
2184 *
2185 * Update the NVM using the ACI command (0x0703).
2186 *
2187 * Return: the exit code of the operation.
2188 */
ixgbe_aci_update_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,u8 command_flags)2189 s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
2190 u32 offset, u16 length, void *data,
2191 bool last_command, u8 command_flags)
2192 {
2193 struct ixgbe_aci_desc desc;
2194 struct ixgbe_aci_cmd_nvm *cmd;
2195
2196 cmd = &desc.params.nvm;
2197
2198 /* In offset the highest byte must be zeroed. */
2199 if (offset & 0xFF000000)
2200 return IXGBE_ERR_PARAM;
2201
2202 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
2203
2204 cmd->cmd_flags |= command_flags;
2205
2206 /* If this is the last command in a series, set the proper flag. */
2207 if (last_command)
2208 cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2209 cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2210 cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
2211 cmd->offset_high = (offset >> 16) & 0xFF;
2212 cmd->length = IXGBE_CPU_TO_LE16(length);
2213
2214 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2215
2216 return ixgbe_aci_send_cmd(hw, &desc, data, length);
2217 }
2218
2219 /**
2220 * ixgbe_aci_read_nvm_cfg - read an NVM config block
2221 * @hw: pointer to the HW struct
2222 * @cmd_flags: NVM access admin command bits
2223 * @field_id: field or feature ID
2224 * @data: buffer for result
2225 * @buf_size: buffer size
2226 * @elem_count: pointer to count of elements read by FW
2227 *
2228 * Reads a single or multiple feature/field ID and data using ACI command
2229 * (0x0704).
2230 *
2231 * Return: the exit code of the operation.
2232 */
ixgbe_aci_read_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,u16 field_id,void * data,u16 buf_size,u16 * elem_count)2233 s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2234 u16 field_id, void *data, u16 buf_size,
2235 u16 *elem_count)
2236 {
2237 struct ixgbe_aci_cmd_nvm_cfg *cmd;
2238 struct ixgbe_aci_desc desc;
2239 s32 status;
2240
2241 cmd = &desc.params.nvm_cfg;
2242
2243 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
2244
2245 cmd->cmd_flags = cmd_flags;
2246 cmd->id = IXGBE_CPU_TO_LE16(field_id);
2247
2248 status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2249 if (!status && elem_count)
2250 *elem_count = IXGBE_LE16_TO_CPU(cmd->count);
2251
2252 return status;
2253 }
2254
2255 /**
2256 * ixgbe_aci_write_nvm_cfg - write an NVM config block
2257 * @hw: pointer to the HW struct
2258 * @cmd_flags: NVM access admin command bits
2259 * @data: buffer for result
2260 * @buf_size: buffer size
2261 * @elem_count: count of elements to be written
2262 *
2263 * Writes a single or multiple feature/field ID and data using ACI command
2264 * (0x0705).
2265 *
2266 * Return: the exit code of the operation.
2267 */
ixgbe_aci_write_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,void * data,u16 buf_size,u16 elem_count)2268 s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2269 void *data, u16 buf_size, u16 elem_count)
2270 {
2271 struct ixgbe_aci_cmd_nvm_cfg *cmd;
2272 struct ixgbe_aci_desc desc;
2273
2274 cmd = &desc.params.nvm_cfg;
2275
2276 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
2277 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2278
2279 cmd->count = IXGBE_CPU_TO_LE16(elem_count);
2280 cmd->cmd_flags = cmd_flags;
2281
2282 return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2283 }
2284
2285 /**
2286 * ixgbe_nvm_validate_checksum - validate checksum
2287 * @hw: pointer to the HW struct
2288 *
2289 * Verify NVM PFA checksum validity using ACI command (0x0706).
2290 * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
2291 * The function acquires and then releases the NVM ownership.
2292 *
2293 * Return: the exit code of the operation.
2294 */
ixgbe_nvm_validate_checksum(struct ixgbe_hw * hw)2295 s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
2296 {
2297 struct ixgbe_aci_cmd_nvm_checksum *cmd;
2298 struct ixgbe_aci_desc desc;
2299 s32 status;
2300
2301 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2302 if (status)
2303 return status;
2304
2305 cmd = &desc.params.nvm_checksum;
2306
2307 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2308 cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
2309
2310 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2311
2312 ixgbe_release_nvm(hw);
2313
2314 if (!status)
2315 if (IXGBE_LE16_TO_CPU(cmd->checksum) !=
2316 IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
2317 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
2318 "Invalid Shadow Ram checksum");
2319 status = IXGBE_ERR_NVM_CHECKSUM;
2320 }
2321
2322 return status;
2323 }
2324
2325 /**
2326 * ixgbe_nvm_recalculate_checksum - recalculate checksum
2327 * @hw: pointer to the HW struct
2328 *
2329 * Recalculate NVM PFA checksum using ACI command (0x0706).
2330 * The function acquires and then releases the NVM ownership.
2331 *
2332 * Return: the exit code of the operation.
2333 */
ixgbe_nvm_recalculate_checksum(struct ixgbe_hw * hw)2334 s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
2335 {
2336 struct ixgbe_aci_cmd_nvm_checksum *cmd;
2337 struct ixgbe_aci_desc desc;
2338 s32 status;
2339
2340 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2341 if (status)
2342 return status;
2343
2344 cmd = &desc.params.nvm_checksum;
2345
2346 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2347 cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
2348
2349 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2350
2351 ixgbe_release_nvm(hw);
2352
2353 return status;
2354 }
2355
2356 /**
2357 * ixgbe_nvm_write_activate - NVM activate write
2358 * @hw: pointer to the HW struct
2359 * @cmd_flags: flags for write activate command
2360 * @response_flags: response indicators from firmware
2361 *
2362 * Update the control word with the required banks' validity bits
2363 * and dumps the Shadow RAM to flash using ACI command (0x0707).
2364 *
2365 * cmd_flags controls which banks to activate, the preservation level to use
2366 * when activating the NVM bank, and whether an EMP reset is required for
2367 * activation.
2368 *
2369 * Note that the 16bit cmd_flags value is split between two separate 1 byte
2370 * flag values in the descriptor.
2371 *
2372 * On successful return of the firmware command, the response_flags variable
2373 * is updated with the flags reported by firmware indicating certain status,
2374 * such as whether EMP reset is enabled.
2375 *
2376 * Return: the exit code of the operation.
2377 */
ixgbe_nvm_write_activate(struct ixgbe_hw * hw,u16 cmd_flags,u8 * response_flags)2378 s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
2379 u8 *response_flags)
2380 {
2381 struct ixgbe_aci_desc desc;
2382 struct ixgbe_aci_cmd_nvm *cmd;
2383 s32 status;
2384
2385 cmd = &desc.params.nvm;
2386 ixgbe_fill_dflt_direct_cmd_desc(&desc,
2387 ixgbe_aci_opc_nvm_write_activate);
2388
2389 cmd->cmd_flags = LO_BYTE(cmd_flags);
2390 cmd->offset_high = HI_BYTE(cmd_flags);
2391
2392 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2393 if (!status && response_flags)
2394 *response_flags = cmd->cmd_flags;
2395
2396 return status;
2397 }
2398
2399 /**
2400 * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
2401 * @hw: pointer to the HW structure
2402 * @bank: whether to read from the active or inactive flash bank
2403 * @module: the module to read from
2404 *
2405 * Based on the module, lookup the module offset from the beginning of the
2406 * flash.
2407 *
2408 * Return: the flash offset. Note that a value of zero is invalid and must be
2409 * treated as an error.
2410 */
ixgbe_get_flash_bank_offset(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module)2411 static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
2412 enum ixgbe_bank_select bank,
2413 u16 module)
2414 {
2415 struct ixgbe_bank_info *banks = &hw->flash.banks;
2416 enum ixgbe_flash_bank active_bank;
2417 bool second_bank_active;
2418 u32 offset, size;
2419
2420 switch (module) {
2421 case E610_SR_1ST_NVM_BANK_PTR:
2422 offset = banks->nvm_ptr;
2423 size = banks->nvm_size;
2424 active_bank = banks->nvm_bank;
2425 break;
2426 case E610_SR_1ST_OROM_BANK_PTR:
2427 offset = banks->orom_ptr;
2428 size = banks->orom_size;
2429 active_bank = banks->orom_bank;
2430 break;
2431 case E610_SR_NETLIST_BANK_PTR:
2432 offset = banks->netlist_ptr;
2433 size = banks->netlist_size;
2434 active_bank = banks->netlist_bank;
2435 break;
2436 default:
2437 return 0;
2438 }
2439
2440 switch (active_bank) {
2441 case IXGBE_1ST_FLASH_BANK:
2442 second_bank_active = false;
2443 break;
2444 case IXGBE_2ND_FLASH_BANK:
2445 second_bank_active = true;
2446 break;
2447 default:
2448 return 0;
2449 }
2450
2451 /* The second flash bank is stored immediately following the first
2452 * bank. Based on whether the 1st or 2nd bank is active, and whether
2453 * we want the active or inactive bank, calculate the desired offset.
2454 */
2455 switch (bank) {
2456 case IXGBE_ACTIVE_FLASH_BANK:
2457 return offset + (second_bank_active ? size : 0);
2458 case IXGBE_INACTIVE_FLASH_BANK:
2459 return offset + (second_bank_active ? 0 : size);
2460 }
2461
2462 return 0;
2463 }
2464
2465 /**
2466 * ixgbe_read_flash_module - Read a word from one of the main NVM modules
2467 * @hw: pointer to the HW structure
2468 * @bank: which bank of the module to read
2469 * @module: the module to read
2470 * @offset: the offset into the module in bytes
2471 * @data: storage for the word read from the flash
2472 * @length: bytes of data to read
2473 *
2474 * Read data from the specified flash module. The bank parameter indicates
2475 * whether or not to read from the active bank or the inactive bank of that
2476 * module.
2477 *
2478 * The word will be read using flat NVM access, and relies on the
2479 * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
2480 * during initialization.
2481 *
2482 * Return: the exit code of the operation.
2483 */
ixgbe_read_flash_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module,u32 offset,u8 * data,u32 length)2484 static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
2485 enum ixgbe_bank_select bank,
2486 u16 module, u32 offset, u8 *data, u32 length)
2487 {
2488 s32 status;
2489 u32 start;
2490
2491 start = ixgbe_get_flash_bank_offset(hw, bank, module);
2492 if (!start) {
2493 return IXGBE_ERR_PARAM;
2494 }
2495
2496 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2497 if (status)
2498 return status;
2499
2500 status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
2501
2502 ixgbe_release_nvm(hw);
2503
2504 return status;
2505 }
2506
2507 /**
2508 * ixgbe_read_netlist_module - Read data from the netlist module area
2509 * @hw: pointer to the HW structure
2510 * @bank: whether to read from the active or inactive module
2511 * @offset: offset into the netlist to read from
2512 * @data: storage for returned word value
2513 *
2514 * Read a word from the specified netlist bank.
2515 *
2516 * Return: the exit code of the operation.
2517 */
ixgbe_read_netlist_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2518 static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
2519 enum ixgbe_bank_select bank,
2520 u32 offset, u16 *data)
2521 {
2522 __le16 data_local;
2523 s32 status;
2524
2525 status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
2526 offset * sizeof(u16),
2527 (u8 *)&data_local,
2528 sizeof(u16));
2529 if (!status)
2530 *data = IXGBE_LE16_TO_CPU(data_local);
2531
2532 return status;
2533 }
2534
2535 /**
2536 * ixgbe_read_nvm_module - Read from the active main NVM module
2537 * @hw: pointer to the HW structure
2538 * @bank: whether to read from active or inactive NVM module
2539 * @offset: offset into the NVM module to read, in words
2540 * @data: storage for returned word value
2541 *
2542 * Read the specified word from the active NVM module. This includes the CSS
2543 * header at the start of the NVM module.
2544 *
2545 * Return: the exit code of the operation.
2546 */
ixgbe_read_nvm_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2547 static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
2548 enum ixgbe_bank_select bank,
2549 u32 offset, u16 *data)
2550 {
2551 __le16 data_local;
2552 s32 status;
2553
2554 status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
2555 offset * sizeof(u16),
2556 (u8 *)&data_local,
2557 sizeof(u16));
2558 if (!status)
2559 *data = IXGBE_LE16_TO_CPU(data_local);
2560
2561 return status;
2562 }
2563
2564 /**
2565 * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
2566 * NVM CSS header
2567 * @hw: pointer to the HW struct
2568 * @bank: whether to read from the active or inactive flash bank
2569 * @hdr_len: storage for header length in words
2570 *
2571 * Read the CSS header length from the NVM CSS header and add the
2572 * Authentication header size, and then convert to words.
2573 *
2574 * Return: the exit code of the operation.
2575 */
ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * hdr_len)2576 static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
2577 enum ixgbe_bank_select bank,
2578 u32 *hdr_len)
2579 {
2580 u16 hdr_len_l, hdr_len_h;
2581 u32 hdr_len_dword;
2582 s32 status;
2583
2584 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
2585 &hdr_len_l);
2586 if (status)
2587 return status;
2588
2589 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
2590 &hdr_len_h);
2591 if (status)
2592 return status;
2593
2594 /* CSS header length is in DWORD, so convert to words and add
2595 * authentication header size
2596 */
2597 hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
2598 *hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
2599
2600 return IXGBE_SUCCESS;
2601 }
2602
2603 /**
2604 * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
2605 * @hw: pointer to the HW structure
2606 * @bank: whether to read from the active or inactive NVM module
2607 * @offset: offset into the Shadow RAM copy to read, in words
2608 * @data: storage for returned word value
2609 *
2610 * Read the specified word from the copy of the Shadow RAM found in the
2611 * specified NVM module.
2612 *
2613 * Return: the exit code of the operation.
2614 */
ixgbe_read_nvm_sr_copy(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2615 static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
2616 enum ixgbe_bank_select bank,
2617 u32 offset, u16 *data)
2618 {
2619 u32 hdr_len;
2620 s32 status;
2621
2622 status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
2623 if (status)
2624 return status;
2625
2626 hdr_len = ROUND_UP(hdr_len, 32);
2627
2628 return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
2629 }
2630
2631 /**
2632 * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
2633 * @hw: pointer to the HW struct
2634 * @minsrevs: structure to store NVM and OROM minsrev values
2635 *
2636 * Read the Minimum Security Revision TLV and extract
2637 * the revision values from the flash image
2638 * into a readable structure for processing.
2639 *
2640 * Return: the exit code of the operation.
2641 */
ixgbe_get_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2642 s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
2643 struct ixgbe_minsrev_info *minsrevs)
2644 {
2645 struct ixgbe_aci_cmd_nvm_minsrev data;
2646 s32 status;
2647 u16 valid;
2648
2649 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2650 if (status)
2651 return status;
2652
2653 status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
2654 0, sizeof(data), &data,
2655 true, false);
2656
2657 ixgbe_release_nvm(hw);
2658
2659 if (status)
2660 return status;
2661
2662 valid = IXGBE_LE16_TO_CPU(data.validity);
2663
2664 /* Extract NVM minimum security revision */
2665 if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
2666 u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
2667 u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
2668
2669 minsrevs->nvm = minsrev_h << 16 | minsrev_l;
2670 minsrevs->nvm_valid = true;
2671 }
2672
2673 /* Extract the OROM minimum security revision */
2674 if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
2675 u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
2676 u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
2677
2678 minsrevs->orom = minsrev_h << 16 | minsrev_l;
2679 minsrevs->orom_valid = true;
2680 }
2681
2682 return IXGBE_SUCCESS;
2683 }
2684
2685 /**
2686 * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
2687 * @hw: pointer to the HW struct
2688 * @minsrevs: minimum security revision information
2689 *
2690 * Update the NVM or Option ROM minimum security revision fields in the PFA
2691 * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
2692 * fields to determine what update is being requested. If the valid bit is not
2693 * set for that module, then the associated minsrev will be left as is.
2694 *
2695 * Return: the exit code of the operation.
2696 */
ixgbe_update_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2697 s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
2698 struct ixgbe_minsrev_info *minsrevs)
2699 {
2700 struct ixgbe_aci_cmd_nvm_minsrev data;
2701 s32 status;
2702
2703 if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
2704 return IXGBE_ERR_PARAM;
2705 }
2706
2707 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2708 if (status)
2709 return status;
2710
2711 /* Get current data */
2712 status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2713 sizeof(data), &data, true, false);
2714 if (status)
2715 goto exit_release_res;
2716
2717 if (minsrevs->nvm_valid) {
2718 data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
2719 data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
2720 data.validity |=
2721 IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
2722 }
2723
2724 if (minsrevs->orom_valid) {
2725 data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
2726 data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
2727 data.validity |=
2728 IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
2729 }
2730
2731 /* Update flash data */
2732 status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2733 sizeof(data), &data, false,
2734 IXGBE_ACI_NVM_SPECIAL_UPDATE);
2735 if (status)
2736 goto exit_release_res;
2737
2738 /* Dump the Shadow RAM to the flash */
2739 status = ixgbe_nvm_write_activate(hw, 0, NULL);
2740
2741 exit_release_res:
2742 ixgbe_release_nvm(hw);
2743
2744 return status;
2745 }
2746
2747 /**
2748 * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
2749 * @hw: pointer to the HW struct
2750 * @bank: whether to read from the active or inactive flash bank
2751 * @srev: storage for security revision
2752 *
2753 * Read the security revision out of the CSS header of the active NVM module
2754 * bank.
2755 *
2756 * Return: the exit code of the operation.
2757 */
ixgbe_get_nvm_srev(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * srev)2758 static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
2759 enum ixgbe_bank_select bank, u32 *srev)
2760 {
2761 u16 srev_l, srev_h;
2762 s32 status;
2763
2764 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
2765 if (status)
2766 return status;
2767
2768 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
2769 if (status)
2770 return status;
2771
2772 *srev = srev_h << 16 | srev_l;
2773
2774 return IXGBE_SUCCESS;
2775 }
2776
2777 /**
2778 * ixgbe_get_nvm_ver_info - Read NVM version information
2779 * @hw: pointer to the HW struct
2780 * @bank: whether to read from the active or inactive flash bank
2781 * @nvm: pointer to NVM info structure
2782 *
2783 * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
2784 * in the nvm info structure.
2785 *
2786 * Return: the exit code of the operation.
2787 */
ixgbe_get_nvm_ver_info(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,struct ixgbe_nvm_info * nvm)2788 static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
2789 enum ixgbe_bank_select bank,
2790 struct ixgbe_nvm_info *nvm)
2791 {
2792 u16 eetrack_lo, eetrack_hi, ver;
2793 s32 status;
2794
2795 status = ixgbe_read_nvm_sr_copy(hw, bank,
2796 E610_SR_NVM_DEV_STARTER_VER, &ver);
2797 if (status) {
2798 return status;
2799 }
2800
2801 nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
2802 nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
2803
2804 status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
2805 &eetrack_lo);
2806 if (status) {
2807 return status;
2808 }
2809 status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
2810 &eetrack_hi);
2811 if (status) {
2812 return status;
2813 }
2814
2815 nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
2816
2817 status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
2818
2819 return IXGBE_SUCCESS;
2820 }
2821
/**
 * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
 * @hw: pointer to the HW structure
 * @nvm: storage for NVM version information
 *
 * Reads the NVM EETRACK ID, Map version, and security revision of the
 * inactive NVM bank. Used to access version data for a pending update that
 * has not yet been activated.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
{
	return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
}
2837
/**
 * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
 * @hw: pointer to the HW structure
 * @nvm: storage for NVM version information
 *
 * Reads the NVM EETRACK ID, Map version, and security revision of the
 * active NVM bank.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
{
	return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
}
2852
/**
 * ixgbe_get_netlist_info - Read the netlist version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @netlist: pointer to netlist version info structure
 *
 * Get the netlist version information from the requested bank. Reads the Link
 * Topology section to find the Netlist ID block and extract the relevant
 * information into the netlist version structure.
 *
 * Return: the exit code of the operation.
 */
static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  struct ixgbe_netlist_info *netlist)
{
	u16 module_id, length, node_count, i;
	u16 *id_blk;
	s32 status;

	status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
					   &module_id);
	if (status)
		return status;

	/* The netlist bank must begin with the Link Topology module */
	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID) {
		return IXGBE_ERR_NVM;
	}

	status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
					   &length);
	if (status)
		return status;

	/* sanity check that we have at least enough words to store the
	 * netlist ID block
	 */
	if (length < IXGBE_NETLIST_ID_BLK_SIZE) {
		return IXGBE_ERR_NVM;
	}

	status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
					   &node_count);
	if (status)
		return status;
	/* Only the low bits of the word carry the node count; the ID block
	 * offset below is computed relative to it.
	 */
	node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;

	id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
				     sizeof(*id_blk));
	if (!id_blk)
		return IXGBE_ERR_NO_SPACE;

	/* Read out the entire Netlist ID Block at once. */
	status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
					 IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
					 (u8 *)id_blk,
					 IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
	if (status)
		goto exit_error;

	/* Convert the block from flash little-endian to host order in place */
	for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
		id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);

	/* Each 32-bit field is split across two consecutive 16-bit words */
	netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
	netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
	netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
	netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
		       id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
	netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];

exit_error:
	ixgbe_free(hw, id_blk);

	return status;
}
2934
/**
 * ixgbe_get_inactive_netlist_ver - Read netlist version from inactive bank
 * @hw: pointer to the HW struct
 * @netlist: pointer to netlist version info structure
 *
 * Read the netlist version data from the inactive netlist bank. Used to
 * extract version data of a pending flash update in order to display the
 * version data.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
				   struct ixgbe_netlist_info *netlist)
{
	return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
}
2951
2952 /**
2953 * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
2954 * @hw: pointer to the HW structure
2955 * @offset: the word offset of the Shadow RAM word to read
2956 * @pointer: pointer value read from Shadow RAM
2957 *
2958 * Read the given Shadow RAM word, and convert it to a pointer value specified
2959 * in bytes. This function assumes the specified offset is a valid pointer
2960 * word.
2961 *
2962 * Each pointer word specifies whether it is stored in word size or 4KB
2963 * sector size by using the highest bit. The reported pointer value will be in
2964 * bytes, intended for flat NVM reads.
2965 *
2966 * Return: the exit code of the operation.
2967 */
ixgbe_read_sr_pointer(struct ixgbe_hw * hw,u16 offset,u32 * pointer)2968 static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
2969 {
2970 s32 status;
2971 u16 value;
2972
2973 status = ixgbe_read_ee_aci_E610(hw, offset, &value);
2974 if (status)
2975 return status;
2976
2977 /* Determine if the pointer is in 4KB or word units */
2978 if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
2979 *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
2980 else
2981 *pointer = value * 2;
2982
2983 return IXGBE_SUCCESS;
2984 }
2985
2986 /**
2987 * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
2988 * @hw: pointer to the HW structure
2989 * @offset: the word offset of the Shadow RAM to read
2990 * @size: size value read from the Shadow RAM
2991 *
2992 * Read the given Shadow RAM word, and convert it to an area size value
2993 * specified in bytes. This function assumes the specified offset is a valid
2994 * area size word.
2995 *
2996 * Each area size word is specified in 4KB sector units. This function reports
2997 * the size in bytes, intended for flat NVM reads.
2998 *
2999 * Return: the exit code of the operation.
3000 */
ixgbe_read_sr_area_size(struct ixgbe_hw * hw,u16 offset,u32 * size)3001 static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
3002 {
3003 s32 status;
3004 u16 value;
3005
3006 status = ixgbe_read_ee_aci_E610(hw, offset, &value);
3007 if (status)
3008 return status;
3009
3010 /* Area sizes are always specified in 4KB units */
3011 *size = value * 4 * 1024;
3012
3013 return IXGBE_SUCCESS;
3014 }
3015
/**
 * ixgbe_discover_flash_size - Discover the available flash size.
 * @hw: pointer to the HW struct
 *
 * The device flash could be up to 16MB in size. However, it is possible that
 * the actual size is smaller. Use bisection to determine the accessible size
 * of flash memory.
 *
 * Return: the exit code of the operation.
 */
static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
{
	u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
	s32 status;

	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (status)
		return status;

	/* Binary search: probe one byte at the midpoint. A read the FW
	 * rejects with EINVAL means the offset lies past the end of flash;
	 * a successful read means it is within bounds.
	 */
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
		if (status == IXGBE_ERR_ACI_ERROR &&
		    hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
			/* Expected out-of-range response: shrink the range */
			status = IXGBE_SUCCESS;
			max_size = offset;
		} else if (!status) {
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	/* max_size is the first inaccessible offset, i.e. the flash size.
	 * Note the error path above deliberately skips this assignment.
	 */
	hw->flash.flash_size = max_size;

err_read_flat_nvm:
	ixgbe_release_nvm(hw);

	return status;
}
3060
3061 /**
3062 * ixgbe_determine_active_flash_banks - Discover active bank for each module
3063 * @hw: pointer to the HW struct
3064 *
3065 * Read the Shadow RAM control word and determine which banks are active for
3066 * the NVM, OROM, and Netlist modules. Also read and calculate the associated
3067 * pointer and size. These values are then cached into the ixgbe_flash_info
3068 * structure for later use in order to calculate the correct offset to read
3069 * from the active module.
3070 *
3071 * Return: the exit code of the operation.
3072 */
ixgbe_determine_active_flash_banks(struct ixgbe_hw * hw)3073 static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
3074 {
3075 struct ixgbe_bank_info *banks = &hw->flash.banks;
3076 u16 ctrl_word;
3077 s32 status;
3078
3079 status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
3080 if (status) {
3081 return status;
3082 }
3083
3084 /* Check that the control word indicates validity */
3085 if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
3086 IXGBE_SR_CTRL_WORD_VALID) {
3087 return IXGBE_ERR_CONFIG;
3088 }
3089
3090 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
3091 banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
3092 else
3093 banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
3094
3095 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
3096 banks->orom_bank = IXGBE_1ST_FLASH_BANK;
3097 else
3098 banks->orom_bank = IXGBE_2ND_FLASH_BANK;
3099
3100 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
3101 banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
3102 else
3103 banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
3104
3105 status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
3106 &banks->nvm_ptr);
3107 if (status) {
3108 return status;
3109 }
3110
3111 status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
3112 &banks->nvm_size);
3113 if (status) {
3114 return status;
3115 }
3116
3117 status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
3118 &banks->orom_ptr);
3119 if (status) {
3120 return status;
3121 }
3122
3123 status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
3124 &banks->orom_size);
3125 if (status) {
3126 return status;
3127 }
3128
3129 status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
3130 &banks->netlist_ptr);
3131 if (status) {
3132 return status;
3133 }
3134
3135 status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
3136 &banks->netlist_size);
3137 if (status) {
3138 return status;
3139 }
3140
3141 return IXGBE_SUCCESS;
3142 }
3143
3144 /**
3145 * ixgbe_init_nvm - initializes NVM setting
3146 * @hw: pointer to the HW struct
3147 *
3148 * Read and populate NVM settings such as Shadow RAM size,
3149 * max_timeout, and blank_nvm_mode
3150 *
3151 * Return: the exit code of the operation.
3152 */
ixgbe_init_nvm(struct ixgbe_hw * hw)3153 s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
3154 {
3155 struct ixgbe_flash_info *flash = &hw->flash;
3156 u32 fla, gens_stat, status;
3157 u8 sr_size;
3158
3159 /* The SR size is stored regardless of the NVM programming mode
3160 * as the blank mode may be used in the factory line.
3161 */
3162 gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
3163 sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
3164
3165 /* Switching to words (sr_size contains power of 2) */
3166 flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
3167
3168 /* Check if we are in the normal or blank NVM programming mode */
3169 fla = IXGBE_READ_REG(hw, GLNVM_FLA);
3170 if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
3171 flash->blank_nvm_mode = false;
3172 } else {
3173 /* Blank programming mode */
3174 flash->blank_nvm_mode = true;
3175 return IXGBE_ERR_NVM_BLANK_MODE;
3176 }
3177
3178 status = ixgbe_discover_flash_size(hw);
3179 if (status) {
3180 return status;
3181 }
3182
3183 status = ixgbe_determine_active_flash_banks(hw);
3184 if (status) {
3185 return status;
3186 }
3187
3188 status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3189 &flash->nvm);
3190 if (status) {
3191 return status;
3192 }
3193
3194 /* read the netlist version information */
3195 status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3196 &flash->netlist);
3197
3198 return IXGBE_SUCCESS;
3199 }
3200
3201 /**
3202 * ixgbe_sanitize_operate - Clear the user data
3203 * @hw: pointer to the HW struct
3204 *
3205 * Clear user data from NVM using ACI command (0x070C).
3206 *
3207 * Return: the exit code of the operation.
3208 */
ixgbe_sanitize_operate(struct ixgbe_hw * hw)3209 s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
3210 {
3211 s32 status;
3212 u8 values;
3213
3214 u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
3215 IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
3216
3217 status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
3218 if (status)
3219 return status;
3220 if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3221 !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
3222 ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3223 !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
3224 ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
3225 !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
3226 return IXGBE_ERR_ACI_ERROR;
3227
3228 return IXGBE_SUCCESS;
3229 }
3230
3231 /**
3232 * ixgbe_sanitize_nvm - Sanitize NVM
3233 * @hw: pointer to the HW struct
3234 * @cmd_flags: flag to the ACI command
3235 * @values: values returned from the command
3236 *
3237 * Sanitize NVM using ACI command (0x070C).
3238 *
3239 * Return: the exit code of the operation.
3240 */
ixgbe_sanitize_nvm(struct ixgbe_hw * hw,u8 cmd_flags,u8 * values)3241 s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
3242 {
3243 struct ixgbe_aci_desc desc;
3244 struct ixgbe_aci_cmd_nvm_sanitization *cmd;
3245 s32 status;
3246
3247 cmd = &desc.params.nvm_sanitization;
3248 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
3249 cmd->cmd_flags = cmd_flags;
3250
3251 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3252 if (values)
3253 *values = cmd->values;
3254
3255 return status;
3256 }
3257
3258 /**
3259 * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
3260 * @hw: pointer to the HW structure
3261 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3262 * @data: word read from the Shadow RAM
3263 *
3264 * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
3265 *
3266 * Return: the exit code of the operation.
3267 */
ixgbe_read_sr_word_aci(struct ixgbe_hw * hw,u16 offset,u16 * data)3268 s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
3269 {
3270 u32 bytes = sizeof(u16);
3271 __le16 data_local;
3272 s32 status;
3273
3274 status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
3275 (u8 *)&data_local, true);
3276 if (status)
3277 return status;
3278
3279 *data = IXGBE_LE16_TO_CPU(data_local);
3280 return IXGBE_SUCCESS;
3281 }
3282
3283 /**
3284 * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
3285 * @hw: pointer to the HW structure
3286 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3287 * @words: (in) number of words to read; (out) number of words actually read
3288 * @data: words read from the Shadow RAM
3289 *
3290 * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
3291 * taken before reading the buffer and later released.
3292 *
3293 * Return: the exit code of the operation.
3294 */
ixgbe_read_sr_buf_aci(struct ixgbe_hw * hw,u16 offset,u16 * words,u16 * data)3295 s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
3296 u16 *data)
3297 {
3298 u32 bytes = *words * 2, i;
3299 s32 status;
3300
3301 status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
3302
3303 *words = bytes / 2;
3304
3305 for (i = 0; i < *words; i++)
3306 data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
3307
3308 return status;
3309 }
3310
/**
 * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
 * from being exceeded in case of Shadow RAM read requests and ensures that no
 * single read request exceeds the maximum 4KB read for a single admin command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
			u8 *data, bool read_shadow_ram)
{
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;
	s32 status;

	/* Report zero bytes read until the loop below makes progress */
	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) >
				 (hw->eeprom.word_size * 2u))) {
		return IXGBE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also 4KB.
		 */
		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
		read_size = MIN_T(u32,
				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
				  inlen - bytes_read);

		/* Tell FW when the final chunk of the request is issued */
		last_cmd = !(bytes_read + read_size < inlen);

		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
		 * maximum size guarantees that it will fit within the 2 bytes.
		 */
		status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
					    offset, (u16)read_size,
					    data + bytes_read, last_cmd,
					    read_shadow_ram);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* Even on failure, report how much was successfully read */
	*length = bytes_read;
	return status;
}
3377
3378 /**
3379 * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
3380 * @hw: pointer to the HW structure
3381 * @offset: offset in words from module start
3382 * @words: number of words to access
3383 *
3384 * Check if all the parameters are valid
3385 * before performing any Shadow RAM read/write operations.
3386 *
3387 * Return: the exit code of the operation.
3388 * * - IXGBE_SUCCESS - success.
3389 * * - IXGBE_ERR_PARAM - NVM error: offset beyond SR limit or
3390 * NVM error: tried to access more words then the set limit or
3391 * NVM error: cannot spread over two sectors.
3392 */
ixgbe_check_sr_access_params(struct ixgbe_hw * hw,u32 offset,u16 words)3393 static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
3394 u16 words)
3395 {
3396 if ((offset + words) > hw->eeprom.word_size) {
3397 return IXGBE_ERR_PARAM;
3398 }
3399
3400 if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
3401 /* We can access only up to 4KB (one sector),
3402 * in one Admin Command write
3403 */
3404 return IXGBE_ERR_PARAM;
3405 }
3406
3407 if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
3408 (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
3409 /* A single access cannot spread over two sectors */
3410 return IXGBE_ERR_PARAM;
3411 }
3412
3413 return IXGBE_SUCCESS;
3414 }
3415
3416 /**
3417 * ixgbe_write_sr_word_aci - Writes Shadow RAM word
3418 * @hw: pointer to the HW structure
3419 * @offset: offset of the Shadow RAM word to write
3420 * @data: word to write to the Shadow RAM
3421 *
3422 * Writes a 16 bit word to the Shadow RAM using the admin command.
3423 * NVM ownership must be acquired before calling this function and released
3424 * by a caller. To commit SR to NVM update checksum function should be called.
3425 *
3426 * Return: the exit code of the operation.
3427 */
ixgbe_write_sr_word_aci(struct ixgbe_hw * hw,u32 offset,const u16 * data)3428 s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
3429 {
3430 __le16 data_local = IXGBE_CPU_TO_LE16(*data);
3431 s32 status;
3432
3433 status = ixgbe_check_sr_access_params(hw, offset, 1);
3434 if (!status)
3435 status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3436 BYTES_PER_WORD, &data_local,
3437 false, 0);
3438
3439 return status;
3440 }
3441
3442 /**
3443 * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
3444 * @hw: pointer to the HW structure
3445 * @offset: offset of the Shadow RAM buffer to write
3446 * @words: number of words to write
3447 * @data: words to write to the Shadow RAM
3448 *
3449 * Writes a 16 bit word to the Shadow RAM using the admin command.
3450 * NVM ownership must be acquired before calling this function and released
3451 * by a caller. To commit SR to NVM update checksum function should be called.
3452 *
3453 * Return: the exit code of the operation.
3454 */
ixgbe_write_sr_buf_aci(struct ixgbe_hw * hw,u32 offset,u16 words,const u16 * data)3455 s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
3456 const u16 *data)
3457 {
3458 __le16 *data_local;
3459 s32 status;
3460 void *vmem;
3461 u32 i;
3462
3463 vmem = ixgbe_calloc(hw, words, sizeof(u16));
3464 if (!vmem)
3465 return IXGBE_ERR_OUT_OF_MEM;
3466 data_local = (__le16 *)vmem;
3467
3468 for (i = 0; i < words; i++)
3469 data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
3470
3471 /* Here we will only write one buffer as the size of the modules
3472 * mirrored in the Shadow RAM is always less than 4K.
3473 */
3474 status = ixgbe_check_sr_access_params(hw, offset, words);
3475 if (!status)
3476 status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3477 BYTES_PER_WORD * words,
3478 data_local, false, 0);
3479
3480 ixgbe_free(hw, vmem);
3481
3482 return status;
3483 }
3484
3485 /**
3486 * ixgbe_aci_alternate_write - write to alternate structure
3487 * @hw: pointer to the hardware structure
3488 * @reg_addr0: address of first dword to be written
3489 * @reg_val0: value to be written under 'reg_addr0'
3490 * @reg_addr1: address of second dword to be written
3491 * @reg_val1: value to be written under 'reg_addr1'
3492 *
3493 * Write one or two dwords to alternate structure using ACI command (0x0900).
3494 * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3495 *
3496 * Return: 0 on success and error code on failure.
3497 */
ixgbe_aci_alternate_write(struct ixgbe_hw * hw,u32 reg_addr0,u32 reg_val0,u32 reg_addr1,u32 reg_val1)3498 s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
3499 u32 reg_val0, u32 reg_addr1, u32 reg_val1)
3500 {
3501 struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3502 struct ixgbe_aci_desc desc;
3503 s32 status;
3504
3505 cmd = &desc.params.read_write_alt_direct;
3506
3507 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
3508 cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3509 cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3510 cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
3511 cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
3512
3513 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3514
3515 return status;
3516 }
3517
3518 /**
3519 * ixgbe_aci_alternate_read - read from alternate structure
3520 * @hw: pointer to the hardware structure
3521 * @reg_addr0: address of first dword to be read
3522 * @reg_val0: pointer for data read from 'reg_addr0'
3523 * @reg_addr1: address of second dword to be read
3524 * @reg_val1: pointer for data read from 'reg_addr1'
3525 *
3526 * Read one or two dwords from alternate structure using ACI command (0x0902).
3527 * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3528 * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
3529 * is read.
3530 *
3531 * Return: 0 on success and error code on failure.
3532 */
ixgbe_aci_alternate_read(struct ixgbe_hw * hw,u32 reg_addr0,u32 * reg_val0,u32 reg_addr1,u32 * reg_val1)3533 s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
3534 u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
3535 {
3536 struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3537 struct ixgbe_aci_desc desc;
3538 s32 status;
3539
3540 cmd = &desc.params.read_write_alt_direct;
3541
3542 if (!reg_val0)
3543 return IXGBE_ERR_PARAM;
3544
3545 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
3546 cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3547 cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3548
3549 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3550
3551 if (status == IXGBE_SUCCESS) {
3552 *reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
3553
3554 if (reg_val1)
3555 *reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
3556 }
3557
3558 return status;
3559 }
3560
/**
 * ixgbe_aci_alternate_write_done - check if writing to alternate structure
 * is done
 * @hw: pointer to the HW structure.
 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
 * @reset_needed: indicates the SW should trigger GLOBAL reset
 *
 * Indicates to the FW that alternate structures have been changed.
 *
 * Return: 0 on success and error code on failure.
 */
s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
				   bool *reset_needed)
{
	struct ixgbe_aci_cmd_done_alt_write *cmd;
	struct ixgbe_aci_desc desc;
	s32 status;

	cmd = &desc.params.done_alt_write;

	if (!reset_needed)
		return IXGBE_ERR_PARAM;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
	/* NOTE(review): flags is written as a plain u8 here but read back
	 * below through IXGBE_LE16_TO_CPU -- presumably a 16-bit LE field
	 * reused for the response; confirm against the descriptor layout.
	 */
	cmd->flags = bios_mode;

	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
	if (!status)
		/* FW sets RESET_NEEDED in the response flags when a GLOBAL
		 * reset is required for the change to take effect.
		 */
		*reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
				 IXGBE_ACI_RESP_RESET_NEEDED) != 0;

	return status;
}
3594
3595 /**
3596 * ixgbe_aci_alternate_clear - clear alternate structure
3597 * @hw: pointer to the HW structure.
3598 *
3599 * Clear the alternate structures of the port from which the function
3600 * is called.
3601 *
3602 * Return: 0 on success and error code on failure.
3603 */
ixgbe_aci_alternate_clear(struct ixgbe_hw * hw)3604 s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
3605 {
3606 struct ixgbe_aci_desc desc;
3607 s32 status;
3608
3609 ixgbe_fill_dflt_direct_cmd_desc(&desc,
3610 ixgbe_aci_opc_clear_port_alt_write);
3611
3612 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3613
3614 return status;
3615 }
3616
3617 /**
3618 * ixgbe_aci_get_internal_data - get internal FW/HW data
3619 * @hw: pointer to the hardware structure
3620 * @cluster_id: specific cluster to dump
3621 * @table_id: table ID within cluster
3622 * @start: index of line in the block to read
3623 * @buf: dump buffer
3624 * @buf_size: dump buffer size
3625 * @ret_buf_size: return buffer size (returned by FW)
3626 * @ret_next_cluster: next cluster to read (returned by FW)
3627 * @ret_next_table: next block to read (returned by FW)
3628 * @ret_next_index: next index to read (returned by FW)
3629 *
3630 * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
3631 *
3632 * Return: the exit code of the operation.
3633 */
ixgbe_aci_get_internal_data(struct ixgbe_hw * hw,u16 cluster_id,u16 table_id,u32 start,void * buf,u16 buf_size,u16 * ret_buf_size,u16 * ret_next_cluster,u16 * ret_next_table,u32 * ret_next_index)3634 s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
3635 u16 table_id, u32 start, void *buf,
3636 u16 buf_size, u16 *ret_buf_size,
3637 u16 *ret_next_cluster, u16 *ret_next_table,
3638 u32 *ret_next_index)
3639 {
3640 struct ixgbe_aci_cmd_debug_dump_internals *cmd;
3641 struct ixgbe_aci_desc desc;
3642 s32 status;
3643
3644 cmd = &desc.params.debug_dump;
3645
3646 if (buf_size == 0 || !buf)
3647 return IXGBE_ERR_PARAM;
3648
3649 ixgbe_fill_dflt_direct_cmd_desc(&desc,
3650 ixgbe_aci_opc_debug_dump_internals);
3651
3652 cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
3653 cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
3654 cmd->idx = IXGBE_CPU_TO_LE32(start);
3655
3656 status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
3657
3658 if (!status) {
3659 if (ret_buf_size)
3660 *ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
3661 if (ret_next_cluster)
3662 *ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
3663 if (ret_next_table)
3664 *ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
3665 if (ret_next_index)
3666 *ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
3667 }
3668
3669 return status;
3670 }
3671
3672 /**
3673 * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
3674 * @cmd: NVM access command structure
3675 *
3676 * Validates that an NVM access structure is request to read or write a valid
3677 * register offset. First validates that the module and flags are correct, and
3678 * then ensures that the register offset is one of the accepted registers.
3679 *
3680 * Return: 0 if the register access is valid, out of range error code otherwise.
3681 */
3682 static s32
ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd * cmd)3683 ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
3684 {
3685 u16 i;
3686
3687 switch (cmd->offset) {
3688 case GL_HICR:
3689 case GL_HICR_EN: /* Note, this register is read only */
3690 case GL_FWSTS:
3691 case GL_MNG_FWSM:
3692 case GLNVM_GENS:
3693 case GLNVM_FLA:
3694 case GL_FWRESETCNT:
3695 return 0;
3696 default:
3697 break;
3698 }
3699
3700 for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
3701 if (cmd->offset == (u32)GL_HIDA(i))
3702 return 0;
3703
3704 for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
3705 if (cmd->offset == (u32)GL_HIBA(i))
3706 return 0;
3707
3708 /* All other register offsets are not valid */
3709 return IXGBE_ERR_OUT_OF_RANGE;
3710 }
3711
3712 /**
3713 * ixgbe_nvm_access_read - Handle an NVM read request
3714 * @hw: pointer to the HW struct
3715 * @cmd: NVM access command to process
3716 * @data: storage for the register value read
3717 *
3718 * Process an NVM access request to read a register.
3719 *
3720 * Return: 0 if the register read is valid and successful,
3721 * out of range error code otherwise.
3722 */
ixgbe_nvm_access_read(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3723 static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
3724 struct ixgbe_nvm_access_cmd *cmd,
3725 struct ixgbe_nvm_access_data *data)
3726 {
3727 s32 status;
3728
3729 /* Always initialize the output data, even on failure */
3730 memset(&data->regval, 0, cmd->data_size);
3731
3732 /* Make sure this is a valid read/write access request */
3733 status = ixgbe_validate_nvm_rw_reg(cmd);
3734 if (status)
3735 return status;
3736
3737 DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
3738
3739 /* Read the register and store the contents in the data field */
3740 data->regval = IXGBE_READ_REG(hw, cmd->offset);
3741
3742 return 0;
3743 }
3744
3745 /**
3746 * ixgbe_nvm_access_write - Handle an NVM write request
3747 * @hw: pointer to the HW struct
3748 * @cmd: NVM access command to process
3749 * @data: NVM access data to write
3750 *
3751 * Process an NVM access request to write a register.
3752 *
3753 * Return: 0 if the register write is valid and successful,
3754 * out of range error code otherwise.
3755 */
ixgbe_nvm_access_write(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3756 static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
3757 struct ixgbe_nvm_access_cmd *cmd,
3758 struct ixgbe_nvm_access_data *data)
3759 {
3760 s32 status;
3761
3762 /* Make sure this is a valid read/write access request */
3763 status = ixgbe_validate_nvm_rw_reg(cmd);
3764 if (status)
3765 return status;
3766
3767 /* Reject requests to write to read-only registers */
3768 switch (cmd->offset) {
3769 case GL_HICR_EN:
3770 return IXGBE_ERR_OUT_OF_RANGE;
3771 default:
3772 break;
3773 }
3774
3775 DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
3776 cmd->offset, data->regval);
3777
3778 /* Write the data field to the specified register */
3779 IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
3780
3781 return 0;
3782 }
3783
3784 /**
3785 * ixgbe_handle_nvm_access - Handle an NVM access request
3786 * @hw: pointer to the HW struct
3787 * @cmd: NVM access command info
3788 * @data: pointer to read or return data
3789 *
3790 * Process an NVM access request. Read the command structure information and
3791 * determine if it is valid. If not, report an error indicating the command
3792 * was invalid.
3793 *
3794 * For valid commands, perform the necessary function, copying the data into
3795 * the provided data buffer.
3796 *
3797 * Return: 0 if the nvm access request is valid and successful,
3798 * error code otherwise.
3799 */
ixgbe_handle_nvm_access(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3800 s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
3801 struct ixgbe_nvm_access_cmd *cmd,
3802 struct ixgbe_nvm_access_data *data)
3803 {
3804 switch (cmd->command) {
3805 case IXGBE_NVM_CMD_READ:
3806 return ixgbe_nvm_access_read(hw, cmd, data);
3807 case IXGBE_NVM_CMD_WRITE:
3808 return ixgbe_nvm_access_write(hw, cmd, data);
3809 default:
3810 return IXGBE_ERR_PARAM;
3811 }
3812 }
3813
3814 /**
3815 * ixgbe_aci_set_health_status_config - Configure FW health events
3816 * @hw: pointer to the HW struct
3817 * @event_source: type of diagnostic events to enable
3818 *
3819 * Configure the health status event types that the firmware will send to this
3820 * PF using ACI command (0xFF20). The supported event types are: PF-specific,
3821 * all PFs, and global.
3822 *
3823 * Return: the exit code of the operation.
3824 */
ixgbe_aci_set_health_status_config(struct ixgbe_hw * hw,u8 event_source)3825 s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
3826 {
3827 struct ixgbe_aci_cmd_set_health_status_config *cmd;
3828 struct ixgbe_aci_desc desc;
3829
3830 cmd = &desc.params.set_health_status_config;
3831
3832 ixgbe_fill_dflt_direct_cmd_desc(&desc,
3833 ixgbe_aci_opc_set_health_status_config);
3834
3835 cmd->event_source = event_source;
3836
3837 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3838 }
3839
/**
 * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for E610.
 * Does not touch the hardware.
 *
 * Return: the exit code of the operation (the result of the underlying
 * X550 ops init; E610 overrides are applied unconditionally).
 */
s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	/* Start from the X550 defaults and override E610 specifics below.
	 * The X550 init result is preserved and returned at the end even
	 * though the overrides are installed regardless of it.
	 */
	ret_val = ixgbe_init_ops_X550(hw);

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_E610;
	mac->ops.start_hw = ixgbe_start_hw_E610;
	mac->ops.get_media_type = ixgbe_get_media_type_E610;
	mac->ops.get_supported_physical_layer =
				ixgbe_get_supported_physical_layer_E610;
	/* SAN MAC / WWN ops are not supported on E610 */
	mac->ops.get_san_mac_addr = NULL;
	mac->ops.set_san_mac_addr = NULL;
	mac->ops.get_wwn_prefix = NULL;
	mac->ops.setup_link = ixgbe_setup_link_E610;
	mac->ops.check_link = ixgbe_check_link_E610;
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
	mac->ops.setup_fc = ixgbe_setup_fc_E610;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
	mac->ops.disable_rx = ixgbe_disable_rx_E610;
	mac->ops.setup_eee = ixgbe_setup_eee_E610;
	mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
	mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
	mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
	mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
	mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
	/* Thermal sensor data is read autonomously by FW on E610 */
	mac->ops.get_thermal_sensor_data = NULL;
	mac->ops.init_thermal_sensor_thresh = NULL;

	/* PHY */
	phy->ops.init = ixgbe_init_phy_ops_E610;
	phy->ops.identify = ixgbe_identify_phy_E610;
	/* EEE is offered on 10M/100M/1G copper speeds */
	phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
				    IXGBE_LINK_SPEED_100_FULL |
				    IXGBE_LINK_SPEED_1GB_FULL;
	phy->eee_speeds_advertised = phy->eee_speeds_supported;

	/* Additional ops overrides for e610 to go here:
	 * EEPROM access goes through the Admin Command Interface.
	 */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
	eeprom->ops.read = ixgbe_read_ee_aci_E610;
	eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
	eeprom->ops.write = ixgbe_write_ee_aci_E610;
	eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
	eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;

	/* Initialize bus function number */
	hw->mac.ops.set_lan_id(hw);

	return ret_val;
}
3907
/**
 * ixgbe_reset_hw_E610 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, and perform a reset.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	u32 ctrl, i;
	s32 status;

	DEBUGFUNC("ixgbe_reset_hw_E610");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* A PHY init failure is only logged, not fatal; status is
	 * overwritten by the semaphore acquisition result below.
	 */
	status = hw->phy.ops.init(hw);
	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
			  status);
mac_reset_top:
	/* The MAC reset is performed under the SW/FW semaphore so it
	 * cannot race with concurrent FW register accesses.
	 */
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status != IXGBE_SUCCESS) {
		ERROR_REPORT2(IXGBE_ERROR_CAUTION,
			      "semaphore failed with %d", status);
		return IXGBE_ERR_SWFW_SYNC;
	}
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		/* Error is recorded but execution continues, so the second
		 * reset pass below (if flagged) still gets a chance to run.
		 */
		status = IXGBE_ERR_RESET_FAILED;
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "Reset polling failed to complete.\n");
	}
	msec_delay(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	return status;
}
3992
3993 /**
3994 * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
3995 * @hw: pointer to hardware structure
3996 *
3997 * Gets firmware version and if API version matches it
3998 * starts the hardware using the generic start_hw function
3999 * and the generation start_hw function.
4000 * Then performs revision-specific operations, if any.
4001 **/
ixgbe_start_hw_E610(struct ixgbe_hw * hw)4002 s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
4003 {
4004 s32 ret_val = IXGBE_SUCCESS;
4005
4006 ret_val = hw->mac.ops.get_fw_version(hw);
4007 if (ret_val)
4008 goto out;
4009
4010 ret_val = ixgbe_start_hw_generic(hw);
4011 if (ret_val != IXGBE_SUCCESS)
4012 goto out;
4013
4014 ixgbe_start_hw_gen2(hw);
4015
4016 out:
4017 return ret_val;
4018 }
4019
4020 /**
4021 * ixgbe_get_media_type_E610 - Gets media type
4022 * @hw: pointer to the HW struct
4023 *
4024 * In order to get the media type, the function gets PHY
4025 * capabilities and later on use them to identify the PHY type
4026 * checking phy_type_high and phy_type_low.
4027 *
4028 * Return: the type of media in form of ixgbe_media_type enum
4029 * or ixgbe_media_type_unknown in case of an error.
4030 */
ixgbe_get_media_type_E610(struct ixgbe_hw * hw)4031 enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
4032 {
4033 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4034 u64 phy_mask = 0;
4035 s32 rc;
4036 u8 i;
4037
4038 rc = ixgbe_update_link_info(hw);
4039 if (rc) {
4040 return ixgbe_media_type_unknown;
4041 }
4042
4043 /* If there is no link but PHY (dongle) is available SW should use
4044 * Get PHY Caps admin command instead of Get Link Status, find most
4045 * significant bit that is set in PHY types reported by the command
4046 * and use it to discover media type.
4047 */
4048 if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
4049 (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
4050 /* Get PHY Capabilities */
4051 rc = ixgbe_aci_get_phy_caps(hw, false,
4052 IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4053 &pcaps);
4054 if (rc) {
4055 return ixgbe_media_type_unknown;
4056 }
4057
4058 /* Check if there is some bit set in phy_type_high */
4059 for (i = 64; i > 0; i--) {
4060 phy_mask = (u64)((u64)1 << (i - 1));
4061 if ((pcaps.phy_type_high & phy_mask) != 0) {
4062 /* If any bit is set treat it as PHY type */
4063 hw->link.link_info.phy_type_high = phy_mask;
4064 hw->link.link_info.phy_type_low = 0;
4065 break;
4066 }
4067 phy_mask = 0;
4068 }
4069
4070 /* If nothing found in phy_type_high search in phy_type_low */
4071 if (phy_mask == 0) {
4072 for (i = 64; i > 0; i--) {
4073 phy_mask = (u64)((u64)1 << (i - 1));
4074 if ((pcaps.phy_type_low & phy_mask) != 0) {
4075 /* If any bit is set treat it as PHY type */
4076 hw->link.link_info.phy_type_high = 0;
4077 hw->link.link_info.phy_type_low = phy_mask;
4078 break;
4079 }
4080 }
4081 }
4082
4083 }
4084
4085 /* Based on link status or search above try to discover media type */
4086 hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
4087
4088 return hw->phy.media_type;
4089 }
4090
4091 /**
4092 * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
4093 * @hw: pointer to hardware structure
4094 *
4095 * Determines physical layer capabilities of the current configuration.
4096 *
4097 * Return: the exit code of the operation.
4098 **/
ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw * hw)4099 u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
4100 {
4101 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
4102 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4103 u64 phy_type;
4104 s32 rc;
4105
4106 rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4107 &pcaps);
4108 if (rc)
4109 return IXGBE_PHYSICAL_LAYER_UNKNOWN;
4110
4111 phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
4112 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
4113 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
4114 if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
4115 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
4116 if(phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
4117 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
4118 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
4119 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
4120 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
4121 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
4122 if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
4123 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
4124 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
4125 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
4126 if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
4127 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
4128 if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
4129 physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
4130 if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
4131 physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
4132 if(phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
4133 physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
4134
4135 phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
4136 if(phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
4137 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
4138
4139 return physical_layer;
4140 }
4141
4142 /**
4143 * ixgbe_setup_link_E610 - Set up link
4144 * @hw: pointer to hardware structure
4145 * @speed: new link speed
4146 * @autoneg_wait: true when waiting for completion is needed
4147 *
4148 * Set up the link with the specified speed.
4149 *
4150 * Return: the exit code of the operation.
4151 */
ixgbe_setup_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait)4152 s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
4153 bool autoneg_wait)
4154 {
4155 /* Simply request FW to perform proper PHY setup */
4156 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
4157 }
4158
4159 /**
4160 * ixgbe_check_link_E610 - Determine link and speed status
4161 * @hw: pointer to hardware structure
4162 * @speed: pointer to link speed
4163 * @link_up: true when link is up
4164 * @link_up_wait_to_complete: bool used to wait for link up or not
4165 *
4166 * Determine if the link is up and the current link speed
4167 * using ACI command (0x0607).
4168 *
4169 * Return: the exit code of the operation.
4170 */
ixgbe_check_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)4171 s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4172 bool *link_up, bool link_up_wait_to_complete)
4173 {
4174 s32 rc;
4175 u32 i;
4176
4177 if (!speed || !link_up)
4178 return IXGBE_ERR_PARAM;
4179
4180 /* Set get_link_info flag to ensure that fresh
4181 * link information will be obtained from FW
4182 * by sending Get Link Status admin command. */
4183 hw->link.get_link_info = true;
4184
4185 /* Update link information in adapter context. */
4186 rc = ixgbe_get_link_status(hw, link_up);
4187 if (rc)
4188 return rc;
4189
4190 /* Wait for link up if it was requested. */
4191 if (link_up_wait_to_complete && *link_up == false) {
4192 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4193 msec_delay(100);
4194 hw->link.get_link_info = true;
4195 rc = ixgbe_get_link_status(hw, link_up);
4196 if (rc)
4197 return rc;
4198 if (*link_up)
4199 break;
4200 }
4201 }
4202
4203 /* Use link information in adapter context updated by the call
4204 * to ixgbe_get_link_status() to determine current link speed.
4205 * Link speed information is valid only when link up was
4206 * reported by FW. */
4207 if (*link_up) {
4208 switch (hw->link.link_info.link_speed) {
4209 case IXGBE_ACI_LINK_SPEED_10MB:
4210 *speed = IXGBE_LINK_SPEED_10_FULL;
4211 break;
4212 case IXGBE_ACI_LINK_SPEED_100MB:
4213 *speed = IXGBE_LINK_SPEED_100_FULL;
4214 break;
4215 case IXGBE_ACI_LINK_SPEED_1000MB:
4216 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4217 break;
4218 case IXGBE_ACI_LINK_SPEED_2500MB:
4219 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4220 break;
4221 case IXGBE_ACI_LINK_SPEED_5GB:
4222 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4223 break;
4224 case IXGBE_ACI_LINK_SPEED_10GB:
4225 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4226 break;
4227 default:
4228 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4229 break;
4230 }
4231 } else {
4232 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4233 }
4234
4235 return IXGBE_SUCCESS;
4236 }
4237
4238 /**
4239 * ixgbe_get_link_capabilities_E610 - Determine link capabilities
4240 * @hw: pointer to hardware structure
4241 * @speed: pointer to link speed
4242 * @autoneg: true when autoneg or autotry is enabled
4243 *
4244 * Determine speed and AN parameters of a link.
4245 *
4246 * Return: the exit code of the operation.
4247 */
ixgbe_get_link_capabilities_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)4248 s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
4249 ixgbe_link_speed *speed,
4250 bool *autoneg)
4251 {
4252 if (!speed || !autoneg)
4253 return IXGBE_ERR_PARAM;
4254
4255 *autoneg = true;
4256 *speed = hw->phy.speeds_supported;
4257
4258 return IXGBE_SUCCESS;
4259 }
4260
4261 /**
4262 * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
4263 * @hw: pointer to hardware structure
4264 * @cfg: PHY configuration data to set FC mode
4265 * @req_mode: FC mode to configure
4266 *
4267 * Configures PHY Flow Control according to the provided configuration.
4268 *
4269 * Return: the exit code of the operation.
4270 */
ixgbe_cfg_phy_fc(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg,enum ixgbe_fc_mode req_mode)4271 s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
4272 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
4273 enum ixgbe_fc_mode req_mode)
4274 {
4275 struct ixgbe_aci_cmd_get_phy_caps_data* pcaps = NULL;
4276 s32 status = IXGBE_SUCCESS;
4277 u8 pause_mask = 0x0;
4278
4279 if (!cfg)
4280 return IXGBE_ERR_PARAM;
4281
4282 switch (req_mode) {
4283 case ixgbe_fc_auto:
4284 {
4285 pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
4286 ixgbe_malloc(hw, sizeof(*pcaps));
4287 if (!pcaps) {
4288 status = IXGBE_ERR_OUT_OF_MEM;
4289 goto out;
4290 }
4291
4292 /* Query the value of FC that both the NIC and the attached
4293 * media can do. */
4294 status = ixgbe_aci_get_phy_caps(hw, false,
4295 IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
4296 if (status)
4297 goto out;
4298
4299 pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4300 pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4301
4302 break;
4303 }
4304 case ixgbe_fc_full:
4305 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4306 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4307 break;
4308 case ixgbe_fc_rx_pause:
4309 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4310 break;
4311 case ixgbe_fc_tx_pause:
4312 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4313 break;
4314 default:
4315 break;
4316 }
4317
4318 /* clear the old pause settings */
4319 cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
4320 IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
4321
4322 /* set the new capabilities */
4323 cfg->caps |= pause_mask;
4324
4325 out:
4326 if (pcaps)
4327 ixgbe_free(hw, pcaps);
4328 return status;
4329 }
4330
4331 /**
4332 * ixgbe_setup_fc_E610 - Set up flow control
4333 * @hw: pointer to hardware structure
4334 *
4335 * Set up flow control. This has to be done during init time.
4336 *
4337 * Return: the exit code of the operation.
4338 */
ixgbe_setup_fc_E610(struct ixgbe_hw * hw)4339 s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
4340 {
4341 struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
4342 struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
4343 s32 status;
4344
4345 /* Get the current PHY config */
4346 status = ixgbe_aci_get_phy_caps(hw, false,
4347 IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
4348 if (status)
4349 return status;
4350
4351 ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
4352
4353 /* Configure the set PHY data */
4354 status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
4355 if (status)
4356 return status;
4357
4358 /* If the capabilities have changed, then set the new config */
4359 if (cfg.caps != pcaps.caps) {
4360 cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4361
4362 status = ixgbe_aci_set_phy_cfg(hw, &cfg);
4363 if (status)
4364 return status;
4365 }
4366
4367 return status;
4368 }
4369
4370 /**
4371 * ixgbe_fc_autoneg_E610 - Configure flow control
4372 * @hw: pointer to hardware structure
4373 *
4374 * Configure Flow Control.
4375 */
ixgbe_fc_autoneg_E610(struct ixgbe_hw * hw)4376 void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
4377 {
4378 s32 status;
4379
4380 /* Get current link status.
4381 * Current FC mode will be stored in the hw context. */
4382 status = ixgbe_aci_get_link_info(hw, false, NULL);
4383 if (status) {
4384 goto out;
4385 }
4386
4387 /* Check if the link is up */
4388 if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
4389 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4390 goto out;
4391 }
4392
4393 /* Check if auto-negotiation has completed */
4394 if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
4395 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4396 goto out;
4397 }
4398
4399 out:
4400 if (status == IXGBE_SUCCESS) {
4401 hw->fc.fc_was_autonegged = true;
4402 } else {
4403 hw->fc.fc_was_autonegged = false;
4404 hw->fc.current_mode = hw->fc.requested_mode;
4405 }
4406 }
4407
4408 /**
4409 * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
4410 * @hw: pointer to the HW structure
4411 * @maj: driver version major number
4412 * @minor: driver version minor number
4413 * @build: driver version build number
4414 * @sub: driver version sub build number
4415 * @len: length of driver_ver string
4416 * @driver_ver: driver string
4417 *
4418 * Send driver version number to Firmware using ACI command (0x0002).
4419 *
4420 * Return: the exit code of the operation.
4421 * IXGBE_SUCCESS - OK
4422 * IXGBE_ERR_PARAM - incorrect parameters were given
4423 * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
4424 * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
4425 * IXGBE_ERR_OUT_OF_MEM - ran out of memory
4426 */
ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw * hw,u8 maj,u8 minor,u8 build,u8 sub,u16 len,const char * driver_ver)4427 s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
4428 u8 sub, u16 len, const char *driver_ver)
4429 {
4430 size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
4431 struct ixgbe_driver_ver dv;
4432
4433 DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
4434
4435 if (!len || !driver_ver)
4436 return IXGBE_ERR_PARAM;
4437
4438 dv.major_ver = maj;
4439 dv.minor_ver = minor;
4440 dv.build_ver = build;
4441 dv.subbuild_ver = sub;
4442
4443 memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
4444 memcpy(dv.driver_string, driver_ver, limited_len);
4445
4446 return ixgbe_aci_send_driver_ver(hw, &dv);
4447 }
4448
4449 /**
4450 * ixgbe_disable_rx_E610 - Disable RX unit
4451 * @hw: pointer to hardware structure
4452 *
4453 * Disable RX DMA unit on E610 with use of ACI command (0x000C).
4454 *
4455 * Return: the exit code of the operation.
4456 */
ixgbe_disable_rx_E610(struct ixgbe_hw * hw)4457 void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
4458 {
4459 u32 rxctrl;
4460
4461 DEBUGFUNC("ixgbe_disable_rx_E610");
4462
4463 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4464 if (rxctrl & IXGBE_RXCTRL_RXEN) {
4465 u32 pfdtxgswc;
4466 s32 status;
4467
4468 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4469 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
4470 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4471 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4472 hw->mac.set_lben = true;
4473 } else {
4474 hw->mac.set_lben = false;
4475 }
4476
4477 status = ixgbe_aci_disable_rxen(hw);
4478
4479 /* If we fail - disable RX using register write */
4480 if (status) {
4481 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4482 if (rxctrl & IXGBE_RXCTRL_RXEN) {
4483 rxctrl &= ~IXGBE_RXCTRL_RXEN;
4484 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
4485 }
4486 }
4487 }
4488 }
4489
4490 /**
4491 * ixgbe_setup_eee_E610 - Enable/disable EEE support
4492 * @hw: pointer to the HW structure
4493 * @enable_eee: boolean flag to enable EEE
4494 *
4495 * Enables/disable EEE based on enable_eee flag.
4496 *
4497 * Return: the exit code of the operation.
4498 */
ixgbe_setup_eee_E610(struct ixgbe_hw * hw,bool enable_eee)4499 s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
4500 {
4501 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
4502 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
4503 u16 eee_cap = 0;
4504 s32 status;
4505
4506 status = ixgbe_aci_get_phy_caps(hw, false,
4507 IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
4508 if (status != IXGBE_SUCCESS)
4509 return status;
4510
4511 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
4512
4513 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
4514 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4515
4516 if (enable_eee) {
4517 if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
4518 eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
4519 if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
4520 eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
4521 if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
4522 eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
4523 if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
4524 eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
4525 if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
4526 eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
4527 if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
4528 eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
4529 }
4530
4531 /* Set EEE capability for particular PHY types */
4532 phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
4533
4534 status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
4535
4536 return status;
4537 }
4538
4539 /**
4540 * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
4541 * @hw: pointer to hardware structure
4542 *
4543 * Checks FW NVM recovery mode by
4544 * reading the value of the dedicated register.
4545 *
4546 * Return: true if FW is in recovery mode, otherwise false.
4547 */
ixgbe_fw_recovery_mode_E610(struct ixgbe_hw * hw)4548 bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
4549 {
4550 u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
4551
4552 return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
4553 }
4554
4555 /**
4556 * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
4557 * @hw: pointer to hardware structure
4558 *
4559 * Checks FW NVM Rollback mode by reading the
4560 * value of the dedicated register.
4561 *
4562 * Return: true if FW is in Rollback mode, otherwise false.
4563 */
ixgbe_fw_rollback_mode_E610(struct ixgbe_hw * hw)4564 bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
4565 {
4566 u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
4567
4568 return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
4569 }
4570
4571 /**
4572 * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
4573 * @hw: pointer to hardware structure
4574 *
4575 * Checks Thermal Sensor Autonomous Mode by reading the
4576 * value of the dedicated register.
4577 *
4578 * Return: true if FW is in TSAM, otherwise false.
4579 */
ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw * hw)4580 bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
4581 {
4582 u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
4583
4584 return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
4585 }
4586
/**
 * ixgbe_init_phy_ops_E610 - PHY specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY type was not known.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	phy->ops.identify_sfp = ixgbe_identify_module_E610;
	phy->ops.read_reg = NULL; /* PHY reg access is not required */
	phy->ops.write_reg = NULL;
	phy->ops.read_reg_mdi = NULL;
	phy->ops.write_reg_mdi = NULL;
	phy->ops.setup_link = ixgbe_setup_phy_link_E610;
	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
	phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
	phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
	/* SFF/EEPROM access is proxied through FW on E610 */
	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
	phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
	phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
	phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
	/* PHY power control only applies to copper media */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
		phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
	else
		phy->ops.set_phy_power = NULL;
	phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
	phy->ops.handle_lasi = NULL; /* no implementation for E610 */
	phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
	phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */

	/* TODO: Set functions pointers based on device ID */

	/* Identify the PHY (fills phy->type, supported speeds, PHY ID) */
	ret_val = phy->ops.identify(hw);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	/* TODO: Set functions pointers based on PHY type */

	return ret_val;
}
4636
/**
 * ixgbe_identify_phy_E610 - Identify PHY
 * @hw: pointer to hardware structure
 *
 * Determine PHY type, supported speeds and PHY ID.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	s32 rc;

	/* Set PHY type: the E610 PHY is managed entirely by firmware */
	hw->phy.type = ixgbe_phy_fw;

	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
				    &pcaps);
	if (rc)
		return rc;

	if (!(pcaps.module_compliance_enforcement &
	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
		/* Handle lenient mode: re-query caps without requiring
		 * media to be present.
		 */
		rc = ixgbe_aci_get_phy_caps(hw, false,
					    IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
					    &pcaps);
		if (rc)
			return rc;
	}

	/* Determine supported speeds by translating the reported PHY type
	 * bits (both the low and high 64-bit words) into link speed flags.
	 */
	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;

	if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
	if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
	if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
	if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;

	/* 2.5 and 5 Gbps link speeds must be excluded from the
	 * auto-negotiation set used during driver initialization due to
	 * compatibility issues with certain switches. Those issues do not
	 * exist in case of E610 2.5G SKU device (0x57b1).
	 *
	 * NOTE: the placement of the autoneg_advertised assignments
	 * relative to the speed accumulation below is deliberate and
	 * order-sensitive: non-2.5G SKUs capture the advertised set here,
	 * BEFORE 2.5G/5G are added to speeds_supported.
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;

	/* The 2.5G SKU captures its advertised set here, AFTER 2.5G has
	 * been added but before 5G - so even this SKU does not advertise
	 * 5G by default.
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
	    pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;

	/* Set PHY ID from the first 4 bytes of the OUI field */
	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));

	return IXGBE_SUCCESS;
}
4725
4726 /**
4727 * ixgbe_identify_module_E610 - Identify SFP module type
4728 * @hw: pointer to hardware structure
4729 *
4730 * Identify the SFP module type.
4731 *
4732 * Return: the exit code of the operation.
4733 */
ixgbe_identify_module_E610(struct ixgbe_hw * hw)4734 s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
4735 {
4736 bool media_available;
4737 u8 module_type;
4738 s32 rc;
4739
4740 rc = ixgbe_update_link_info(hw);
4741 if (rc)
4742 goto err;
4743
4744 media_available =
4745 (hw->link.link_info.link_info &
4746 IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
4747
4748 if (media_available) {
4749 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
4750
4751 /* Get module type from hw context updated by ixgbe_update_link_info() */
4752 module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
4753
4754 if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
4755 (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
4756 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
4757 } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
4758 hw->phy.sfp_type = ixgbe_sfp_type_sr;
4759 } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
4760 (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
4761 hw->phy.sfp_type = ixgbe_sfp_type_lr;
4762 }
4763 rc = IXGBE_SUCCESS;
4764 } else {
4765 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
4766 rc = IXGBE_ERR_SFP_NOT_PRESENT;
4767 }
4768 err:
4769 return rc;
4770 }
4771
4772 /**
4773 * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
4774 * @hw: pointer to hardware structure
4775 *
4776 * Set the parameters for the firmware-controlled PHYs.
4777 *
4778 * Return: the exit code of the operation.
4779 */
ixgbe_setup_phy_link_E610(struct ixgbe_hw * hw)4780 s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
4781 {
4782 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4783 struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
4784 u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
4785 u64 sup_phy_type_low, sup_phy_type_high;
4786 s32 rc;
4787
4788 rc = ixgbe_aci_get_link_info(hw, false, NULL);
4789 if (rc) {
4790 goto err;
4791 }
4792
4793 /* If media is not available get default config */
4794 if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
4795 rmode = IXGBE_ACI_REPORT_DFLT_CFG;
4796
4797 rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
4798 if (rc) {
4799 goto err;
4800 }
4801
4802 sup_phy_type_low = pcaps.phy_type_low;
4803 sup_phy_type_high = pcaps.phy_type_high;
4804
4805 /* Get Active configuration to avoid unintended changes */
4806 rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
4807 &pcaps);
4808 if (rc) {
4809 goto err;
4810 }
4811 ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
4812
4813 /* Set default PHY types for a given speed */
4814 pcfg.phy_type_low = 0;
4815 pcfg.phy_type_high = 0;
4816
4817 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
4818 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
4819 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
4820 }
4821 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
4822 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
4823 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
4824 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
4825 }
4826 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
4827 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
4828 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
4829 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
4830 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
4831 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
4832 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
4833 }
4834 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
4835 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
4836 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
4837 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
4838 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
4839 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
4840 }
4841 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
4842 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
4843 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
4844 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
4845 }
4846 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
4847 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
4848 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
4849 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
4850 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
4851 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
4852 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
4853 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
4854 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
4855 }
4856
4857 /* Mask the set values to avoid requesting unsupported link types */
4858 pcfg.phy_type_low &= sup_phy_type_low;
4859 pcfg.phy_type_high &= sup_phy_type_high;
4860
4861 if (pcfg.phy_type_high != pcaps.phy_type_high ||
4862 pcfg.phy_type_low != pcaps.phy_type_low ||
4863 pcfg.caps != pcaps.caps) {
4864 pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
4865 pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4866
4867 rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
4868 }
4869
4870 err:
4871 return rc;
4872 }
4873
4874 /**
4875 * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
4876 * @hw: pointer to hardware structure
4877 * @firmware_version: pointer to the PHY Firmware Version
4878 *
4879 * Determines PHY FW version based on response to Get PHY Capabilities
4880 * admin command (0x0600).
4881 *
4882 * Return: the exit code of the operation.
4883 */
ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw * hw,u16 * firmware_version)4884 s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
4885 u16 *firmware_version)
4886 {
4887 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4888 s32 status;
4889
4890 if (!firmware_version)
4891 return IXGBE_ERR_PARAM;
4892
4893 status = ixgbe_aci_get_phy_caps(hw, false,
4894 IXGBE_ACI_REPORT_ACTIVE_CFG,
4895 &pcaps);
4896 if (status)
4897 return status;
4898
4899 /* TODO: determine which bytes of the 8-byte phy_fw_ver
4900 * field should be written to the 2-byte firmware_version
4901 * output argument. */
4902 memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
4903
4904 return IXGBE_SUCCESS;
4905 }
4906
/**
 * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset at address 0xA2
 * @sff8472_data: value read
 *
 * Performs byte read operation from SFP module's SFF-8472 data over I2C.
 * Thin wrapper around ixgbe_aci_sff_eeprom(): reads a single byte from
 * I2C device address IXGBE_I2C_EEPROM_DEV_ADDR2 on logical port 0,
 * without updating the module's page/bank selection.
 *
 * Return: the exit code of the operation.
 **/
s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
				u8 *sff8472_data)
{
	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
				    byte_offset, 0,
				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
				    sff8472_data, 1, false);
}
4925
/**
 * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs byte read operation from SFP module's EEPROM over I2C interface.
 * Thin wrapper around ixgbe_aci_sff_eeprom(): reads a single byte from
 * I2C device address IXGBE_I2C_EEPROM_DEV_ADDR on logical port 0,
 * without updating the module's page/bank selection.
 *
 * Return: the exit code of the operation.
 **/
s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
			       u8 *eeprom_data)
{
	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
				    byte_offset, 0,
				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
				    eeprom_data, 1, false);
}
4944
/**
 * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to write
 * @eeprom_data: value to write
 *
 * Performs byte write operation to SFP module's EEPROM over I2C interface.
 * Thin wrapper around ixgbe_aci_sff_eeprom() with the write flag set:
 * writes a single byte to I2C device address IXGBE_I2C_EEPROM_DEV_ADDR on
 * logical port 0, without updating the module's page/bank selection.
 *
 * Return: the exit code of the operation.
 **/
s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
				u8 eeprom_data)
{
	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
				    byte_offset, 0,
				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
				    &eeprom_data, 1, true);
}
4963
4964 /**
4965 * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
4966 * @hw: pointer to hardware structure
4967 *
4968 * Get the link status and check if the PHY temperature alarm detected.
4969 *
4970 * Return: the exit code of the operation.
4971 */
ixgbe_check_overtemp_E610(struct ixgbe_hw * hw)4972 s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
4973 {
4974 struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
4975 struct ixgbe_aci_cmd_get_link_status *resp;
4976 struct ixgbe_aci_desc desc;
4977 s32 status = IXGBE_SUCCESS;
4978
4979 if (!hw)
4980 return IXGBE_ERR_PARAM;
4981
4982 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
4983 resp = &desc.params.get_link_status;
4984 resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
4985
4986 status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
4987 if (status != IXGBE_SUCCESS)
4988 return status;
4989
4990 if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
4991 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4992 "PHY Temperature Alarm detected");
4993 status = IXGBE_ERR_OVERTEMP;
4994 }
4995
4996 return status;
4997 }
4998
4999 /**
5000 * ixgbe_set_phy_power_E610 - Control power for copper PHY
5001 * @hw: pointer to hardware structure
5002 * @on: true for on, false for off
5003 *
5004 * Set the power on/off of the PHY
5005 * by getting its capabilities and setting the appropriate
5006 * configuration parameters.
5007 *
5008 * Return: the exit code of the operation.
5009 */
ixgbe_set_phy_power_E610(struct ixgbe_hw * hw,bool on)5010 s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
5011 {
5012 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5013 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5014 s32 status;
5015
5016 status = ixgbe_aci_get_phy_caps(hw, false,
5017 IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5018 if (status != IXGBE_SUCCESS)
5019 return status;
5020
5021 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5022
5023 if (on) {
5024 phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
5025 } else {
5026 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
5027 }
5028
5029 /* PHY is already in requested power mode */
5030 if (phy_caps.caps == phy_cfg.caps)
5031 return IXGBE_SUCCESS;
5032
5033 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
5034 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
5035
5036 status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5037
5038 return status;
5039 }
5040
5041 /**
5042 * ixgbe_enter_lplu_E610 - Transition to low power states
5043 * @hw: pointer to hardware structure
5044 *
5045 * Configures Low Power Link Up on transition to low power states
5046 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
5047 * X557 PHY immediately prior to entering LPLU.
5048 *
5049 * Return: the exit code of the operation.
5050 */
ixgbe_enter_lplu_E610(struct ixgbe_hw * hw)5051 s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
5052 {
5053 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5054 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5055 s32 status;
5056
5057 status = ixgbe_aci_get_phy_caps(hw, false,
5058 IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5059 if (status != IXGBE_SUCCESS)
5060 return status;
5061
5062 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5063
5064 phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
5065
5066 status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5067
5068 return status;
5069 }
5070
5071 /**
5072 * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
5073 * @hw: pointer to hardware structure
5074 *
5075 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
5076 * ixgbe_hw struct in order to set up EEPROM access.
5077 *
5078 * Return: the exit code of the operation.
5079 */
ixgbe_init_eeprom_params_E610(struct ixgbe_hw * hw)5080 s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
5081 {
5082 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5083 u32 gens_stat;
5084 u8 sr_size;
5085
5086 if (eeprom->type == ixgbe_eeprom_uninitialized) {
5087 eeprom->type = ixgbe_flash;
5088
5089 gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
5090 sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
5091 GLNVM_GENS_SR_SIZE_S;
5092
5093 /* Switching to words (sr_size contains power of 2) */
5094 eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
5095
5096 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
5097 eeprom->type, eeprom->word_size);
5098 }
5099
5100 return IXGBE_SUCCESS;
5101 }
5102
5103 /**
5104 * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
5105 * @hw: pointer to hardware structure
5106 * @offset: offset of word in the EEPROM to read
5107 * @data: word read from the EEPROM
5108 *
5109 * Reads a 16 bit word from the EEPROM using the ACI.
5110 * If the EEPROM params are not initialized, the function
5111 * initialize them before proceeding with reading.
5112 * The function acquires and then releases the NVM ownership.
5113 *
5114 * Return: the exit code of the operation.
5115 */
ixgbe_read_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 * data)5116 s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
5117 {
5118 s32 status;
5119
5120 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5121 status = ixgbe_init_eeprom_params(hw);
5122 if (status)
5123 return status;
5124 }
5125
5126 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5127 if (status)
5128 return status;
5129
5130 status = ixgbe_read_sr_word_aci(hw, offset, data);
5131 ixgbe_release_nvm(hw);
5132
5133 return status;
5134 }
5135
5136 /**
5137 * ixgbe_read_ee_aci_buffer_E610- Read EEPROM word(s) using admin commands.
5138 * @hw: pointer to hardware structure
5139 * @offset: offset of word in the EEPROM to read
5140 * @words: number of words
5141 * @data: word(s) read from the EEPROM
5142 *
5143 * Reads a 16 bit word(s) from the EEPROM using the ACI.
5144 * If the EEPROM params are not initialized, the function
5145 * initialize them before proceeding with reading.
5146 * The function acquires and then releases the NVM ownership.
5147 *
5148 * Return: the exit code of the operation.
5149 */
ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5150 s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5151 u16 words, u16 *data)
5152 {
5153 s32 status;
5154
5155 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5156 status = ixgbe_init_eeprom_params(hw);
5157 if (status)
5158 return status;
5159 }
5160
5161 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5162 if (status)
5163 return status;
5164
5165 status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
5166 ixgbe_release_nvm(hw);
5167
5168 return status;
5169 }
5170
5171 /**
5172 * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
5173 * @hw: pointer to hardware structure
5174 * @offset: offset of word in the EEPROM to write
5175 * @data: word write to the EEPROM
5176 *
5177 * Write a 16 bit word to the EEPROM using the ACI.
5178 * If the EEPROM params are not initialized, the function
5179 * initialize them before proceeding with writing.
5180 * The function acquires and then releases the NVM ownership.
5181 *
5182 * Return: the exit code of the operation.
5183 */
ixgbe_write_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 data)5184 s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
5185 {
5186 s32 status;
5187
5188 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5189 status = ixgbe_init_eeprom_params(hw);
5190 if (status)
5191 return status;
5192 }
5193
5194 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5195 if (status)
5196 return status;
5197
5198 status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
5199 ixgbe_release_nvm(hw);
5200
5201 return status;
5202 }
5203
5204 /**
5205 * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
5206 * @hw: pointer to hardware structure
5207 * @offset: offset of word in the EEPROM to write
5208 * @words: number of words
5209 * @data: word(s) write to the EEPROM
5210 *
5211 * Write a 16 bit word(s) to the EEPROM using the ACI.
5212 * If the EEPROM params are not initialized, the function
5213 * initialize them before proceeding with writing.
5214 * The function acquires and then releases the NVM ownership.
5215 *
5216 * Return: the exit code of the operation.
5217 */
ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5218 s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5219 u16 words, u16 *data)
5220 {
5221 s32 status;
5222
5223 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5224 status = ixgbe_init_eeprom_params(hw);
5225 if (status)
5226 return status;
5227 }
5228
5229 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5230 if (status)
5231 return status;
5232
5233 status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
5234 ixgbe_release_nvm(hw);
5235
5236 return status;
5237 }
5238
/**
 * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Calculate SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 * If the EEPROM params are not initialized, the function
 * initializes them before proceeding.
 * The function acquires and then releases the NVM ownership.
 *
 * Return: the negative error code on error, or the 16-bit checksum
 */
s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
{
	bool nvm_acquired = false;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 checksum = 0;
	u16 vpd_module;
	void *vmem;
	s32 status;
	u16 *data;
	u16 i;

	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
		status = ixgbe_init_eeprom_params(hw);
		if (status)
			return status;
	}

	/* Scratch buffer holding one Shadow RAM sector at a time. */
	vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
	if (!vmem)
		return IXGBE_ERR_OUT_OF_MEM;
	data = (u16 *)vmem;
	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (status)
		goto ixgbe_calc_sr_checksum_exit;
	nvm_acquired = true;

	/* read pointer to VPD area */
	status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
	if (status)
		goto ixgbe_calc_sr_checksum_exit;

	/* read pointer to PCIe Alt Auto-load module */
	status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (status)
		goto ixgbe_calc_sr_checksum_exit;

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->eeprom.word_size; i++) {
		/* Read SR page */
		if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;

			/* Refill the sector buffer at each sector boundary. */
			status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
			if (status != IXGBE_SUCCESS)
				goto ixgbe_calc_sr_checksum_exit;
		}

		/* Skip Checksum word */
		if (i == E610_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if (i >= (u32)vpd_module &&
		    i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
			continue;
		/* Skip PCIe ALT module (convert byte size to word count) */
		if (i >= (u32)pcie_alt_module &&
		    i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
			continue;

		/* Sum modulo 2^16; wraparound is intentional. */
		checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
	}

	/* Checksum is the value that makes the full sum equal the base. */
	checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;

ixgbe_calc_sr_checksum_exit:
	/* Release NVM only if it was successfully acquired above. */
	if(nvm_acquired)
		ixgbe_release_nvm(hw);
	ixgbe_free(hw, vmem);

	if(!status)
		return (s32)checksum;
	else
		return status;
}
5331
5332 /**
5333 * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
5334 * @hw: pointer to hardware structure
5335 *
5336 * After writing EEPROM to Shadow RAM, software sends the admin command
5337 * to recalculate and update EEPROM checksum and instructs the hardware
5338 * to update the flash.
5339 * If the EEPROM params are not initialized, the function
5340 * initialize them before proceeding.
5341 * The function acquires and then releases the NVM ownership.
5342 *
5343 * Return: the exit code of the operation.
5344 */
ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw * hw)5345 s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
5346 {
5347 s32 status;
5348
5349 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5350 status = ixgbe_init_eeprom_params(hw);
5351 if (status)
5352 return status;
5353 }
5354
5355 status = ixgbe_nvm_recalculate_checksum(hw);
5356 if (status)
5357 return status;
5358 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5359 if (status)
5360 return status;
5361
5362 status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
5363 NULL);
5364 ixgbe_release_nvm(hw);
5365
5366 return status;
5367 }
5368
5369 /**
5370 * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
5371 * @hw: pointer to hardware structure
5372 * @checksum_val: calculated checksum
5373 *
5374 * Performs checksum calculation and validates the EEPROM checksum. If the
5375 * caller does not need checksum_val, the value can be NULL.
5376 * If the EEPROM params are not initialized, the function
5377 * initialize them before proceeding.
5378 * The function acquires and then releases the NVM ownership.
5379 *
5380 * Return: the exit code of the operation.
5381 */
ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw * hw,u16 * checksum_val)5382 s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
5383 {
5384 u32 status;
5385
5386 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5387 status = ixgbe_init_eeprom_params(hw);
5388 if (status)
5389 return status;
5390 }
5391
5392 status = ixgbe_nvm_validate_checksum(hw);
5393
5394 if (status)
5395 return status;
5396
5397 if (checksum_val) {
5398 u16 tmp_checksum;
5399 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5400 if (status)
5401 return status;
5402
5403 status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
5404 &tmp_checksum);
5405 ixgbe_release_nvm(hw);
5406
5407 if (!status)
5408 *checksum_val = tmp_checksum;
5409 }
5410
5411 return status;
5412 }
5413
5414 /**
5415 * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
5416 * @hw: pointer to hardware structure
5417 * @module_tlv: pointer to module TLV to return
5418 * @module_tlv_len: pointer to module TLV length to return
5419 * @module_type: module type requested
5420 *
5421 * Finds the requested sub module TLV type from the Preserved Field
5422 * Area (PFA) and returns the TLV pointer and length. The caller can
5423 * use these to read the variable length TLV value.
5424 *
5425 * Return: the exit code of the operation.
5426 */
ixgbe_get_pfa_module_tlv(struct ixgbe_hw * hw,u16 * module_tlv,u16 * module_tlv_len,u16 module_type)5427 static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
5428 u16 *module_tlv_len, u16 module_type)
5429 {
5430 u16 pfa_len, pfa_ptr, pfa_end_ptr;
5431 u16 next_tlv;
5432 s32 status;
5433
5434 status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
5435 if (status != IXGBE_SUCCESS) {
5436 return status;
5437 }
5438 status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
5439 if (status != IXGBE_SUCCESS) {
5440 return status;
5441 }
5442 /* Starting with first TLV after PFA length, iterate through the list
5443 * of TLVs to find the requested one.
5444 */
5445 next_tlv = pfa_ptr + 1;
5446 pfa_end_ptr = pfa_ptr + pfa_len;
5447 while (next_tlv < pfa_end_ptr) {
5448 u16 tlv_sub_module_type, tlv_len;
5449
5450 /* Read TLV type */
5451 status = ixgbe_read_ee_aci_E610(hw, next_tlv,
5452 &tlv_sub_module_type);
5453 if (status != IXGBE_SUCCESS) {
5454 break;
5455 }
5456 /* Read TLV length */
5457 status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
5458 if (status != IXGBE_SUCCESS) {
5459 break;
5460 }
5461 if (tlv_sub_module_type == module_type) {
5462 if (tlv_len) {
5463 *module_tlv = next_tlv;
5464 *module_tlv_len = tlv_len;
5465 return IXGBE_SUCCESS;
5466 }
5467 return IXGBE_ERR_INVAL_SIZE;
5468 }
5469 /* Check next TLV, i.e. current TLV pointer + length + 2 words
5470 * (for current TLV's type and length)
5471 */
5472 next_tlv = next_tlv + tlv_len + 2;
5473 }
5474 /* Module does not exist */
5475 return IXGBE_ERR_DOES_NOT_EXIST;
5476 }
5477
5478 /**
5479 * ixgbe_read_pba_string_E610 - Reads part number string from NVM
5480 * @hw: pointer to hardware structure
5481 * @pba_num: stores the part number string from the NVM
5482 * @pba_num_size: part number string buffer length
5483 *
5484 * Reads the part number string from the NVM.
5485 *
5486 * Return: the exit code of the operation.
5487 */
ixgbe_read_pba_string_E610(struct ixgbe_hw * hw,u8 * pba_num,u32 pba_num_size)5488 s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
5489 u32 pba_num_size)
5490 {
5491 u16 pba_tlv, pba_tlv_len;
5492 u16 pba_word, pba_size;
5493 s32 status;
5494 u16 i;
5495
5496 status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
5497 E610_SR_PBA_BLOCK_PTR);
5498 if (status != IXGBE_SUCCESS) {
5499 return status;
5500 }
5501
5502 /* pba_size is the next word */
5503 status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
5504 if (status != IXGBE_SUCCESS) {
5505 return status;
5506 }
5507
5508 if (pba_tlv_len < pba_size) {
5509 return IXGBE_ERR_INVAL_SIZE;
5510 }
5511
5512 /* Subtract one to get PBA word count (PBA Size word is included in
5513 * total size)
5514 */
5515 pba_size--;
5516 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
5517 return IXGBE_ERR_PARAM;
5518 }
5519
5520 for (i = 0; i < pba_size; i++) {
5521 status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
5522 &pba_word);
5523 if (status != IXGBE_SUCCESS) {
5524 return status;
5525 }
5526
5527 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
5528 pba_num[(i * 2) + 1] = pba_word & 0xFF;
5529 }
5530 pba_num[(pba_size * 2)] = '\0';
5531
5532 return status;
5533 }
5534