xref: /freebsd/sys/dev/ixgbe/ixgbe_e610.c (revision 6b58d10fc6d51ddcf5ee81628ead74d3dadb9bf6)
1 /******************************************************************************
2   SPDX-License-Identifier: BSD-3-Clause
3 
4   Copyright (c) 2025, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_e610.h"
37 #include "ixgbe_x550.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 #include "ixgbe_api.h"
41 
42 /**
43  * ixgbe_init_aci - initialization routine for Admin Command Interface
44  * @hw: pointer to the hardware structure
45  *
46  * Initialize the ACI lock.
47  */
void ixgbe_init_aci(struct ixgbe_hw *hw)
{
	/* All ACI register sequences are serialized through hw->aci.lock;
	 * it must exist before the first Admin Command is issued.
	 */
	ixgbe_init_lock(&hw->aci.lock);
}
52 
53 /**
54  * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
55  * @hw: pointer to the hardware structure
56  *
57  * Destroy the ACI lock.
58  */
void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
{
	/* Tear down the ACI serialization lock created by ixgbe_init_aci();
	 * no Admin Commands may be issued after this call.
	 */
	ixgbe_destroy_lock(&hw->aci.lock);
}
63 
64 /**
65  * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
66  * be resent
67  * @opcode: ACI opcode
68  *
69  * Check if ACI command should be sent again depending on the provided opcode.
70  *
71  * Return: true if the sending command routine should be repeated,
72  * otherwise false.
73  */
ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)74 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
75 {
76 	switch (opcode) {
77 	case ixgbe_aci_opc_disable_rxen:
78 	case ixgbe_aci_opc_get_phy_caps:
79 	case ixgbe_aci_opc_get_link_status:
80 	case ixgbe_aci_opc_get_link_topo:
81 		return true;
82 	}
83 
84 	return false;
85 }
86 
87 /**
88  * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
89  * Command Interface
90  * @hw: pointer to the HW struct
91  * @desc: descriptor describing the command
92  * @buf: buffer to use for indirect commands (NULL for direct commands)
93  * @buf_size: size of buffer for indirect commands (0 for direct commands)
94  *
95  * Admin Command is sent using CSR by setting descriptor and buffer in specific
96  * registers.
97  *
98  * Return: the exit code of the operation.
99  * * - IXGBE_SUCCESS - success.
100  * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
101  * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
102  * * - IXGBE_ERR_PARAM - buf_size is too big or
103  * invalid argument buf or buf_size.
104  * * - IXGBE_ERR_ACI_TIMEOUT - Admin Command X command timeout.
105  * * - IXGBE_ERR_ACI_ERROR - Admin Command X invalid state of HICR register or
106  * Admin Command failed because of bad opcode was returned or
107  * Admin Command failed with error Y.
108  */
static s32
ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
			   void *buf, u16 buf_size)
{
	u32 hicr = 0, tmp_buf_size = 0, i = 0;
	/* The descriptor is written to/read from the PF_HIDA CSRs one dword
	 * at a time, so view it as an array of u32.
	 */
	u32 *raw_desc = (u32 *)desc;
	s32 status = IXGBE_SUCCESS;
	bool valid_buf = false;
	u32 *tmp_buf = NULL;
	u16 opcode = 0;

	/* do { } while (0) lets every error path 'break' to the common
	 * cleanup at the bottom instead of using goto.
	 */
	do {
		hw->aci.last_status = IXGBE_ACI_RC_OK;

		/* It's necessary to check if mechanism is enabled */
		hicr = IXGBE_READ_REG(hw, PF_HICR);
		if (!(hicr & PF_HICR_EN)) {
			status = IXGBE_ERR_ACI_DISABLED;
			break;
		}
		if (hicr & PF_HICR_C) {
			hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
			status = IXGBE_ERR_ACI_BUSY;
			break;
		}
		/* Saved as read from the descriptor (little-endian form);
		 * compared against desc->opcode (also LE) after completion.
		 */
		opcode = desc->opcode;

		if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
			status = IXGBE_ERR_PARAM;
			break;
		}

		if (buf)
			desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);

		/* Check if buf and buf_size are proper params */
		if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
			if ((buf && buf_size == 0) ||
			    (buf == NULL && buf_size)) {
				status = IXGBE_ERR_PARAM;
				break;
			}
			if (buf && buf_size)
				valid_buf = true;
		}

		if (valid_buf == true) {
			/* Round the staging size up to a whole number of
			 * dwords; the PF_HIBA registers are accessed 32 bits
			 * at a time.
			 */
			if (buf_size % 4 == 0)
				tmp_buf_size = buf_size;
			else
				tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;

			tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
			if (!tmp_buf)
				return IXGBE_ERR_OUT_OF_MEM;

			/* tmp_buf will be firstly filled with 0xFF and after
			 * that the content of buf will be written into it.
			 * This approach lets us use valid buf_size and
			 * prevents us from reading past buf area
			 * when buf_size mod 4 not equal to 0.
			 */
			memset(tmp_buf, 0xFF, tmp_buf_size);
			memcpy(tmp_buf, buf, buf_size);

			if (tmp_buf_size > IXGBE_ACI_LG_BUF)
				desc->flags |=
				IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);

			desc->datalen = IXGBE_CPU_TO_LE16(buf_size);

			/* RD flag means the buffer carries command data;
			 * stage it into the host interface buffer CSRs.
			 */
			if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
				for (i = 0; i < tmp_buf_size / 4; i++) {
					IXGBE_WRITE_REG(hw, PF_HIBA(i),
						IXGBE_LE32_TO_CPU(tmp_buf[i]));
				}
			}
		}

		/* Descriptor is written to specific registers */
		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
			IXGBE_WRITE_REG(hw, PF_HIDA(i),
					IXGBE_LE32_TO_CPU(raw_desc[i]));

		/* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
		 * PF_HICR_EV
		 */
		hicr = IXGBE_READ_REG(hw, PF_HICR);
		hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
		IXGBE_WRITE_REG(hw, PF_HICR, hicr);

		/* Wait for sync Admin Command response: poll every 1 ms until
		 * either SV is set or the command-pending bit C clears.
		 */
		for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
			hicr = IXGBE_READ_REG(hw, PF_HICR);
			if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
				break;

			msec_delay(1);
		}

		/* Wait for async Admin Command response (only entered when a
		 * sync response arrived but the command is still executing).
		 */
		if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
			for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
			     i += 1) {
				hicr = IXGBE_READ_REG(hw, PF_HICR);
				if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
					break;

				msec_delay(1);
			}
		}

		/* Read sync Admin Command response */
		if ((hicr & PF_HICR_SV)) {
			for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
				raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
				raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
			}
		}

		/* Read async Admin Command response */
		if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
			for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
				raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
				raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
			}
		}

		/* Handle timeout and invalid state of HICR register */
		if (hicr & PF_HICR_C) {
			status = IXGBE_ERR_ACI_TIMEOUT;
			break;
		} else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
			status = IXGBE_ERR_ACI_ERROR;
			break;
		}

		/* For every command other than 0x0014 treat opcode mismatch
		 * as an error. Response to 0x0014 command read from HIDA_2
		 * is a descriptor of an event which is expected to contain
		 * different opcode than the command.
		 */
		if (desc->opcode != opcode &&
		    opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
			status = IXGBE_ERR_ACI_ERROR;
			break;
		}

		/* Firmware reported a command-level error in the descriptor */
		if (desc->retval != IXGBE_ACI_RC_OK) {
			hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
			status = IXGBE_ERR_ACI_ERROR;
			break;
		}

		/* Write a response values to a buf */
		if (valid_buf && (desc->flags &
				  IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
			for (i = 0; i < tmp_buf_size / 4; i++) {
				tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
				tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
			}
			/* Copy only the caller's buf_size bytes; the dword
			 * padding in tmp_buf is discarded here.
			 */
			memcpy(buf, tmp_buf, buf_size);
		}
	} while (0);

	if (tmp_buf)
		ixgbe_free(hw, tmp_buf);

	return status;
}
279 
280 /**
281  * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
282  * @hw: pointer to the HW struct
283  * @desc: descriptor describing the command
284  * @buf: buffer to use for indirect commands (NULL for direct commands)
285  * @buf_size: size of buffer for indirect commands (0 for direct commands)
286  *
287  * Helper function to send FW Admin Commands to the FW Admin Command Interface.
288  *
289  * Retry sending the FW Admin Command multiple times to the FW ACI
290  * if the EBUSY Admin Command error is returned.
291  *
292  * Return: the exit code of the operation.
293  */
ixgbe_aci_send_cmd(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)294 s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
295 		       void *buf, u16 buf_size)
296 {
297 	struct ixgbe_aci_desc desc_cpy;
298 	enum ixgbe_aci_err last_status;
299 	bool is_cmd_for_retry;
300 	u8 *buf_cpy = NULL;
301 	s32 status;
302 	u16 opcode;
303 	u8 idx = 0;
304 
305 	opcode = IXGBE_LE16_TO_CPU(desc->opcode);
306 	is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
307 	memset(&desc_cpy, 0, sizeof(desc_cpy));
308 
309 	if (is_cmd_for_retry) {
310 		if (buf) {
311 			buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
312 			if (!buf_cpy)
313 				return IXGBE_ERR_OUT_OF_MEM;
314 		}
315 		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
316 	}
317 
318 	do {
319 		ixgbe_acquire_lock(&hw->aci.lock);
320 		status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
321 		last_status = hw->aci.last_status;
322 		ixgbe_release_lock(&hw->aci.lock);
323 
324 		if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
325 		    (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
326 			break;
327 
328 		if (buf)
329 			memcpy(buf, buf_cpy, buf_size);
330 		memcpy(desc, &desc_cpy, sizeof(desc_cpy));
331 
332 		msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
333 	} while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
334 
335 	if (buf_cpy)
336 		ixgbe_free(hw, buf_cpy);
337 
338 	return status;
339 }
340 
341 /**
342  * ixgbe_aci_check_event_pending - check if there are any pending events
343  * @hw: pointer to the HW struct
344  *
345  * Determine if there are any pending events.
346  *
347  * Return: true if there are any currently pending events
348  * otherwise false.
349  */
ixgbe_aci_check_event_pending(struct ixgbe_hw * hw)350 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
351 {
352 	u32 ep_bit_mask;
353 	u32 fwsts;
354 
355 	ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
356 
357 	/* Check state of Event Pending (EP) bit */
358 	fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
359 	return (fwsts & ep_bit_mask) ? true : false;
360 }
361 
362 /**
363  * ixgbe_aci_get_event - get an event from ACI
364  * @hw: pointer to the HW struct
365  * @e: event information structure
366  * @pending: optional flag signaling that there are more pending events
367  *
368  * Obtain an event from ACI and return its content
369  * through 'e' using ACI command (0x0014).
370  * Provide information if there are more events
371  * to retrieve through 'pending'.
372  *
373  * Return: the exit code of the operation.
374  */
s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
			bool *pending)
{
	struct ixgbe_aci_desc desc;
	s32 status;

	/* Reject a NULL event struct and mismatched buffer/length pairs */
	if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
		return IXGBE_ERR_PARAM;

	/* Hold the ACI lock for the whole sequence; released at the exit
	 * label on every path.
	 */
	ixgbe_acquire_lock(&hw->aci.lock);

	/* Check if there are any events pending */
	if (!ixgbe_aci_check_event_pending(hw)) {
		status = IXGBE_ERR_ACI_NO_EVENTS;
		goto aci_get_event_exit;
	}

	/* Obtain pending event */
	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
	/* Call the execute routine directly (not ixgbe_aci_send_cmd) since
	 * the ACI lock is already held here.
	 */
	status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
	if (status)
		goto aci_get_event_exit;

	/* Returned 0x0014 opcode indicates that no event was obtained */
	if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
		status = IXGBE_ERR_ACI_NO_EVENTS;
		goto aci_get_event_exit;
	}

	/* Determine size of event data, clamped to the caller's buffer */
	e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
	/* Write event descriptor to event info structure */
	memcpy(&e->desc, &desc, sizeof(e->desc));

	/* Check if there are any further events pending */
	if (pending) {
		*pending = ixgbe_aci_check_event_pending(hw);
	}

aci_get_event_exit:
	ixgbe_release_lock(&hw->aci.lock);

	return status;
}
419 
420 /**
421  * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
422  * @desc: pointer to the temp descriptor (non DMA mem)
423  * @opcode: the opcode can be used to decide which flags to turn off or on
424  *
425  * Helper function to fill the descriptor desc with default values
426  * and the provided opcode.
427  */
void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
{
	/* Start from a fully cleared descriptor so no stale fields leak
	 * into the command.
	 */
	memset(desc, 0, sizeof(*desc));
	/* A default direct command carries only the SI flag and the opcode */
	desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
	desc->opcode = IXGBE_CPU_TO_LE16(opcode);
}
435 
436 /**
437  * ixgbe_aci_get_fw_ver - get the firmware version
438  * @hw: pointer to the HW struct
439  *
440  * Get the firmware version using ACI command (0x0001).
441  *
442  * Return: the exit code of the operation.
443  */
ixgbe_aci_get_fw_ver(struct ixgbe_hw * hw)444 s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
445 {
446 	struct ixgbe_aci_cmd_get_ver *resp;
447 	struct ixgbe_aci_desc desc;
448 	s32 status;
449 
450 	resp = &desc.params.get_ver;
451 
452 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
453 
454 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
455 
456 	if (!status) {
457 		hw->fw_branch = resp->fw_branch;
458 		hw->fw_maj_ver = resp->fw_major;
459 		hw->fw_min_ver = resp->fw_minor;
460 		hw->fw_patch = resp->fw_patch;
461 		hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
462 		hw->api_branch = resp->api_branch;
463 		hw->api_maj_ver = resp->api_major;
464 		hw->api_min_ver = resp->api_minor;
465 		hw->api_patch = resp->api_patch;
466 	}
467 
468 	return status;
469 }
470 
471 /**
472  * ixgbe_aci_send_driver_ver - send the driver version to firmware
473  * @hw: pointer to the HW struct
474  * @dv: driver's major, minor version
475  *
476  * Send the driver version to the firmware
477  * using the ACI command (0x0002).
478  *
479  * Return: the exit code of the operation.
480  * Returns IXGBE_ERR_PARAM, if dv is NULL.
481  */
ixgbe_aci_send_driver_ver(struct ixgbe_hw * hw,struct ixgbe_driver_ver * dv)482 s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
483 {
484 	struct ixgbe_aci_cmd_driver_ver *cmd;
485 	struct ixgbe_aci_desc desc;
486 	u16 len;
487 
488 	cmd = &desc.params.driver_ver;
489 
490 	if (!dv)
491 		return IXGBE_ERR_PARAM;
492 
493 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
494 
495 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
496 	cmd->major_ver = dv->major_ver;
497 	cmd->minor_ver = dv->minor_ver;
498 	cmd->build_ver = dv->build_ver;
499 	cmd->subbuild_ver = dv->subbuild_ver;
500 
501 	len = 0;
502 	while (len < sizeof(dv->driver_string) &&
503 	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
504 		len++;
505 
506 	return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
507 }
508 
509 /**
510  * ixgbe_aci_req_res - request a common resource
511  * @hw: pointer to the HW struct
512  * @res: resource ID
513  * @access: access type
514  * @sdp_number: resource number
515  * @timeout: the maximum time in ms that the driver may hold the resource
516  *
517  * Requests a common resource using the ACI command (0x0008).
518  * Specifies the maximum time the driver may hold the resource.
519  * If the requested resource is currently occupied by some other driver,
520  * a busy return value is returned and the timeout field value indicates the
521  * maximum time the current owner has to free it.
522  *
523  * Return: the exit code of the operation.
524  */
525 static s32
ixgbe_aci_req_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u8 sdp_number,u32 * timeout)526 ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
527 		  enum ixgbe_aci_res_access_type access, u8 sdp_number,
528 		  u32 *timeout)
529 {
530 	struct ixgbe_aci_cmd_req_res *cmd_resp;
531 	struct ixgbe_aci_desc desc;
532 	s32 status;
533 
534 	cmd_resp = &desc.params.res_owner;
535 
536 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
537 
538 	cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
539 	cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
540 	cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
541 	cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
542 	*timeout = 0;
543 
544 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
545 
546 	/* The completion specifies the maximum time in ms that the driver
547 	 * may hold the resource in the Timeout field.
548 	 * If the resource is held by some other driver, the command completes
549 	 * with a busy return value and the timeout field indicates the maximum
550 	 * time the current owner of the resource has to free it.
551 	 */
552 	if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
553 		*timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
554 
555 	return status;
556 }
557 
558 /**
559  * ixgbe_aci_release_res - release a common resource using ACI
560  * @hw: pointer to the HW struct
561  * @res: resource ID
562  * @sdp_number: resource number
563  *
564  * Release a common resource using ACI command (0x0009).
565  *
566  * Return: the exit code of the operation.
567  */
568 static s32
ixgbe_aci_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,u8 sdp_number)569 ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
570 		      u8 sdp_number)
571 {
572 	struct ixgbe_aci_cmd_req_res *cmd;
573 	struct ixgbe_aci_desc desc;
574 
575 	cmd = &desc.params.res_owner;
576 
577 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
578 
579 	cmd->res_id = IXGBE_CPU_TO_LE16(res);
580 	cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
581 
582 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
583 }
584 
585 /**
586  * ixgbe_acquire_res - acquire the ownership of a resource
587  * @hw: pointer to the HW structure
588  * @res: resource ID
589  * @access: access type (read or write)
590  * @timeout: timeout in milliseconds
591  *
592  * Make an attempt to acquire the ownership of a resource using
593  * the ixgbe_aci_req_res to utilize ACI.
594  * In case if some other driver has previously acquired the resource and
595  * performed any necessary updates, the IXGBE_ERR_ACI_NO_WORK is returned,
596  * and the caller does not obtain the resource and has no further work to do.
597  * If needed, the function will poll until the current lock owner timeouts.
598  *
599  * Return: the exit code of the operation.
600  */
ixgbe_acquire_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u32 timeout)601 s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
602 		      enum ixgbe_aci_res_access_type access, u32 timeout)
603 {
604 #define IXGBE_RES_POLLING_DELAY_MS	10
605 	u32 delay = IXGBE_RES_POLLING_DELAY_MS;
606 	u32 res_timeout = timeout;
607 	u32 retry_timeout = 0;
608 	s32 status;
609 
610 	status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
611 
612 	/* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
613 	 * previously acquired the resource and performed any necessary updates;
614 	 * in this case the caller does not obtain the resource and has no
615 	 * further work to do.
616 	 */
617 	if (status == IXGBE_ERR_ACI_NO_WORK)
618 		goto ixgbe_acquire_res_exit;
619 
620 	/* If necessary, poll until the current lock owner timeouts.
621 	 * Set retry_timeout to the timeout value reported by the FW in the
622 	 * response to the "Request Resource Ownership" (0x0008) Admin Command
623 	 * as it indicates the maximum time the current owner of the resource
624 	 * is allowed to hold it.
625 	 */
626 	retry_timeout = res_timeout;
627 	while (status && retry_timeout && res_timeout) {
628 		msec_delay(delay);
629 		retry_timeout = (retry_timeout > delay) ?
630 			retry_timeout - delay : 0;
631 		status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
632 
633 		if (status == IXGBE_ERR_ACI_NO_WORK)
634 			/* lock free, but no work to do */
635 			break;
636 
637 		if (!status)
638 			/* lock acquired */
639 			break;
640 	}
641 
642 ixgbe_acquire_res_exit:
643 	return status;
644 }
645 
646 /**
647  * ixgbe_release_res - release a common resource
648  * @hw: pointer to the HW structure
649  * @res: resource ID
650  *
651  * Release a common resource using ixgbe_aci_release_res.
652  */
ixgbe_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res)653 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
654 {
655 	u32 total_delay = 0;
656 	s32 status;
657 
658 	status = ixgbe_aci_release_res(hw, res, 0);
659 
660 	/* There are some rare cases when trying to release the resource
661 	 * results in an admin command timeout, so handle them correctly.
662 	 */
663 	while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
664 	       (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
665 		msec_delay(1);
666 		status = ixgbe_aci_release_res(hw, res, 0);
667 		total_delay++;
668 	}
669 }
670 
671 /**
672  * ixgbe_parse_common_caps - Parse common device/function capabilities
673  * @hw: pointer to the HW struct
674  * @caps: pointer to common capabilities structure
675  * @elem: the capability element to parse
676  * @prefix: message prefix for tracing capabilities
677  *
678  * Given a capability element, extract relevant details into the common
679  * capability structure.
680  *
681  * Return: true if the capability matches one of the common capability ids,
682  * false otherwise.
683  */
static bool
ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
			struct ixgbe_aci_cmd_list_caps_elem *elem,
			const char *prefix)
{
	/* Decode the little-endian capability element fields once up front */
	u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
	u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
	u32 number = IXGBE_LE32_TO_CPU(elem->number);
	u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
	bool found = true;

	UNREFERENCED_1PARAMETER(hw);

	switch (cap) {
	case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
		/* Bitmap of enabled physical functions */
		caps->valid_functions = number;
		break;
	case IXGBE_ACI_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		break;
	case IXGBE_ACI_CAPS_VMDQ:
		caps->vmdq = (number == 1);
		break;
	case IXGBE_ACI_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		break;
	case IXGBE_ACI_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		break;
	case IXGBE_ACI_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_NVM_VER:
		/* Recognized but carries no fields to record */
		break;
	case IXGBE_ACI_CAPS_NVM_MGMT:
		/* 'number' is a bitmask of NVM management features */
		caps->sec_rev_disabled =
			(number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
			true : false;
		caps->update_disabled =
			(number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
			true : false;
		caps->nvm_unified_update =
			(number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		caps->netlist_auth =
			(number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
			true : false;
		break;
	case IXGBE_ACI_CAPS_MAX_MTU:
		caps->max_mtu = number;
		break;
	case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		break;
	case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		break;
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
	{
		/* Four consecutive cap IDs map to array slots 0..3 */
		u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;

		caps->ext_topo_dev_img_ver_high[index] = number;
		caps->ext_topo_dev_img_ver_low[index] = logical_id;
		caps->ext_topo_dev_img_part_num[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
			IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
		caps->ext_topo_dev_img_load_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
		caps->ext_topo_dev_img_prog_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
		break;
	}
	case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
		caps->orom_recovery_update = (number == 1);
		break;
	case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
		caps->next_cluster_id_support = (number == 1);
		DEBUGOUT2("%s: next_cluster_id_support = %d\n",
			  prefix, caps->next_cluster_id_support);
		break;
	case IXGBE_ACI_CAPS_EEE:
		caps->eee_support = (u8)number;
		DEBUGOUT2("%s: eee_support = %x\n", prefix, caps->eee_support);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}
790 
791 /**
792  * ixgbe_hweight8 - count set bits among the 8 lowest bits
793  * @w: variable storing set bits to count
794  *
795  * Return: the number of set bits among the 8 lowest bits in the provided value.
796  */
ixgbe_hweight8(u32 w)797 static u8 ixgbe_hweight8(u32 w)
798 {
799 	u8 hweight = 0, i;
800 
801 	for (i = 0; i < 8; i++)
802 		if (w & (1 << i))
803 			hweight++;
804 
805 	return hweight;
806 }
807 
808 /**
809  * ixgbe_hweight32 - count set bits among the 32 lowest bits
810  * @w: variable storing set bits to count
811  *
812  * Return: the number of set bits among the 32 lowest bits in the
813  * provided value.
814  */
ixgbe_hweight32(u32 w)815 static u8 ixgbe_hweight32(u32 w)
816 {
817 	u32 bitMask = 0x1, i;
818 	u8  bitCnt = 0;
819 
820 	for (i = 0; i < 32; i++)
821 	{
822 		if (w & bitMask)
823 			bitCnt++;
824 
825 		bitMask = bitMask << 0x1;
826 	}
827 
828 	return bitCnt;
829 }
830 
831 /**
832  * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
833  * @hw: pointer to the HW struct
834  * @dev_p: pointer to device capabilities structure
835  * @cap: capability element to parse
836  *
837  * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
838  */
839 static void
ixgbe_parse_valid_functions_cap(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)840 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
841 				struct ixgbe_hw_dev_caps *dev_p,
842 				struct ixgbe_aci_cmd_list_caps_elem *cap)
843 {
844 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
845 
846 	UNREFERENCED_1PARAMETER(hw);
847 
848 	dev_p->num_funcs = ixgbe_hweight32(number);
849 }
850 
851 /**
852  * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
853  * @hw: pointer to the HW struct
854  * @dev_p: pointer to device capabilities structure
855  * @cap: capability element to parse
856  *
857  * Parse IXGBE_ACI_CAPS_VF for device capabilities.
858  */
ixgbe_parse_vf_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)859 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
860 				    struct ixgbe_hw_dev_caps *dev_p,
861 				    struct ixgbe_aci_cmd_list_caps_elem *cap)
862 {
863 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
864 
865 	UNREFERENCED_1PARAMETER(hw);
866 
867 	dev_p->num_vfs_exposed = number;
868 }
869 
870 /**
871  * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
872  * @hw: pointer to the HW struct
873  * @dev_p: pointer to device capabilities structure
874  * @cap: capability element to parse
875  *
876  * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
877  */
ixgbe_parse_vsi_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)878 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
879 				     struct ixgbe_hw_dev_caps *dev_p,
880 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
881 {
882 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
883 
884 	UNREFERENCED_1PARAMETER(hw);
885 
886 	dev_p->num_vsi_allocd_to_host = number;
887 }
888 
889 /**
890  * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
891  * @hw: pointer to the HW struct
892  * @dev_p: pointer to device capabilities structure
893  * @cap: capability element to parse
894  *
895  * Parse IXGBE_ACI_CAPS_FD for device capabilities.
896  */
ixgbe_parse_fdir_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)897 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
898 				      struct ixgbe_hw_dev_caps *dev_p,
899 				      struct ixgbe_aci_cmd_list_caps_elem *cap)
900 {
901 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
902 
903 	UNREFERENCED_1PARAMETER(hw);
904 
905 	dev_p->num_flow_director_fltr = number;
906 }
907 
/**
 * ixgbe_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ixgbe_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
 */
static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
				 struct ixgbe_hw_dev_caps *dev_p,
				 void *buf, u32 cap_count)
{
	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;

	/* Start from a clean slate; capabilities absent from the firmware
	 * response stay zeroed.
	 */
	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
		bool found;

		/* Shared caps are handled by the common parser first;
		 * 'found' records whether this element was one of them.
		 */
		found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		/* Dispatch device-specific capability parsers */
		switch (cap) {
		case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
			ixgbe_parse_valid_functions_cap(hw, dev_p,
							&cap_resp[i]);
			break;
		case IXGBE_ACI_CAPS_VF:
			ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case IXGBE_ACI_CAPS_VSI:
			ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case  IXGBE_ACI_CAPS_FD:
			ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown.
			 * NOTE(review): the 'if (!found)' branch is currently
			 * a no-op placeholder (falls out of the switch either
			 * way) - presumably reserved for debug logging.
			 */
			if (!found)
				break;
		}
	}

}
962 
963 /**
964  * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
965  * @hw: pointer to the HW struct
966  * @func_p: pointer to function capabilities structure
967  * @cap: pointer to the capability element to parse
968  *
969  * Extract function capabilities for IXGBE_ACI_CAPS_VF.
970  */
ixgbe_parse_vf_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)971 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
972 				     struct ixgbe_hw_func_caps *func_p,
973 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
974 {
975 	u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
976 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
977 
978 	UNREFERENCED_1PARAMETER(hw);
979 
980 	func_p->num_allocd_vfs = number;
981 	func_p->vf_base_id = logical_id;
982 }
983 
984 /**
985  * ixgbe_get_num_per_func - determine number of resources per PF
986  * @hw: pointer to the HW structure
987  * @max: value to be evenly split between each PF
988  *
989  * Determine the number of valid functions by going through the bitmap returned
990  * from parsing capabilities and use this to calculate the number of resources
991  * per PF based on the max value passed in.
992  *
993  * Return: the number of resources per PF or 0, if no PH are available.
994  */
ixgbe_get_num_per_func(struct ixgbe_hw * hw,u32 max)995 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
996 {
997 	u8 funcs;
998 
999 #define IXGBE_CAPS_VALID_FUNCS_M	0xFF
1000 	funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
1001 			     IXGBE_CAPS_VALID_FUNCS_M);
1002 
1003 	if (!funcs)
1004 		return 0;
1005 
1006 	return max / funcs;
1007 }
1008 
/**
 * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse (unused; the per-PF VSI
 *       budget is derived from previously parsed device capabilities)
 *
 * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
 */
static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
				      struct ixgbe_hw_func_caps *func_p,
				      struct ixgbe_aci_cmd_list_caps_elem *cap)
{
	/* Guaranteed VSIs for this PF: total pool split across valid PFs */
	func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
}
1023 
1024 /**
1025  * ixgbe_parse_func_caps - Parse function capabilities
1026  * @hw: pointer to the HW struct
1027  * @func_p: pointer to function capabilities structure
1028  * @buf: buffer containing the function capability records
1029  * @cap_count: the number of capabilities
1030  *
1031  * Helper function to parse function (0x000A) capabilities list. For
1032  * capabilities shared between device and function, this relies on
1033  * ixgbe_parse_common_caps.
1034  *
1035  * Loop through the list of provided capabilities and extract the relevant
1036  * data into the function capabilities structured.
1037  */
ixgbe_parse_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,void * buf,u32 cap_count)1038 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
1039 				  struct ixgbe_hw_func_caps *func_p,
1040 				  void *buf, u32 cap_count)
1041 {
1042 	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
1043 	u32 i;
1044 
1045 	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
1046 
1047 	memset(func_p, 0, sizeof(*func_p));
1048 
1049 	for (i = 0; i < cap_count; i++) {
1050 		u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
1051 		ixgbe_parse_common_caps(hw, &func_p->common_cap,
1052 					&cap_resp[i], "func caps");
1053 
1054 		switch (cap) {
1055 		case IXGBE_ACI_CAPS_VF:
1056 			ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
1057 			break;
1058 		case IXGBE_ACI_CAPS_VSI:
1059 			ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
1060 			break;
1061 		default:
1062 			/* Don't list common capabilities as unknown */
1063 			break;
1064 		}
1065 	}
1066 
1067 }
1068 
1069 /**
1070  * ixgbe_aci_list_caps - query function/device capabilities
1071  * @hw: pointer to the HW struct
1072  * @buf: a buffer to hold the capabilities
1073  * @buf_size: size of the buffer
1074  * @cap_count: if not NULL, set to the number of capabilities reported
1075  * @opc: capabilities type to discover, device or function
1076  *
1077  * Get the function (0x000A) or device (0x000B) capabilities description from
1078  * firmware and store it in the buffer.
1079  *
1080  * If the cap_count pointer is not NULL, then it is set to the number of
1081  * capabilities firmware will report. Note that if the buffer size is too
1082  * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
1083  * cap_count will still be updated in this case. It is recommended that the
1084  * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
1085  * buffer that firmware could return) to avoid this.
1086  *
1087  * Return: the exit code of the operation.
1088  * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
1089  */
ixgbe_aci_list_caps(struct ixgbe_hw * hw,void * buf,u16 buf_size,u32 * cap_count,enum ixgbe_aci_opc opc)1090 s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
1091 			u32 *cap_count, enum ixgbe_aci_opc opc)
1092 {
1093 	struct ixgbe_aci_cmd_list_caps *cmd;
1094 	struct ixgbe_aci_desc desc;
1095 	s32 status;
1096 
1097 	cmd = &desc.params.get_cap;
1098 
1099 	if (opc != ixgbe_aci_opc_list_func_caps &&
1100 	    opc != ixgbe_aci_opc_list_dev_caps)
1101 		return IXGBE_ERR_PARAM;
1102 
1103 	ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
1104 	status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
1105 
1106 	if (cap_count)
1107 		*cap_count = IXGBE_LE32_TO_CPU(cmd->count);
1108 
1109 	return status;
1110 }
1111 
1112 /**
1113  * ixgbe_discover_dev_caps - Read and extract device capabilities
1114  * @hw: pointer to the hardware structure
1115  * @dev_caps: pointer to device capabilities structure
1116  *
1117  * Read the device capabilities and extract them into the dev_caps structure
1118  * for later use.
1119  *
1120  * Return: the exit code of the operation.
1121  */
ixgbe_discover_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_caps)1122 s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
1123 			    struct ixgbe_hw_dev_caps *dev_caps)
1124 {
1125 	u32 status, cap_count = 0;
1126 	u8 *cbuf = NULL;
1127 
1128 	cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1129 	if (!cbuf)
1130 		return IXGBE_ERR_OUT_OF_MEM;
1131 	/* Although the driver doesn't know the number of capabilities the
1132 	 * device will return, we can simply send a 4KB buffer, the maximum
1133 	 * possible size that firmware can return.
1134 	 */
1135 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1136 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1137 
1138 	status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1139 				     &cap_count,
1140 				     ixgbe_aci_opc_list_dev_caps);
1141 	if (!status)
1142 		ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
1143 
1144 	if (cbuf)
1145 		ixgbe_free(hw, cbuf);
1146 
1147 	return status;
1148 }
1149 
1150 /**
1151  * ixgbe_discover_func_caps - Read and extract function capabilities
1152  * @hw: pointer to the hardware structure
1153  * @func_caps: pointer to function capabilities structure
1154  *
1155  * Read the function capabilities and extract them into the func_caps structure
1156  * for later use.
1157  *
1158  * Return: the exit code of the operation.
1159  */
ixgbe_discover_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_caps)1160 s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
1161 			     struct ixgbe_hw_func_caps *func_caps)
1162 {
1163 	u32 cap_count = 0;
1164 	u8 *cbuf = NULL;
1165 	s32 status;
1166 
1167 	cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1168 	if(!cbuf)
1169 		return IXGBE_ERR_OUT_OF_MEM;
1170 	/* Although the driver doesn't know the number of capabilities the
1171 	 * device will return, we can simply send a 4KB buffer, the maximum
1172 	 * possible size that firmware can return.
1173 	 */
1174 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1175 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1176 
1177 	status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1178 				     &cap_count,
1179 				     ixgbe_aci_opc_list_func_caps);
1180 	if (!status)
1181 		ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
1182 
1183 	if (cbuf)
1184 		ixgbe_free(hw, cbuf);
1185 
1186 	return status;
1187 }
1188 
1189 /**
1190  * ixgbe_get_caps - get info about the HW
1191  * @hw: pointer to the hardware structure
1192  *
1193  * Retrieve both device and function capabilities.
1194  *
1195  * Return: the exit code of the operation.
1196  */
ixgbe_get_caps(struct ixgbe_hw * hw)1197 s32 ixgbe_get_caps(struct ixgbe_hw *hw)
1198 {
1199 	s32 status;
1200 
1201 	status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
1202 	if (status)
1203 		return status;
1204 
1205 	return ixgbe_discover_func_caps(hw, &hw->func_caps);
1206 }
1207 
1208 /**
1209  * ixgbe_aci_disable_rxen - disable RX
1210  * @hw: pointer to the HW struct
1211  *
1212  * Request a safe disable of Receive Enable using ACI command (0x000C).
1213  *
1214  * Return: the exit code of the operation.
1215  */
ixgbe_aci_disable_rxen(struct ixgbe_hw * hw)1216 s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
1217 {
1218 	struct ixgbe_aci_cmd_disable_rxen *cmd;
1219 	struct ixgbe_aci_desc desc;
1220 
1221 	UNREFERENCED_1PARAMETER(hw);
1222 
1223 	cmd = &desc.params.disable_rxen;
1224 
1225 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
1226 
1227 	cmd->lport_num = (u8)hw->bus.func;
1228 
1229 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1230 }
1231 
/**
 * ixgbe_aci_get_phy_caps - returns PHY capabilities
 * @hw: pointer to the HW struct
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 *
 * Returns the various PHY capabilities supported on the Port
 * using ACI command (0x0600).
 *
 * As a side effect, when @report_mode is IXGBE_ACI_REPORT_TOPO_CAP_MEDIA
 * and the command succeeds, the PHY type and module type are cached in
 * the HW struct for later use.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
			   struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
{
	struct ixgbe_aci_cmd_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ixgbe_aci_desc desc;
	s32 status;

	cmd = &desc.params.get_phy;

	/* Reject report modes carrying bits outside the mode mask */
	if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
		return IXGBE_ERR_PARAM;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);

	/* Optionally request the list of qualified modules as well */
	if (qual_mods)
		cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);

	cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
	status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);

	/* Cache media-topology capabilities in the HW struct on success */
	if (status == IXGBE_SUCCESS &&
	    report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
		hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
		hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
		memcpy(hw->link.link_info.module_type, &pcaps->module_type,
			   sizeof(hw->link.link_info.module_type));
	}

	return status;
}
1275 
1276 /**
1277  * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
1278  * @phy_caps: PHY capabilities
1279  * @phy_cfg: PHY configuration
1280  *
1281  * Helper function to determine if PHY capabilities match PHY
1282  * configuration
1283  *
1284  * Return: true if PHY capabilities match PHY configuration.
1285  */
1286 bool
ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * phy_caps,struct ixgbe_aci_cmd_set_phy_cfg_data * phy_cfg)1287 ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
1288 			  struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
1289 {
1290 	u8 caps_mask, cfg_mask;
1291 
1292 	if (!phy_caps || !phy_cfg)
1293 		return false;
1294 
1295 	/* These bits are not common between capabilities and configuration.
1296 	 * Do not use them to determine equality.
1297 	 */
1298 	caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
1299 					      IXGBE_ACI_PHY_EN_MOD_QUAL);
1300 	cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
1301 		   ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
1302 
1303 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
1304 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
1305 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
1306 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
1307 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
1308 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
1309 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
1310 		return false;
1311 
1312 	return true;
1313 }
1314 
1315 /**
1316  * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
1317  * @caps: PHY ability structure to copy data from
1318  * @cfg: PHY configuration structure to copy data to
1319  *
1320  * Helper function to copy data from PHY capabilities data structure
1321  * to PHY configuration data structure
1322  */
ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * caps,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1323 void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
1324 				struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1325 {
1326 	if (!caps || !cfg)
1327 		return;
1328 
1329 	memset(cfg, 0, sizeof(*cfg));
1330 	cfg->phy_type_low = caps->phy_type_low;
1331 	cfg->phy_type_high = caps->phy_type_high;
1332 	cfg->caps = caps->caps;
1333 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
1334 	cfg->eee_cap = caps->eee_cap;
1335 	cfg->eeer_value = caps->eeer_value;
1336 	cfg->link_fec_opt = caps->link_fec_options;
1337 	cfg->module_compliance_enforcement =
1338 		caps->module_compliance_enforcement;
1339 	cfg->eee_entry_delay = caps->eee_entry_delay;
1340 }
1341 
/**
 * ixgbe_aci_set_phy_cfg - set PHY configuration
 * @hw: pointer to the HW struct
 * @cfg: structure with PHY configuration data to be set
 *
 * Set the various PHY configuration parameters supported on the Port
 * using ACI command (0x0601).
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
			  struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
{
	struct ixgbe_aci_desc desc;
	bool use_1p40_buff;
	s32 status;

	if (!cfg)
		return IXGBE_ERR_PARAM;
	/* EEE support implies firmware >= 1.40, which takes the full-size
	 * (EEE-aware) buffer layout.
	 */
	use_1p40_buff =	hw->func_caps.common_cap.eee_support != 0;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK) {
		cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
	}

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);

	if (use_1p40_buff) {
		status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
	} else {
		/* Older firmware expects the shorter pre-1.40 layout; copy
		 * the leading fields into the legacy-sized buffer.
		 */
		struct ixgbe_aci_cmd_set_phy_cfg_data_pre_1_40 cfg_obsolete;

		memcpy(&cfg_obsolete, cfg, sizeof(cfg_obsolete));

		status = ixgbe_aci_send_cmd(hw, &desc, &cfg_obsolete,
					    sizeof(cfg_obsolete));
	}

	/* even if the old buffer is used no need to worry about conversion */
	if (!status)
		hw->phy.curr_user_phy_cfg = *cfg;

	return status;
}
1391 
1392 /**
1393  * ixgbe_aci_set_link_restart_an - set up link and restart AN
1394  * @hw: pointer to the HW struct
1395  * @ena_link: if true: enable link, if false: disable link
1396  *
1397  * Function sets up the link and restarts the Auto-Negotiation over the link.
1398  *
1399  * Return: the exit code of the operation.
1400  */
ixgbe_aci_set_link_restart_an(struct ixgbe_hw * hw,bool ena_link)1401 s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
1402 {
1403 	struct ixgbe_aci_cmd_restart_an *cmd;
1404 	struct ixgbe_aci_desc desc;
1405 
1406 	cmd = &desc.params.restart_an;
1407 
1408 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
1409 
1410 	cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
1411 	if (ena_link)
1412 		cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1413 	else
1414 		cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1415 
1416 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1417 }
1418 
/**
 * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
 * @hw: pointer to the HW struct
 *
 * Try to identify the media type based on the phy type.
 * If more than one media type, the ixgbe_media_type_unknown is returned.
 * First, phy_type_low is checked, then phy_type_high.
 * If none are identified, the ixgbe_media_type_unknown is returned
 *
 * Return: type of a media based on phy type in form of enum.
 */
static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
{
	struct ixgbe_link_status *hw_link_info;

	if (!hw)
		return ixgbe_media_type_unknown;

	hw_link_info = &hw->link.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ixgbe_media_type_unknown;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ixgbe_media_type_da;

		switch (hw_link_info->phy_type_low) {
		/* Optical fiber PHY types */
		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
			return ixgbe_media_type_fiber;
		/* Active optical/copper cables report as fiber */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
			return ixgbe_media_type_fiber;
		/* BASE-T copper PHY types */
		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
			return ixgbe_media_type_copper;
		/* Direct-attach cable */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
			return ixgbe_media_type_da;
		/* Backplane (KX/KR/C2C) PHY types */
		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
			return ixgbe_media_type_backplane;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
			return ixgbe_media_type_copper;
		}
	}
	return ixgbe_media_type_unknown;
}
1488 
1489 /**
1490  * ixgbe_update_link_info - update status of the HW network link
1491  * @hw: pointer to the HW struct
1492  *
1493  * Update the status of the HW network link.
1494  *
1495  * Return: the exit code of the operation.
1496  */
ixgbe_update_link_info(struct ixgbe_hw * hw)1497 s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
1498 {
1499 	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
1500 	struct ixgbe_link_status *li;
1501 	s32 status;
1502 
1503 	if (!hw)
1504 		return IXGBE_ERR_PARAM;
1505 
1506 	li = &hw->link.link_info;
1507 
1508 	status = ixgbe_aci_get_link_info(hw, true, NULL);
1509 	if (status)
1510 		return status;
1511 
1512 	if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
1513 		pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
1514 			ixgbe_malloc(hw, sizeof(*pcaps));
1515 		if (!pcaps)
1516 			return IXGBE_ERR_OUT_OF_MEM;
1517 
1518 		status = ixgbe_aci_get_phy_caps(hw, false,
1519 						IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1520 						pcaps);
1521 
1522 		if (status == IXGBE_SUCCESS)
1523 			memcpy(li->module_type, &pcaps->module_type,
1524 			       sizeof(li->module_type));
1525 
1526 		ixgbe_free(hw, pcaps);
1527 	}
1528 
1529 	return status;
1530 }
1531 
1532 /**
1533  * ixgbe_get_link_status - get status of the HW network link
1534  * @hw: pointer to the HW struct
1535  * @link_up: pointer to bool (true/false = linkup/linkdown)
1536  *
1537  * Variable link_up is true if link is up, false if link is down.
1538  * The variable link_up is invalid if status is non zero. As a
1539  * result of this call, link status reporting becomes enabled
1540  *
1541  * Return: the exit code of the operation.
1542  */
ixgbe_get_link_status(struct ixgbe_hw * hw,bool * link_up)1543 s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
1544 {
1545 	s32 status = IXGBE_SUCCESS;
1546 
1547 	if (!hw || !link_up)
1548 		return IXGBE_ERR_PARAM;
1549 
1550 	if (hw->link.get_link_info) {
1551 		status = ixgbe_update_link_info(hw);
1552 		if (status) {
1553 			return status;
1554 		}
1555 	}
1556 
1557 	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
1558 
1559 	return status;
1560 }
1561 
/**
 * ixgbe_aci_get_link_info - get the link status
 * @hw: pointer to the HW struct
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 *
 * Get the current Link Status using ACI command (0x607).
 * The current link can be optionally provided to update
 * the status.
 *
 * Return: the link status of the adapter.
 */
s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
			    struct ixgbe_link_status *link)
{
	struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
	struct ixgbe_aci_cmd_get_link_status *resp;
	struct ixgbe_link_status *li_old, *li;
	struct ixgbe_fc_info *hw_fc_info;
	struct ixgbe_aci_desc desc;
	bool tx_pause, rx_pause;
	u8 cmd_flags;
	s32 status;

	if (!hw)
		return IXGBE_ERR_PARAM;

	li_old = &hw->link.link_info_old;
	li = &hw->link.link_info;
	hw_fc_info = &hw->fc;

	/* The same command both queries link state and (de)activates
	 * firmware link-status-event reporting, per cmd_flags.
	 */
	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cmd_flags;

	status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));

	if (status != IXGBE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
	/* FEC and pacing are packed into the same cfg byte */
	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
				      IXGBE_ACI_CFG_PACING_TYPE_M);
	li->eee_status = link_data.eee_status;

	/* update fc info: derive current flow-control mode from the
	 * negotiated pause bits
	 */
	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_full;
	else if (tx_pause)
		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
	else if (rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
	else
		hw_fc_info->current_mode = ixgbe_fc_none;

	/* Firmware echoes the resulting LSE state in the response flags */
	li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	hw->link.get_link_info = false;

	return IXGBE_SUCCESS;
}
1644 
1645 /**
1646  * ixgbe_aci_set_event_mask - set event mask
1647  * @hw: pointer to the HW struct
1648  * @port_num: port number of the physical function
1649  * @mask: event mask to be set
1650  *
1651  * Set the event mask using ACI command (0x0613).
1652  *
1653  * Return: the exit code of the operation.
1654  */
ixgbe_aci_set_event_mask(struct ixgbe_hw * hw,u8 port_num,u16 mask)1655 s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
1656 {
1657 	struct ixgbe_aci_cmd_set_event_mask *cmd;
1658 	struct ixgbe_aci_desc desc;
1659 
1660 	cmd = &desc.params.set_event_mask;
1661 
1662 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
1663 
1664 	cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
1665 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1666 }
1667 
1668 /**
1669  * ixgbe_configure_lse - enable/disable link status events
1670  * @hw: pointer to the HW struct
1671  * @activate: bool value deciding if lse should be enabled nor disabled
1672  * @mask: event mask to be set; a set bit means deactivation of the
1673  * corresponding event
1674  *
1675  * Set the event mask and then enable or disable link status events
1676  *
1677  * Return: the exit code of the operation.
1678  */
ixgbe_configure_lse(struct ixgbe_hw * hw,bool activate,u16 mask)1679 s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
1680 {
1681 	s32 rc;
1682 
1683 	rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
1684 	if (rc) {
1685 		return rc;
1686 	}
1687 
1688 	/* Enabling link status events generation by fw */
1689 	rc = ixgbe_aci_get_link_info(hw, activate, NULL);
1690 	if (rc) {
1691 		return rc;
1692 	}
1693 	return IXGBE_SUCCESS;
1694 }
1695 
1696 /**
1697  * ixgbe_aci_get_netlist_node - get a node handle
1698  * @hw: pointer to the hw struct
1699  * @cmd: get_link_topo AQ structure
1700  * @node_part_number: output node part number if node found
1701  * @node_handle: output node handle parameter if node found
1702  *
1703  * Get the netlist node and assigns it to
1704  * the provided handle using ACI command (0x06E0).
1705  *
1706  * Return: the exit code of the operation.
1707  */
ixgbe_aci_get_netlist_node(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_get_link_topo * cmd,u8 * node_part_number,u16 * node_handle)1708 s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
1709 			       struct ixgbe_aci_cmd_get_link_topo *cmd,
1710 			       u8 *node_part_number, u16 *node_handle)
1711 {
1712 	struct ixgbe_aci_desc desc;
1713 
1714 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1715 	desc.params.get_link_topo = *cmd;
1716 
1717 	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
1718 		return IXGBE_ERR_NOT_SUPPORTED;
1719 
1720 	if (node_handle)
1721 		*node_handle =
1722 			IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
1723 	if (node_part_number)
1724 		*node_part_number = desc.params.get_link_topo.node_part_num;
1725 
1726 	return IXGBE_SUCCESS;
1727 }
1728 
1729 /**
1730  * ixgbe_find_netlist_node - find a node handle
1731  * @hw: pointer to the hw struct
1732  * @node_type_ctx: type of netlist node to look for
1733  * @node_part_number: node part number to look for
1734  * @node_handle: output parameter if node found - optional
1735  *
1736  * Find and return the node handle for a given node type and part number in the
1737  * netlist. When found IXGBE_SUCCESS is returned, IXGBE_ERR_NOT_SUPPORTED
1738  * otherwise. If @node_handle provided, it would be set to found node handle.
1739  *
1740  * Return: the exit code of the operation.
1741  */
ixgbe_find_netlist_node(struct ixgbe_hw * hw,u8 node_type_ctx,u8 node_part_number,u16 * node_handle)1742 s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
1743 			    u8 node_part_number, u16 *node_handle)
1744 {
1745 	struct ixgbe_aci_cmd_get_link_topo cmd;
1746 	u8 rec_node_part_number;
1747 	u16 rec_node_handle;
1748 	s32 status;
1749 	u8 idx;
1750 
1751 	for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
1752 		memset(&cmd, 0, sizeof(cmd));
1753 
1754 		cmd.addr.topo_params.node_type_ctx =
1755 			(node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
1756 		cmd.addr.topo_params.index = idx;
1757 
1758 		status = ixgbe_aci_get_netlist_node(hw, &cmd,
1759 						    &rec_node_part_number,
1760 						    &rec_node_handle);
1761 		if (status)
1762 			return status;
1763 
1764 		if (rec_node_part_number == node_part_number) {
1765 			if (node_handle)
1766 				*node_handle = rec_node_handle;
1767 			return IXGBE_SUCCESS;
1768 		}
1769 	}
1770 
1771 	return IXGBE_ERR_NOT_SUPPORTED;
1772 }
1773 
1774 /**
1775  * ixgbe_aci_read_i2c - read I2C register value
1776  * @hw: pointer to the hw struct
1777  * @topo_addr: topology address for a device to communicate with
1778  * @bus_addr: 7-bit I2C bus address
1779  * @addr: I2C memory address (I2C offset) with up to 16 bits
1780  * @params: I2C parameters: bit [7] - Repeated start,
1781  *				      bits [6:5] data offset size,
1782  *			    bit [4] - I2C address type, bits [3:0] - data size
1783  *				      to read (0-16 bytes)
1784  * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
1785  *
1786  * Read the value of the I2C pin register using ACI command (0x06E2).
1787  *
1788  * Return: the exit code of the operation.
1789  */
ixgbe_aci_read_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1790 s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
1791 		       struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1792 		       u16 bus_addr, __le16 addr, u8 params, u8 *data)
1793 {
1794 	struct ixgbe_aci_desc desc = { 0 };
1795 	struct ixgbe_aci_cmd_i2c *cmd;
1796 	u8 data_size;
1797 	s32 status;
1798 
1799 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
1800 	cmd = &desc.params.read_write_i2c;
1801 
1802 	if (!data)
1803 		return IXGBE_ERR_PARAM;
1804 
1805 	data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1806 		    IXGBE_ACI_I2C_DATA_SIZE_S;
1807 
1808 	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1809 	cmd->topo_addr = topo_addr;
1810 	cmd->i2c_params = params;
1811 	cmd->i2c_addr = addr;
1812 
1813 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1814 	if (!status) {
1815 		struct ixgbe_aci_cmd_read_i2c_resp *resp;
1816 		u8 i;
1817 
1818 		resp = &desc.params.read_i2c_resp;
1819 		for (i = 0; i < data_size; i++) {
1820 			*data = resp->i2c_data[i];
1821 			data++;
1822 		}
1823 	}
1824 
1825 	return status;
1826 }
1827 
1828 /**
1829  * ixgbe_aci_write_i2c - write a value to I2C register
1830  * @hw: pointer to the hw struct
1831  * @topo_addr: topology address for a device to communicate with
1832  * @bus_addr: 7-bit I2C bus address
1833  * @addr: I2C memory address (I2C offset) with up to 16 bits
1834  * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
1835  *				      to write (0-7 bytes)
1836  * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
1837  *
1838  * Write a value to the I2C pin register using ACI command (0x06E3).
1839  *
1840  * Return: the exit code of the operation.
1841  */
ixgbe_aci_write_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1842 s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
1843 			struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1844 			u16 bus_addr, __le16 addr, u8 params, u8 *data)
1845 {
1846 	struct ixgbe_aci_desc desc = { 0 };
1847 	struct ixgbe_aci_cmd_i2c *cmd;
1848 	u8 i, data_size;
1849 
1850 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
1851 	cmd = &desc.params.read_write_i2c;
1852 
1853 	data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1854 		    IXGBE_ACI_I2C_DATA_SIZE_S;
1855 
1856 	/* data_size limited to 4 */
1857 	if (data_size > 4)
1858 		return IXGBE_ERR_PARAM;
1859 
1860 	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1861 	cmd->topo_addr = topo_addr;
1862 	cmd->i2c_params = params;
1863 	cmd->i2c_addr = addr;
1864 
1865 	for (i = 0; i < data_size; i++) {
1866 		cmd->i2c_data[i] = *data;
1867 		data++;
1868 	}
1869 
1870 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1871 }
1872 
1873 /**
1874  * ixgbe_aci_set_port_id_led - set LED value for the given port
1875  * @hw: pointer to the HW struct
1876  * @orig_mode: set LED original mode
1877  *
1878  * Set LED value for the given port (0x06E9)
1879  *
1880  * Return: the exit code of the operation.
1881  */
ixgbe_aci_set_port_id_led(struct ixgbe_hw * hw,bool orig_mode)1882 s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
1883 {
1884 	struct ixgbe_aci_cmd_set_port_id_led *cmd;
1885 	struct ixgbe_aci_desc desc;
1886 
1887 	cmd = &desc.params.set_port_id_led;
1888 
1889 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
1890 
1891 	cmd->lport_num = (u8)hw->bus.func;
1892 	cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
1893 
1894 	if (orig_mode)
1895 		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
1896 	else
1897 		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
1898 
1899 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1900 }
1901 
1902 /**
1903  * ixgbe_aci_set_gpio - set GPIO pin state
1904  * @hw: pointer to the hw struct
1905  * @gpio_ctrl_handle: GPIO controller node handle
1906  * @pin_idx: IO Number of the GPIO that needs to be set
1907  * @value: SW provide IO value to set in the LSB
1908  *
1909  * Set the GPIO pin state that is a part of the topology
1910  * using ACI command (0x06EC).
1911  *
1912  * Return: the exit code of the operation.
1913  */
ixgbe_aci_set_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool value)1914 s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1915 		       bool value)
1916 {
1917 	struct ixgbe_aci_cmd_gpio *cmd;
1918 	struct ixgbe_aci_desc desc;
1919 
1920 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
1921 	cmd = &desc.params.read_write_gpio;
1922 	cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1923 	cmd->gpio_num = pin_idx;
1924 	cmd->gpio_val = value ? 1 : 0;
1925 
1926 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1927 }
1928 
1929 /**
1930  * ixgbe_aci_get_gpio - get GPIO pin state
1931  * @hw: pointer to the hw struct
1932  * @gpio_ctrl_handle: GPIO controller node handle
1933  * @pin_idx: IO Number of the GPIO that needs to be set
1934  * @value: IO value read
1935  *
1936  * Get the value of a GPIO signal which is part of the topology
1937  * using ACI command (0x06ED).
1938  *
1939  * Return: the exit code of the operation.
1940  */
ixgbe_aci_get_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool * value)1941 s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1942 		       bool *value)
1943 {
1944 	struct ixgbe_aci_cmd_gpio *cmd;
1945 	struct ixgbe_aci_desc desc;
1946 	s32 status;
1947 
1948 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
1949 	cmd = &desc.params.read_write_gpio;
1950 	cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1951 	cmd->gpio_num = pin_idx;
1952 
1953 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1954 	if (status)
1955 		return status;
1956 
1957 	*value = !!cmd->gpio_val;
1958 	return IXGBE_SUCCESS;
1959 }
1960 
1961 /**
1962  * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
1963  * @hw: pointer to the HW struct
1964  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
1965  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
1966  * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
1967  * @page: QSFP page
1968  * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
1969  * @data: pointer to data buffer to be read/written to the I2C device.
1970  * @length: 1-16 for read, 1 for write.
1971  * @write: 0 read, 1 for write.
1972  *
1973  * Read/write SFF EEPROM using ACI command (0x06EE).
1974  *
1975  * Return: the exit code of the operation.
1976  */
ixgbe_aci_sff_eeprom(struct ixgbe_hw * hw,u16 lport,u8 bus_addr,u16 mem_addr,u8 page,u8 page_bank_ctrl,u8 * data,u8 length,bool write)1977 s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
1978 			 u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
1979 			 u8 length, bool write)
1980 {
1981 	struct ixgbe_aci_cmd_sff_eeprom *cmd;
1982 	struct ixgbe_aci_desc desc;
1983 	s32 status;
1984 
1985 	if (!data || (mem_addr & 0xff00))
1986 		return IXGBE_ERR_PARAM;
1987 
1988 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
1989 	cmd = &desc.params.read_write_sff_param;
1990 	desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
1991 	cmd->lport_num = (u8)(lport & 0xff);
1992 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
1993 	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
1994 					 IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
1995 					((page_bank_ctrl <<
1996 					  IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
1997 					 IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
1998 	cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
1999 	cmd->module_page = page;
2000 	if (write)
2001 		cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
2002 
2003 	status = ixgbe_aci_send_cmd(hw, &desc, data, length);
2004 	return status;
2005 }
2006 
2007 /**
2008  * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
2009  * @hw: pointer to the hardware structure
2010  * @topo_params: pointer to structure storing topology parameters for a device
2011  *
2012  * Program Topology Device NVM using ACI command (0x06F2).
2013  *
2014  * Return: the exit code of the operation.
2015  */
ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params)2016 s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
2017 			struct ixgbe_aci_cmd_link_topo_params *topo_params)
2018 {
2019 	struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
2020 	struct ixgbe_aci_desc desc;
2021 
2022 	cmd = &desc.params.prog_topo_dev_nvm;
2023 
2024 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
2025 
2026 	memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2027 
2028 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2029 }
2030 
2031 /**
2032  * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
2033  * @hw: pointer to the hardware structure
2034  * @topo_params: pointer to structure storing topology parameters for a device
2035  * @start_address: byte offset in the topology device NVM
2036  * @data: pointer to data buffer
2037  * @data_size: number of bytes to be read from the topology device NVM
2038  * Read Topology Device NVM (0x06F3)
2039  *
2040  * Read Topology of Device NVM using ACI command (0x06F3).
2041  *
2042  * Return: the exit code of the operation.
2043  */
ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params,u32 start_address,u8 * data,u8 data_size)2044 s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
2045 			struct ixgbe_aci_cmd_link_topo_params *topo_params,
2046 			u32 start_address, u8 *data, u8 data_size)
2047 {
2048 	struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
2049 	struct ixgbe_aci_desc desc;
2050 	s32 status;
2051 
2052 	if (!data || data_size == 0 ||
2053 	    data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
2054 		return IXGBE_ERR_PARAM;
2055 
2056 	cmd = &desc.params.read_topo_dev_nvm;
2057 
2058 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
2059 
2060 	desc.datalen = IXGBE_CPU_TO_LE16(data_size);
2061 	memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2062 	cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
2063 
2064 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2065 	if (status)
2066 		return status;
2067 
2068 	memcpy(data, cmd->data_read, data_size);
2069 
2070 	return IXGBE_SUCCESS;
2071 }
2072 
2073 /**
2074  * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
2075  * @hw: pointer to the HW structure
2076  * @access: NVM access type (read or write)
2077  *
2078  * Request NVM ownership.
2079  *
2080  * Return: the exit code of the operation.
2081  */
ixgbe_acquire_nvm(struct ixgbe_hw * hw,enum ixgbe_aci_res_access_type access)2082 s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
2083 		      enum ixgbe_aci_res_access_type access)
2084 {
2085 	u32 fla;
2086 
2087 	/* Skip if we are in blank NVM programming mode */
2088 	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2089 	if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2090 		return IXGBE_SUCCESS;
2091 
2092 	return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
2093 				 IXGBE_NVM_TIMEOUT);
2094 }
2095 
2096 /**
2097  * ixgbe_release_nvm - Generic request for releasing the NVM ownership
2098  * @hw: pointer to the HW structure
2099  *
2100  * Release NVM ownership.
2101  */
ixgbe_release_nvm(struct ixgbe_hw * hw)2102 void ixgbe_release_nvm(struct ixgbe_hw *hw)
2103 {
2104 	u32 fla;
2105 
2106 	/* Skip if we are in blank NVM programming mode */
2107 	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2108 	if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2109 		return;
2110 
2111 	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
2112 }
2113 
2114 
2115 /**
2116  * ixgbe_aci_read_nvm - read NVM
2117  * @hw: pointer to the HW struct
2118  * @module_typeid: module pointer location in words from the NVM beginning
2119  * @offset: byte offset from the module beginning
2120  * @length: length of the section to be read (in bytes from the offset)
2121  * @data: command buffer (size [bytes] = length)
2122  * @last_command: tells if this is the last command in a series
2123  * @read_shadow_ram: tell if this is a shadow RAM read
2124  *
2125  * Read the NVM using ACI command (0x0701).
2126  *
2127  * Return: the exit code of the operation.
2128  */
ixgbe_aci_read_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,bool read_shadow_ram)2129 s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
2130 		       u16 length, void *data, bool last_command,
2131 		       bool read_shadow_ram)
2132 {
2133 	struct ixgbe_aci_desc desc;
2134 	struct ixgbe_aci_cmd_nvm *cmd;
2135 
2136 	cmd = &desc.params.nvm;
2137 
2138 	if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
2139 		return IXGBE_ERR_PARAM;
2140 
2141 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
2142 
2143 	if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
2144 		cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
2145 
2146 	/* If this is the last command in a series, set the proper flag. */
2147 	if (last_command)
2148 		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2149 	cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2150 	cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
2151 	cmd->offset_high = (offset >> 16) & 0xFF;
2152 	cmd->length = IXGBE_CPU_TO_LE16(length);
2153 
2154 	return ixgbe_aci_send_cmd(hw, &desc, data, length);
2155 }
2156 
2157 /**
2158  * ixgbe_aci_erase_nvm - erase NVM sector
2159  * @hw: pointer to the HW struct
2160  * @module_typeid: module pointer location in words from the NVM beginning
2161  *
2162  * Erase the NVM sector using the ACI command (0x0702).
2163  *
2164  * Return: the exit code of the operation.
2165  */
ixgbe_aci_erase_nvm(struct ixgbe_hw * hw,u16 module_typeid)2166 s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
2167 {
2168 	struct ixgbe_aci_desc desc;
2169 	struct ixgbe_aci_cmd_nvm *cmd;
2170 	s32 status;
2171 	__le16 len;
2172 
2173 	/* read a length value from SR, so module_typeid is equal to 0 */
2174 	/* calculate offset where module size is placed from bytes to words */
2175 	/* set last command and read from SR values to true */
2176 	status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
2177 				 true);
2178 	if (status)
2179 		return status;
2180 
2181 	cmd = &desc.params.nvm;
2182 
2183 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
2184 
2185 	cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2186 	cmd->length = len;
2187 	cmd->offset_low = 0;
2188 	cmd->offset_high = 0;
2189 
2190 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2191 }
2192 
2193 /**
2194  * ixgbe_aci_update_nvm - update NVM
2195  * @hw: pointer to the HW struct
2196  * @module_typeid: module pointer location in words from the NVM beginning
2197  * @offset: byte offset from the module beginning
2198  * @length: length of the section to be written (in bytes from the offset)
2199  * @data: command buffer (size [bytes] = length)
2200  * @last_command: tells if this is the last command in a series
2201  * @command_flags: command parameters
2202  *
2203  * Update the NVM using the ACI command (0x0703).
2204  *
2205  * Return: the exit code of the operation.
2206  */
ixgbe_aci_update_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,u8 command_flags)2207 s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
2208 			 u32 offset, u16 length, void *data,
2209 			 bool last_command, u8 command_flags)
2210 {
2211 	struct ixgbe_aci_desc desc;
2212 	struct ixgbe_aci_cmd_nvm *cmd;
2213 
2214 	cmd = &desc.params.nvm;
2215 
2216 	/* In offset the highest byte must be zeroed. */
2217 	if (offset & 0xFF000000)
2218 		return IXGBE_ERR_PARAM;
2219 
2220 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
2221 
2222 	cmd->cmd_flags |= command_flags;
2223 
2224 	/* If this is the last command in a series, set the proper flag. */
2225 	if (last_command)
2226 		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2227 	cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2228 	cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
2229 	cmd->offset_high = (offset >> 16) & 0xFF;
2230 	cmd->length = IXGBE_CPU_TO_LE16(length);
2231 
2232 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2233 
2234 	return ixgbe_aci_send_cmd(hw, &desc, data, length);
2235 }
2236 
2237 /**
2238  * ixgbe_aci_read_nvm_cfg - read an NVM config block
2239  * @hw: pointer to the HW struct
2240  * @cmd_flags: NVM access admin command bits
2241  * @field_id: field or feature ID
2242  * @data: buffer for result
2243  * @buf_size: buffer size
2244  * @elem_count: pointer to count of elements read by FW
2245  *
2246  * Reads a single or multiple feature/field ID and data using ACI command
2247  * (0x0704).
2248  *
2249  * Return: the exit code of the operation.
2250  */
ixgbe_aci_read_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,u16 field_id,void * data,u16 buf_size,u16 * elem_count)2251 s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2252 			   u16 field_id, void *data, u16 buf_size,
2253 			   u16 *elem_count)
2254 {
2255 	struct ixgbe_aci_cmd_nvm_cfg *cmd;
2256 	struct ixgbe_aci_desc desc;
2257 	s32 status;
2258 
2259 	cmd = &desc.params.nvm_cfg;
2260 
2261 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
2262 
2263 	cmd->cmd_flags = cmd_flags;
2264 	cmd->id = IXGBE_CPU_TO_LE16(field_id);
2265 
2266 	status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2267 	if (!status && elem_count)
2268 		*elem_count = IXGBE_LE16_TO_CPU(cmd->count);
2269 
2270 	return status;
2271 }
2272 
2273 /**
2274  * ixgbe_aci_write_nvm_cfg - write an NVM config block
2275  * @hw: pointer to the HW struct
2276  * @cmd_flags: NVM access admin command bits
2277  * @data: buffer for result
2278  * @buf_size: buffer size
2279  * @elem_count: count of elements to be written
2280  *
2281  * Writes a single or multiple feature/field ID and data using ACI command
2282  * (0x0705).
2283  *
2284  * Return: the exit code of the operation.
2285  */
ixgbe_aci_write_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,void * data,u16 buf_size,u16 elem_count)2286 s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2287 			    void *data, u16 buf_size, u16 elem_count)
2288 {
2289 	struct ixgbe_aci_cmd_nvm_cfg *cmd;
2290 	struct ixgbe_aci_desc desc;
2291 
2292 	cmd = &desc.params.nvm_cfg;
2293 
2294 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
2295 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2296 
2297 	cmd->count = IXGBE_CPU_TO_LE16(elem_count);
2298 	cmd->cmd_flags = cmd_flags;
2299 
2300 	return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2301 }
2302 
2303 /**
2304  * ixgbe_nvm_validate_checksum - validate checksum
2305  * @hw: pointer to the HW struct
2306  *
2307  * Verify NVM PFA checksum validity using ACI command (0x0706).
2308  * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
2309  * The function acquires and then releases the NVM ownership.
2310  *
2311  * Return: the exit code of the operation.
2312  */
ixgbe_nvm_validate_checksum(struct ixgbe_hw * hw)2313 s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
2314 {
2315 	struct ixgbe_aci_cmd_nvm_checksum *cmd;
2316 	struct ixgbe_aci_desc desc;
2317 	s32 status;
2318 
2319 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2320 	if (status)
2321 		return status;
2322 
2323 	cmd = &desc.params.nvm_checksum;
2324 
2325 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2326 	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
2327 
2328 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2329 
2330 	ixgbe_release_nvm(hw);
2331 
2332 	if (!status)
2333 		if (IXGBE_LE16_TO_CPU(cmd->checksum) !=
2334 		    IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
2335 			ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
2336 				      "Invalid Shadow Ram checksum");
2337 			status = IXGBE_ERR_NVM_CHECKSUM;
2338 		}
2339 
2340 	return status;
2341 }
2342 
2343 /**
2344  * ixgbe_nvm_recalculate_checksum - recalculate checksum
2345  * @hw: pointer to the HW struct
2346  *
2347  * Recalculate NVM PFA checksum using ACI command (0x0706).
2348  * The function acquires and then releases the NVM ownership.
2349  *
2350  * Return: the exit code of the operation.
2351  */
ixgbe_nvm_recalculate_checksum(struct ixgbe_hw * hw)2352 s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
2353 {
2354 	struct ixgbe_aci_cmd_nvm_checksum *cmd;
2355 	struct ixgbe_aci_desc desc;
2356 	s32 status;
2357 
2358 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2359 	if (status)
2360 		return status;
2361 
2362 	cmd = &desc.params.nvm_checksum;
2363 
2364 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2365 	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
2366 
2367 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2368 
2369 	ixgbe_release_nvm(hw);
2370 
2371 	return status;
2372 }
2373 
2374 /**
2375  * ixgbe_nvm_write_activate - NVM activate write
2376  * @hw: pointer to the HW struct
2377  * @cmd_flags: flags for write activate command
2378  * @response_flags: response indicators from firmware
2379  *
2380  * Update the control word with the required banks' validity bits
2381  * and dumps the Shadow RAM to flash using ACI command (0x0707).
2382  *
2383  * cmd_flags controls which banks to activate, the preservation level to use
2384  * when activating the NVM bank, and whether an EMP reset is required for
2385  * activation.
2386  *
2387  * Note that the 16bit cmd_flags value is split between two separate 1 byte
2388  * flag values in the descriptor.
2389  *
2390  * On successful return of the firmware command, the response_flags variable
2391  * is updated with the flags reported by firmware indicating certain status,
2392  * such as whether EMP reset is enabled.
2393  *
2394  * Return: the exit code of the operation.
2395  */
ixgbe_nvm_write_activate(struct ixgbe_hw * hw,u16 cmd_flags,u8 * response_flags)2396 s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
2397 			     u8 *response_flags)
2398 {
2399 	struct ixgbe_aci_desc desc;
2400 	struct ixgbe_aci_cmd_nvm *cmd;
2401 	s32 status;
2402 
2403 	cmd = &desc.params.nvm;
2404 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
2405 					ixgbe_aci_opc_nvm_write_activate);
2406 
2407 	cmd->cmd_flags = LO_BYTE(cmd_flags);
2408 	cmd->offset_high = HI_BYTE(cmd_flags);
2409 
2410 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2411 	if (!status && response_flags)
2412 		*response_flags = cmd->cmd_flags;
2413 
2414 	return status;
2415 }
2416 
2417 /**
2418  * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
2419  * @hw: pointer to the HW structure
2420  * @bank: whether to read from the active or inactive flash bank
2421  * @module: the module to read from
2422  *
2423  * Based on the module, lookup the module offset from the beginning of the
2424  * flash.
2425  *
2426  * Return: the flash offset. Note that a value of zero is invalid and must be
2427  * treated as an error.
2428  */
ixgbe_get_flash_bank_offset(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module)2429 static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
2430 				       enum ixgbe_bank_select bank,
2431 				       u16 module)
2432 {
2433 	struct ixgbe_bank_info *banks = &hw->flash.banks;
2434 	enum ixgbe_flash_bank active_bank;
2435 	bool second_bank_active;
2436 	u32 offset, size;
2437 
2438 	switch (module) {
2439 	case E610_SR_1ST_NVM_BANK_PTR:
2440 		offset = banks->nvm_ptr;
2441 		size = banks->nvm_size;
2442 		active_bank = banks->nvm_bank;
2443 		break;
2444 	case E610_SR_1ST_OROM_BANK_PTR:
2445 		offset = banks->orom_ptr;
2446 		size = banks->orom_size;
2447 		active_bank = banks->orom_bank;
2448 		break;
2449 	case E610_SR_NETLIST_BANK_PTR:
2450 		offset = banks->netlist_ptr;
2451 		size = banks->netlist_size;
2452 		active_bank = banks->netlist_bank;
2453 		break;
2454 	default:
2455 		return 0;
2456 	}
2457 
2458 	switch (active_bank) {
2459 	case IXGBE_1ST_FLASH_BANK:
2460 		second_bank_active = false;
2461 		break;
2462 	case IXGBE_2ND_FLASH_BANK:
2463 		second_bank_active = true;
2464 		break;
2465 	default:
2466 		return 0;
2467     }
2468 
2469 	/* The second flash bank is stored immediately following the first
2470 	 * bank. Based on whether the 1st or 2nd bank is active, and whether
2471 	 * we want the active or inactive bank, calculate the desired offset.
2472 	 */
2473 	switch (bank) {
2474 	case IXGBE_ACTIVE_FLASH_BANK:
2475 		return offset + (second_bank_active ? size : 0);
2476 	case IXGBE_INACTIVE_FLASH_BANK:
2477 		return offset + (second_bank_active ? 0 : size);
2478 	}
2479 
2480 	return 0;
2481 }
2482 
2483 /**
2484  * ixgbe_read_flash_module - Read a word from one of the main NVM modules
2485  * @hw: pointer to the HW structure
2486  * @bank: which bank of the module to read
2487  * @module: the module to read
2488  * @offset: the offset into the module in bytes
2489  * @data: storage for the word read from the flash
2490  * @length: bytes of data to read
2491  *
2492  * Read data from the specified flash module. The bank parameter indicates
2493  * whether or not to read from the active bank or the inactive bank of that
2494  * module.
2495  *
2496  * The word will be read using flat NVM access, and relies on the
2497  * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
2498  * during initialization.
2499  *
2500  * Return: the exit code of the operation.
2501  */
ixgbe_read_flash_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module,u32 offset,u8 * data,u32 length)2502 static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
2503 				   enum ixgbe_bank_select bank,
2504 				   u16 module, u32 offset, u8 *data, u32 length)
2505 {
2506 	s32 status;
2507 	u32 start;
2508 
2509 	start = ixgbe_get_flash_bank_offset(hw, bank, module);
2510 	if (!start) {
2511 		return IXGBE_ERR_PARAM;
2512 	}
2513 
2514 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2515 	if (status)
2516 		return status;
2517 
2518 	status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
2519 
2520 	ixgbe_release_nvm(hw);
2521 
2522 	return status;
2523 }
2524 
2525 /**
2526  * ixgbe_read_netlist_module - Read data from the netlist module area
2527  * @hw: pointer to the HW structure
2528  * @bank: whether to read from the active or inactive module
2529  * @offset: offset into the netlist to read from
2530  * @data: storage for returned word value
2531  *
2532  * Read a word from the specified netlist bank.
2533  *
2534  * Return: the exit code of the operation.
2535  */
ixgbe_read_netlist_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2536 static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
2537 				     enum ixgbe_bank_select bank,
2538 				     u32 offset, u16 *data)
2539 {
2540 	__le16 data_local;
2541 	s32 status;
2542 
2543 	status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
2544 					 offset * sizeof(u16),
2545 					 (u8 *)&data_local,
2546 					 sizeof(u16));
2547 	if (!status)
2548 		*data = IXGBE_LE16_TO_CPU(data_local);
2549 
2550 	return status;
2551 }
2552 
2553 /**
2554  * ixgbe_read_nvm_module - Read from the active main NVM module
2555  * @hw: pointer to the HW structure
2556  * @bank: whether to read from active or inactive NVM module
2557  * @offset: offset into the NVM module to read, in words
2558  * @data: storage for returned word value
2559  *
2560  * Read the specified word from the active NVM module. This includes the CSS
2561  * header at the start of the NVM module.
2562  *
2563  * Return: the exit code of the operation.
2564  */
ixgbe_read_nvm_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2565 static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
2566 				 enum ixgbe_bank_select bank,
2567 				  u32 offset, u16 *data)
2568 {
2569 	__le16 data_local;
2570 	s32 status;
2571 
2572 	status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
2573 					 offset * sizeof(u16),
2574 					 (u8 *)&data_local,
2575 					 sizeof(u16));
2576 	if (!status)
2577 		*data = IXGBE_LE16_TO_CPU(data_local);
2578 
2579 	return status;
2580 }
2581 
2582 /**
2583  * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
2584  * NVM CSS header
2585  * @hw: pointer to the HW struct
2586  * @bank: whether to read from the active or inactive flash bank
2587  * @hdr_len: storage for header length in words
2588  *
2589  * Read the CSS header length from the NVM CSS header and add the
2590  * Authentication header size, and then convert to words.
2591  *
2592  * Return: the exit code of the operation.
2593  */
ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * hdr_len)2594 static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
2595 				     enum ixgbe_bank_select bank,
2596 				     u32 *hdr_len)
2597 {
2598 	u16 hdr_len_l, hdr_len_h;
2599 	u32 hdr_len_dword;
2600 	s32 status;
2601 
2602 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
2603 				       &hdr_len_l);
2604 	if (status)
2605 		return status;
2606 
2607 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
2608 				       &hdr_len_h);
2609 	if (status)
2610 		return status;
2611 
2612 	/* CSS header length is in DWORD, so convert to words and add
2613 	 * authentication header size
2614 	 */
2615 	hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
2616 	*hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
2617 
2618 	return IXGBE_SUCCESS;
2619 }
2620 
2621 /**
2622  * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
2623  * @hw: pointer to the HW structure
2624  * @bank: whether to read from the active or inactive NVM module
2625  * @offset: offset into the Shadow RAM copy to read, in words
2626  * @data: storage for returned word value
2627  *
2628  * Read the specified word from the copy of the Shadow RAM found in the
2629  * specified NVM module.
2630  *
2631  * Return: the exit code of the operation.
2632  */
ixgbe_read_nvm_sr_copy(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2633 static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
2634 				  enum ixgbe_bank_select bank,
2635 				  u32 offset, u16 *data)
2636 {
2637 	u32 hdr_len;
2638 	s32 status;
2639 
2640 	status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
2641 	if (status)
2642 		return status;
2643 
2644 	hdr_len = ROUND_UP(hdr_len, 32);
2645 
2646 	return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
2647 }
2648 
2649 /**
2650  * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
2651  * @hw: pointer to the HW struct
2652  * @minsrevs: structure to store NVM and OROM minsrev values
2653  *
2654  * Read the Minimum Security Revision TLV and extract
2655  * the revision values from the flash image
2656  * into a readable structure for processing.
2657  *
2658  * Return: the exit code of the operation.
2659  */
ixgbe_get_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2660 s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
2661 			   struct ixgbe_minsrev_info *minsrevs)
2662 {
2663 	struct ixgbe_aci_cmd_nvm_minsrev data;
2664 	s32 status;
2665 	u16 valid;
2666 
2667 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2668 	if (status)
2669 		return status;
2670 
2671 	status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
2672 				    0, sizeof(data), &data,
2673 				    true, false);
2674 
2675 	ixgbe_release_nvm(hw);
2676 
2677 	if (status)
2678 		return status;
2679 
2680 	valid = IXGBE_LE16_TO_CPU(data.validity);
2681 
2682 	/* Extract NVM minimum security revision */
2683 	if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
2684 		u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
2685 		u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
2686 
2687 		minsrevs->nvm = minsrev_h << 16 | minsrev_l;
2688 		minsrevs->nvm_valid = true;
2689 	}
2690 
2691 	/* Extract the OROM minimum security revision */
2692 	if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
2693 		u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
2694 		u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
2695 
2696 		minsrevs->orom = minsrev_h << 16 | minsrev_l;
2697 		minsrevs->orom_valid = true;
2698 	}
2699 
2700 	return IXGBE_SUCCESS;
2701 }
2702 
2703 /**
2704  * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
2705  * @hw: pointer to the HW struct
2706  * @minsrevs: minimum security revision information
2707  *
2708  * Update the NVM or Option ROM minimum security revision fields in the PFA
2709  * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
2710  * fields to determine what update is being requested. If the valid bit is not
2711  * set for that module, then the associated minsrev will be left as is.
2712  *
2713  * Return: the exit code of the operation.
2714  */
ixgbe_update_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2715 s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
2716 			      struct ixgbe_minsrev_info *minsrevs)
2717 {
2718 	struct ixgbe_aci_cmd_nvm_minsrev data;
2719 	s32 status;
2720 
2721 	if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
2722 		return IXGBE_ERR_PARAM;
2723 	}
2724 
2725 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2726 	if (status)
2727 		return status;
2728 
2729 	/* Get current data */
2730 	status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2731 				    sizeof(data), &data, true, false);
2732 	if (status)
2733 		goto exit_release_res;
2734 
2735 	if (minsrevs->nvm_valid) {
2736 		data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
2737 		data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
2738 		data.validity |=
2739 			IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
2740 	}
2741 
2742 	if (minsrevs->orom_valid) {
2743 		data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
2744 		data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
2745 		data.validity |=
2746 			IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
2747 	}
2748 
2749 	/* Update flash data */
2750 	status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2751 				      sizeof(data), &data, false,
2752 				      IXGBE_ACI_NVM_SPECIAL_UPDATE);
2753 	if (status)
2754 		goto exit_release_res;
2755 
2756 	/* Dump the Shadow RAM to the flash */
2757 	status = ixgbe_nvm_write_activate(hw, 0, NULL);
2758 
2759 exit_release_res:
2760 	ixgbe_release_nvm(hw);
2761 
2762 	return status;
2763 }
2764 
2765 /**
2766  * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
2767  * @hw: pointer to the HW struct
2768  * @bank: whether to read from the active or inactive flash bank
2769  * @srev: storage for security revision
2770  *
2771  * Read the security revision out of the CSS header of the active NVM module
2772  * bank.
2773  *
2774  * Return: the exit code of the operation.
2775  */
ixgbe_get_nvm_srev(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * srev)2776 static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
2777 			      enum ixgbe_bank_select bank, u32 *srev)
2778 {
2779 	u16 srev_l, srev_h;
2780 	s32 status;
2781 
2782 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
2783 	if (status)
2784 		return status;
2785 
2786 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
2787 	if (status)
2788 		return status;
2789 
2790 	*srev = srev_h << 16 | srev_l;
2791 
2792 	return IXGBE_SUCCESS;
2793 }
2794 
2795 /**
2796  * ixgbe_get_nvm_ver_info - Read NVM version information
2797  * @hw: pointer to the HW struct
2798  * @bank: whether to read from the active or inactive flash bank
2799  * @nvm: pointer to NVM info structure
2800  *
2801  * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
2802  * in the nvm info structure.
2803  *
2804  * Return: the exit code of the operation.
2805  */
ixgbe_get_nvm_ver_info(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,struct ixgbe_nvm_info * nvm)2806 static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
2807 				  enum ixgbe_bank_select bank,
2808 				  struct ixgbe_nvm_info *nvm)
2809 {
2810 	u16 eetrack_lo, eetrack_hi, ver;
2811 	s32 status;
2812 
2813 	status = ixgbe_read_nvm_sr_copy(hw, bank,
2814 					E610_SR_NVM_DEV_STARTER_VER, &ver);
2815 	if (status) {
2816 		return status;
2817 	}
2818 
2819 	nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
2820 	nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
2821 
2822 	status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
2823 					&eetrack_lo);
2824 	if (status) {
2825 		return status;
2826 	}
2827 	status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
2828 					&eetrack_hi);
2829 	if (status) {
2830 		return status;
2831 	}
2832 
2833 	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
2834 
2835 	status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
2836 
2837 	return IXGBE_SUCCESS;
2838 }
2839 
2840 /**
2841  * ixgbe_get_inactive_nvm_ver - Read Option ROM version from the inactive bank
2842  * @hw: pointer to the HW structure
2843  * @nvm: storage for Option ROM version information
2844  *
2845  * Reads the NVM EETRACK ID, Map version, and security revision of the
2846  * inactive NVM bank. Used to access version data for a pending update that
2847  * has not yet been activated.
2848  *
2849  * Return: the exit code of the operation.
2850  */
ixgbe_get_inactive_nvm_ver(struct ixgbe_hw * hw,struct ixgbe_nvm_info * nvm)2851 s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
2852 {
2853 	return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
2854 }
2855 
2856 /**
2857  * ixgbe_get_active_nvm_ver - Read Option ROM version from the active bank
2858  * @hw: pointer to the HW structure
2859  * @nvm: storage for Option ROM version information
2860  *
2861  * Reads the NVM EETRACK ID, Map version, and security revision of the
2862  * active NVM bank.
2863  *
2864  * Return: the exit code of the operation.
2865  */
ixgbe_get_active_nvm_ver(struct ixgbe_hw * hw,struct ixgbe_nvm_info * nvm)2866 s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
2867 {
2868 	return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
2869 }
2870 
/**
 * ixgbe_get_netlist_info - Read netlist version information
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @netlist: pointer to netlist version info structure
 *
 * Get the netlist version information from the requested bank. Reads the
 * Link Topology section to find the Netlist ID block and extract the
 * relevant information into the netlist version structure.
 *
 * Return: the exit code of the operation.
 */
static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  struct ixgbe_netlist_info *netlist)
{
	u16 module_id, length, node_count, i;
	u16 *id_blk;
	s32 status;

	/* Verify the section really is a Link Topology module before
	 * trusting any of its contents.
	 */
	status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
					   &module_id);
	if (status)
		return status;

	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID) {
		return IXGBE_ERR_NVM;
	}

	status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
					   &length);
	if (status)
		return status;

	/* sanity check that we have at least enough words to store the
	 * netlist ID block
	 */
	if (length < IXGBE_NETLIST_ID_BLK_SIZE) {
		return IXGBE_ERR_NVM;
	}

	/* The node count locates the ID block within the module. */
	status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
					   &node_count);
	if (status)
		return status;
	node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;

	id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
		     sizeof(*id_blk));
	if (!id_blk)
		return IXGBE_ERR_NO_SPACE;

	/* Read out the entire Netlist ID Block at once. */
	status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
				         IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
				         (u8 *)id_blk,
					 IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
	if (status)
		goto exit_error;

	/* Convert the raw little-endian words to host order in place. */
	for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
		id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);

	/* Each 32-bit field is split across a high and a low word. */
	netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
	netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
	netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
	netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
		       id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
	netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];

exit_error:
	ixgbe_free(hw, id_blk);

	return status;
}
2952 
2953 /**
2954  * ixgbe_get_inactive_netlist_ver
2955  * @hw: pointer to the HW struct
2956  * @netlist: pointer to netlist version info structure
2957  *
2958  * Read the netlist version data from the inactive netlist bank. Used to
2959  * extract version data of a pending flash update in order to display the
2960  * version data.
2961  *
2962  * Return: the exit code of the operation.
2963  */
ixgbe_get_inactive_netlist_ver(struct ixgbe_hw * hw,struct ixgbe_netlist_info * netlist)2964 s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
2965 				   struct ixgbe_netlist_info *netlist)
2966 {
2967 	return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
2968 }
2969 
2970 /**
2971  * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
2972  * @hw: pointer to the HW structure
2973  * @offset: the word offset of the Shadow RAM word to read
2974  * @pointer: pointer value read from Shadow RAM
2975  *
2976  * Read the given Shadow RAM word, and convert it to a pointer value specified
2977  * in bytes. This function assumes the specified offset is a valid pointer
2978  * word.
2979  *
2980  * Each pointer word specifies whether it is stored in word size or 4KB
2981  * sector size by using the highest bit. The reported pointer value will be in
2982  * bytes, intended for flat NVM reads.
2983  *
2984  * Return: the exit code of the operation.
2985  */
ixgbe_read_sr_pointer(struct ixgbe_hw * hw,u16 offset,u32 * pointer)2986 static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
2987 {
2988 	s32 status;
2989 	u16 value;
2990 
2991 	status = ixgbe_read_ee_aci_E610(hw, offset, &value);
2992 	if (status)
2993 		return status;
2994 
2995 	/* Determine if the pointer is in 4KB or word units */
2996 	if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
2997 		*pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
2998 	else
2999 		*pointer = value * 2;
3000 
3001 	return IXGBE_SUCCESS;
3002 }
3003 
3004 /**
3005  * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
3006  * @hw: pointer to the HW structure
3007  * @offset: the word offset of the Shadow RAM to read
3008  * @size: size value read from the Shadow RAM
3009  *
3010  * Read the given Shadow RAM word, and convert it to an area size value
3011  * specified in bytes. This function assumes the specified offset is a valid
3012  * area size word.
3013  *
3014  * Each area size word is specified in 4KB sector units. This function reports
3015  * the size in bytes, intended for flat NVM reads.
3016  *
3017  * Return: the exit code of the operation.
3018  */
ixgbe_read_sr_area_size(struct ixgbe_hw * hw,u16 offset,u32 * size)3019 static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
3020 {
3021 	s32 status;
3022 	u16 value;
3023 
3024 	status = ixgbe_read_ee_aci_E610(hw, offset, &value);
3025 	if (status)
3026 		return status;
3027 
3028 	/* Area sizes are always specified in 4KB units */
3029 	*size = value * 4 * 1024;
3030 
3031 	return IXGBE_SUCCESS;
3032 }
3033 
3034 /**
3035  * ixgbe_discover_flash_size - Discover the available flash size.
3036  * @hw: pointer to the HW struct
3037  *
3038  * The device flash could be up to 16MB in size. However, it is possible that
3039  * the actual size is smaller. Use bisection to determine the accessible size
3040  * of flash memory.
3041  *
3042  * Return: the exit code of the operation.
3043  */
ixgbe_discover_flash_size(struct ixgbe_hw * hw)3044 static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
3045 {
3046 	u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
3047 	s32 status;
3048 
3049 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3050 	if (status)
3051 		return status;
3052 
3053 	while ((max_size - min_size) > 1) {
3054 		u32 offset = (max_size + min_size) / 2;
3055 		u32 len = 1;
3056 		u8 data;
3057 
3058 		status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
3059 		if (status == IXGBE_ERR_ACI_ERROR &&
3060 		    hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
3061 			status = IXGBE_SUCCESS;
3062 			max_size = offset;
3063 		} else if (!status) {
3064 			min_size = offset;
3065 		} else {
3066 			/* an unexpected error occurred */
3067 			goto err_read_flat_nvm;
3068 		}
3069 	}
3070 
3071 	hw->flash.flash_size = max_size;
3072 
3073 err_read_flat_nvm:
3074 	ixgbe_release_nvm(hw);
3075 
3076 	return status;
3077 }
3078 
3079 /**
3080  * ixgbe_determine_active_flash_banks - Discover active bank for each module
3081  * @hw: pointer to the HW struct
3082  *
3083  * Read the Shadow RAM control word and determine which banks are active for
3084  * the NVM, OROM, and Netlist modules. Also read and calculate the associated
3085  * pointer and size. These values are then cached into the ixgbe_flash_info
3086  * structure for later use in order to calculate the correct offset to read
3087  * from the active module.
3088  *
3089  * Return: the exit code of the operation.
3090  */
ixgbe_determine_active_flash_banks(struct ixgbe_hw * hw)3091 static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
3092 {
3093 	struct ixgbe_bank_info *banks = &hw->flash.banks;
3094 	u16 ctrl_word;
3095 	s32 status;
3096 
3097 	status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
3098 	if (status) {
3099 		return status;
3100 	}
3101 
3102 	/* Check that the control word indicates validity */
3103 	if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
3104 	    IXGBE_SR_CTRL_WORD_VALID) {
3105 		return IXGBE_ERR_CONFIG;
3106 	}
3107 
3108 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
3109 		banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
3110 	else
3111 		banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
3112 
3113 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
3114 		banks->orom_bank = IXGBE_1ST_FLASH_BANK;
3115 	else
3116 		banks->orom_bank = IXGBE_2ND_FLASH_BANK;
3117 
3118 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
3119 		banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
3120 	else
3121 		banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
3122 
3123 	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
3124 				       &banks->nvm_ptr);
3125 	if (status) {
3126 		return status;
3127 	}
3128 
3129 	status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
3130 					 &banks->nvm_size);
3131 	if (status) {
3132 		return status;
3133 	}
3134 
3135 	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
3136 				       &banks->orom_ptr);
3137 	if (status) {
3138 		return status;
3139 	}
3140 
3141 	status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
3142 					 &banks->orom_size);
3143 	if (status) {
3144 		return status;
3145 	}
3146 
3147 	status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
3148 				       &banks->netlist_ptr);
3149 	if (status) {
3150 		return status;
3151 	}
3152 
3153 	status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
3154 					 &banks->netlist_size);
3155 	if (status) {
3156 		return status;
3157 	}
3158 
3159 	return IXGBE_SUCCESS;
3160 }
3161 
3162 /**
3163  * ixgbe_init_nvm - initializes NVM setting
3164  * @hw: pointer to the HW struct
3165  *
3166  * Read and populate NVM settings such as Shadow RAM size,
3167  * max_timeout, and blank_nvm_mode
3168  *
3169  * Return: the exit code of the operation.
3170  */
ixgbe_init_nvm(struct ixgbe_hw * hw)3171 s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
3172 {
3173 	struct ixgbe_flash_info *flash = &hw->flash;
3174 	u32 fla, gens_stat, status;
3175 	u8 sr_size;
3176 
3177 	/* The SR size is stored regardless of the NVM programming mode
3178 	 * as the blank mode may be used in the factory line.
3179 	 */
3180 	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
3181 	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
3182 
3183 	/* Switching to words (sr_size contains power of 2) */
3184 	flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
3185 
3186 	/* Check if we are in the normal or blank NVM programming mode */
3187 	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
3188 	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
3189 		flash->blank_nvm_mode = false;
3190 	} else {
3191 		/* Blank programming mode */
3192 		flash->blank_nvm_mode = true;
3193 		return IXGBE_ERR_NVM_BLANK_MODE;
3194 	}
3195 
3196 	status = ixgbe_discover_flash_size(hw);
3197 	if (status) {
3198 		return status;
3199 	}
3200 
3201 	status = ixgbe_determine_active_flash_banks(hw);
3202 	if (status) {
3203 		return status;
3204 	}
3205 
3206 	status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3207 					&flash->nvm);
3208 	if (status) {
3209 		return status;
3210 	}
3211 
3212 	/* read the netlist version information */
3213 	status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3214 					&flash->netlist);
3215 
3216 	return IXGBE_SUCCESS;
3217 }
3218 
3219 /**
3220  * ixgbe_sanitize_operate - Clear the user data
3221  * @hw: pointer to the HW struct
3222  *
3223  * Clear user data from NVM using ACI command (0x070C).
3224  *
3225  * Return: the exit code of the operation.
3226  */
ixgbe_sanitize_operate(struct ixgbe_hw * hw)3227 s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
3228 {
3229 	s32 status;
3230 	u8 values;
3231 
3232 	u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
3233 		       IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
3234 
3235 	status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
3236 	if (status)
3237 		return status;
3238 	if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3239 	     !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
3240 	    ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3241 	     !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
3242 	    ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
3243 	     !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
3244 		return IXGBE_ERR_ACI_ERROR;
3245 
3246 	return IXGBE_SUCCESS;
3247 }
3248 
3249 /**
3250  * ixgbe_sanitize_nvm - Sanitize NVM
3251  * @hw: pointer to the HW struct
3252  * @cmd_flags: flag to the ACI command
3253  * @values: values returned from the command
3254  *
3255  * Sanitize NVM using ACI command (0x070C).
3256  *
3257  * Return: the exit code of the operation.
3258  */
ixgbe_sanitize_nvm(struct ixgbe_hw * hw,u8 cmd_flags,u8 * values)3259 s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
3260 {
3261 	struct ixgbe_aci_desc desc;
3262 	struct ixgbe_aci_cmd_nvm_sanitization *cmd;
3263 	s32 status;
3264 
3265 	cmd = &desc.params.nvm_sanitization;
3266 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
3267 	cmd->cmd_flags = cmd_flags;
3268 
3269 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3270 	if (values)
3271 		*values = cmd->values;
3272 
3273 	return status;
3274 }
3275 
3276 /**
3277  * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
3278  * @hw: pointer to the HW structure
3279  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3280  * @data: word read from the Shadow RAM
3281  *
3282  * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
3283  *
3284  * Return: the exit code of the operation.
3285  */
ixgbe_read_sr_word_aci(struct ixgbe_hw * hw,u16 offset,u16 * data)3286 s32 ixgbe_read_sr_word_aci(struct ixgbe_hw  *hw, u16 offset, u16 *data)
3287 {
3288 	u32 bytes = sizeof(u16);
3289 	__le16 data_local;
3290 	s32 status;
3291 
3292 	status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
3293 				     (u8 *)&data_local, true);
3294 	if (status)
3295 		return status;
3296 
3297 	*data = IXGBE_LE16_TO_CPU(data_local);
3298 	return IXGBE_SUCCESS;
3299 }
3300 
3301 /**
3302  * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
3303  * @hw: pointer to the HW structure
3304  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3305  * @words: (in) number of words to read; (out) number of words actually read
3306  * @data: words read from the Shadow RAM
3307  *
3308  * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
3309  * taken before reading the buffer and later released.
3310  *
3311  * Return: the exit code of the operation.
3312  */
ixgbe_read_sr_buf_aci(struct ixgbe_hw * hw,u16 offset,u16 * words,u16 * data)3313 s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
3314 			  u16 *data)
3315 {
3316 	u32 bytes = *words * 2, i;
3317 	s32 status;
3318 
3319 	status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
3320 
3321 	*words = bytes / 2;
3322 
3323 	for (i = 0; i < *words; i++)
3324 		data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
3325 
3326 	return status;
3327 }
3328 
/**
 * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
 * from being exceeded in case of Shadow RAM read requests and ensures that no
 * single read request exceeds the maximum 4KB read for a single admin command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_read_flat_nvm(struct ixgbe_hw  *hw, u32 offset, u32 *length,
			u8 *data, bool read_shadow_ram)
{
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;
	s32 status;

	/* Report zero bytes read until the loop below accumulates some. */
	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) >
				(hw->eeprom.word_size * 2u))) {
		return IXGBE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also 4KB.
		 */
		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
		read_size = MIN_T(u32,
				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
				  inlen - bytes_read);

		/* Tell the FW when this chunk completes the request. */
		last_cmd = !(bytes_read + read_size < inlen);

		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
		 * maximum size guarantees that it will fit within the 2 bytes.
		 */
		status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
					    offset, (u16)read_size,
					    data + bytes_read, last_cmd,
					    read_shadow_ram);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* Even on failure, report how much was successfully read. */
	*length = bytes_read;
	return status;
}
3395 
3396 /**
3397  * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
3398  * @hw: pointer to the HW structure
3399  * @offset: offset in words from module start
3400  * @words: number of words to access
3401  *
3402  * Check if all the parameters are valid
3403  * before performing any Shadow RAM read/write operations.
3404  *
3405  * Return: the exit code of the operation.
3406  * * - IXGBE_SUCCESS - success.
3407  * * - IXGBE_ERR_PARAM - NVM error: offset beyond SR limit or
3408  * NVM error: tried to access more words then the set limit or
3409  * NVM error: cannot spread over two sectors.
3410  */
ixgbe_check_sr_access_params(struct ixgbe_hw * hw,u32 offset,u16 words)3411 static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
3412 					u16 words)
3413 {
3414 	if ((offset + words) > hw->eeprom.word_size) {
3415 		return IXGBE_ERR_PARAM;
3416 	}
3417 
3418 	if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
3419 		/* We can access only up to 4KB (one sector),
3420 		 * in one Admin Command write
3421 		 */
3422 		return IXGBE_ERR_PARAM;
3423 	}
3424 
3425 	if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
3426 	    (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
3427 		/* A single access cannot spread over two sectors */
3428 		return IXGBE_ERR_PARAM;
3429 	}
3430 
3431 	return IXGBE_SUCCESS;
3432 }
3433 
3434 /**
3435  * ixgbe_write_sr_word_aci - Writes Shadow RAM word
3436  * @hw: pointer to the HW structure
3437  * @offset: offset of the Shadow RAM word to write
3438  * @data: word to write to the Shadow RAM
3439  *
3440  * Writes a 16 bit word to the Shadow RAM using the admin command.
3441  * NVM ownership must be acquired before calling this function and released
3442  * by a caller. To commit SR to NVM update checksum function should be called.
3443  *
3444  * Return: the exit code of the operation.
3445  */
ixgbe_write_sr_word_aci(struct ixgbe_hw * hw,u32 offset,const u16 * data)3446 s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
3447 {
3448 	__le16 data_local = IXGBE_CPU_TO_LE16(*data);
3449 	s32 status;
3450 
3451 	status = ixgbe_check_sr_access_params(hw, offset, 1);
3452 	if (!status)
3453 		status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3454 					      BYTES_PER_WORD, &data_local,
3455 					      false, 0);
3456 
3457 	return status;
3458 }
3459 
3460 /**
3461  * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
3462  * @hw: pointer to the HW structure
3463  * @offset: offset of the Shadow RAM buffer to write
3464  * @words: number of words to write
3465  * @data: words to write to the Shadow RAM
3466  *
3467  * Writes a 16 bit word to the Shadow RAM using the admin command.
3468  * NVM ownership must be acquired before calling this function and released
3469  * by a caller. To commit SR to NVM update checksum function should be called.
3470  *
3471  * Return: the exit code of the operation.
3472  */
ixgbe_write_sr_buf_aci(struct ixgbe_hw * hw,u32 offset,u16 words,const u16 * data)3473 s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
3474 			   const u16 *data)
3475 {
3476 	__le16 *data_local;
3477 	s32 status;
3478 	void *vmem;
3479 	u32 i;
3480 
3481 	vmem = ixgbe_calloc(hw, words, sizeof(u16));
3482 	if (!vmem)
3483 		return IXGBE_ERR_OUT_OF_MEM;
3484 	data_local = (__le16 *)vmem;
3485 
3486 	for (i = 0; i < words; i++)
3487 		data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
3488 
3489 	/* Here we will only write one buffer as the size of the modules
3490 	 * mirrored in the Shadow RAM is always less than 4K.
3491 	 */
3492 	status = ixgbe_check_sr_access_params(hw, offset, words);
3493 	if (!status)
3494 		status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3495 					      BYTES_PER_WORD * words,
3496 					      data_local, false, 0);
3497 
3498 	ixgbe_free(hw, vmem);
3499 
3500 	return status;
3501 }
3502 
3503 /**
3504  * ixgbe_aci_alternate_write - write to alternate structure
3505  * @hw: pointer to the hardware structure
3506  * @reg_addr0: address of first dword to be written
3507  * @reg_val0: value to be written under 'reg_addr0'
3508  * @reg_addr1: address of second dword to be written
3509  * @reg_val1: value to be written under 'reg_addr1'
3510  *
3511  * Write one or two dwords to alternate structure using ACI command (0x0900).
3512  * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3513  *
3514  * Return: 0 on success and error code on failure.
3515  */
ixgbe_aci_alternate_write(struct ixgbe_hw * hw,u32 reg_addr0,u32 reg_val0,u32 reg_addr1,u32 reg_val1)3516 s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
3517 			      u32 reg_val0, u32 reg_addr1, u32 reg_val1)
3518 {
3519 	struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3520 	struct ixgbe_aci_desc desc;
3521 	s32 status;
3522 
3523 	cmd = &desc.params.read_write_alt_direct;
3524 
3525 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
3526 	cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3527 	cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3528 	cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
3529 	cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
3530 
3531 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3532 
3533 	return status;
3534 }
3535 
3536 /**
3537  * ixgbe_aci_alternate_read - read from alternate structure
3538  * @hw: pointer to the hardware structure
3539  * @reg_addr0: address of first dword to be read
3540  * @reg_val0: pointer for data read from 'reg_addr0'
3541  * @reg_addr1: address of second dword to be read
3542  * @reg_val1: pointer for data read from 'reg_addr1'
3543  *
3544  * Read one or two dwords from alternate structure using ACI command (0x0902).
3545  * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3546  * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
3547  * is read.
3548  *
3549  * Return: 0 on success and error code on failure.
3550  */
ixgbe_aci_alternate_read(struct ixgbe_hw * hw,u32 reg_addr0,u32 * reg_val0,u32 reg_addr1,u32 * reg_val1)3551 s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
3552 			     u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
3553 {
3554 	struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3555 	struct ixgbe_aci_desc desc;
3556 	s32 status;
3557 
3558 	cmd = &desc.params.read_write_alt_direct;
3559 
3560 	if (!reg_val0)
3561 		return IXGBE_ERR_PARAM;
3562 
3563 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
3564 	cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3565 	cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3566 
3567 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3568 
3569 	if (status == IXGBE_SUCCESS) {
3570 		*reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
3571 
3572 		if (reg_val1)
3573 			*reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
3574 	}
3575 
3576 	return status;
3577 }
3578 
/**
 * ixgbe_aci_alternate_write_done - check if writing to alternate structure
 * is done
 * @hw: pointer to the HW structure.
 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
 * @reset_needed: indicates the SW should trigger GLOBAL reset
 *
 * Indicates to the FW that alternate structures have been changed.
 *
 * Return: 0 on success and error code on failure.
 */
s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
				   bool *reset_needed)
{
	struct ixgbe_aci_cmd_done_alt_write *cmd;
	struct ixgbe_aci_desc desc;
	s32 status;

	cmd = &desc.params.done_alt_write;

	/* The reset indication is a mandatory output. */
	if (!reset_needed)
		return IXGBE_ERR_PARAM;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
	cmd->flags = bios_mode;

	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
	/* On success the FW reports status flags back in the same field.
	 * NOTE(review): the request stores bios_mode without byte-swapping
	 * while the response is read through IXGBE_LE16_TO_CPU -- confirm
	 * the width/endianness of cmd->flags against the command struct
	 * definition in ixgbe_type_e610.h.
	 */
	if (!status)
		*reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
				 IXGBE_ACI_RESP_RESET_NEEDED) != 0;

	return status;
}
3612 
3613 /**
3614  * ixgbe_aci_alternate_clear - clear alternate structure
3615  * @hw: pointer to the HW structure.
3616  *
3617  * Clear the alternate structures of the port from which the function
3618  * is called.
3619  *
3620  * Return: 0 on success and error code on failure.
3621  */
ixgbe_aci_alternate_clear(struct ixgbe_hw * hw)3622 s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
3623 {
3624 	struct ixgbe_aci_desc desc;
3625 	s32 status;
3626 
3627 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
3628 					ixgbe_aci_opc_clear_port_alt_write);
3629 
3630 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3631 
3632 	return status;
3633 }
3634 
3635 /**
3636  * ixgbe_aci_get_internal_data - get internal FW/HW data
3637  * @hw: pointer to the hardware structure
3638  * @cluster_id: specific cluster to dump
3639  * @table_id: table ID within cluster
3640  * @start: index of line in the block to read
3641  * @buf: dump buffer
3642  * @buf_size: dump buffer size
3643  * @ret_buf_size: return buffer size (returned by FW)
3644  * @ret_next_cluster: next cluster to read (returned by FW)
3645  * @ret_next_table: next block to read (returned by FW)
3646  * @ret_next_index: next index to read (returned by FW)
3647  *
3648  * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
3649  *
3650  * Return: the exit code of the operation.
3651  */
ixgbe_aci_get_internal_data(struct ixgbe_hw * hw,u16 cluster_id,u16 table_id,u32 start,void * buf,u16 buf_size,u16 * ret_buf_size,u16 * ret_next_cluster,u16 * ret_next_table,u32 * ret_next_index)3652 s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
3653 				u16 table_id, u32 start, void *buf,
3654 				u16 buf_size, u16 *ret_buf_size,
3655 				u16 *ret_next_cluster, u16 *ret_next_table,
3656 				u32 *ret_next_index)
3657 {
3658 	struct ixgbe_aci_cmd_debug_dump_internals *cmd;
3659 	struct ixgbe_aci_desc desc;
3660 	s32 status;
3661 
3662 	cmd = &desc.params.debug_dump;
3663 
3664 	if (buf_size == 0 || !buf)
3665 		return IXGBE_ERR_PARAM;
3666 
3667 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
3668 					ixgbe_aci_opc_debug_dump_internals);
3669 
3670 	cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
3671 	cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
3672 	cmd->idx = IXGBE_CPU_TO_LE32(start);
3673 
3674 	status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
3675 
3676 	if (!status) {
3677 		if (ret_buf_size)
3678 			*ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
3679 		if (ret_next_cluster)
3680 			*ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
3681 		if (ret_next_table)
3682 			*ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
3683 		if (ret_next_index)
3684 			*ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
3685 	}
3686 
3687 	return status;
3688 }
3689 
3690 /**
3691  * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
3692  * @cmd: NVM access command structure
3693  *
3694  * Validates that an NVM access structure is request to read or write a valid
3695  * register offset. First validates that the module and flags are correct, and
3696  * then ensures that the register offset is one of the accepted registers.
3697  *
3698  * Return: 0 if the register access is valid, out of range error code otherwise.
3699  */
3700 static s32
ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd * cmd)3701 ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
3702 {
3703 	u16 i;
3704 
3705 	switch (cmd->offset) {
3706 	case GL_HICR:
3707 	case GL_HICR_EN: /* Note, this register is read only */
3708 	case GL_FWSTS:
3709 	case GL_MNG_FWSM:
3710 	case GLNVM_GENS:
3711 	case GLNVM_FLA:
3712 	case GL_FWRESETCNT:
3713 		return 0;
3714 	default:
3715 		break;
3716 	}
3717 
3718 	for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
3719 		if (cmd->offset == (u32)GL_HIDA(i))
3720 			return 0;
3721 
3722 	for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
3723 		if (cmd->offset == (u32)GL_HIBA(i))
3724 			return 0;
3725 
3726 	/* All other register offsets are not valid */
3727 	return IXGBE_ERR_OUT_OF_RANGE;
3728 }
3729 
3730 /**
3731  * ixgbe_nvm_access_read - Handle an NVM read request
3732  * @hw: pointer to the HW struct
3733  * @cmd: NVM access command to process
3734  * @data: storage for the register value read
3735  *
3736  * Process an NVM access request to read a register.
3737  *
3738  * Return: 0 if the register read is valid and successful,
3739  * out of range error code otherwise.
3740  */
ixgbe_nvm_access_read(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3741 static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
3742 			struct ixgbe_nvm_access_cmd *cmd,
3743 			struct ixgbe_nvm_access_data *data)
3744 {
3745 	s32 status;
3746 
3747 	/* Always initialize the output data, even on failure */
3748 	memset(&data->regval, 0, cmd->data_size);
3749 
3750 	/* Make sure this is a valid read/write access request */
3751 	status = ixgbe_validate_nvm_rw_reg(cmd);
3752 	if (status)
3753 		return status;
3754 
3755 	DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
3756 
3757 	/* Read the register and store the contents in the data field */
3758 	data->regval = IXGBE_READ_REG(hw, cmd->offset);
3759 
3760 	return 0;
3761 }
3762 
3763 /**
3764  * ixgbe_nvm_access_write - Handle an NVM write request
3765  * @hw: pointer to the HW struct
3766  * @cmd: NVM access command to process
3767  * @data: NVM access data to write
3768  *
3769  * Process an NVM access request to write a register.
3770  *
3771  * Return: 0 if the register write is valid and successful,
3772  * out of range error code otherwise.
3773  */
ixgbe_nvm_access_write(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3774 static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
3775 			struct ixgbe_nvm_access_cmd *cmd,
3776 			struct ixgbe_nvm_access_data *data)
3777 {
3778 	s32 status;
3779 
3780 	/* Make sure this is a valid read/write access request */
3781 	status = ixgbe_validate_nvm_rw_reg(cmd);
3782 	if (status)
3783 		return status;
3784 
3785 	/* Reject requests to write to read-only registers */
3786 	switch (cmd->offset) {
3787 	case GL_HICR_EN:
3788 		return IXGBE_ERR_OUT_OF_RANGE;
3789 	default:
3790 		break;
3791 	}
3792 
3793 	DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
3794 		cmd->offset, data->regval);
3795 
3796 	/* Write the data field to the specified register */
3797 	IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
3798 
3799 	return 0;
3800 }
3801 
3802 /**
3803  * ixgbe_handle_nvm_access - Handle an NVM access request
3804  * @hw: pointer to the HW struct
3805  * @cmd: NVM access command info
3806  * @data: pointer to read or return data
3807  *
3808  * Process an NVM access request. Read the command structure information and
3809  * determine if it is valid. If not, report an error indicating the command
3810  * was invalid.
3811  *
3812  * For valid commands, perform the necessary function, copying the data into
3813  * the provided data buffer.
3814  *
3815  * Return: 0 if the nvm access request is valid and successful,
3816  * error code otherwise.
3817  */
ixgbe_handle_nvm_access(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3818 s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
3819 			struct ixgbe_nvm_access_cmd *cmd,
3820 			struct ixgbe_nvm_access_data *data)
3821 {
3822 	switch (cmd->command) {
3823 	case IXGBE_NVM_CMD_READ:
3824 		return ixgbe_nvm_access_read(hw, cmd, data);
3825 	case IXGBE_NVM_CMD_WRITE:
3826 		return ixgbe_nvm_access_write(hw, cmd, data);
3827 	default:
3828 		return IXGBE_ERR_PARAM;
3829 	}
3830 }
3831 
3832 /**
3833  * ixgbe_fwlog_cache_cfg - Cache FW logging config
3834  * @hw: pointer to the HW structure
3835  * @cfg: config to cache
3836  *
3837  * Cache FW logging config.
3838  */
ixgbe_fwlog_cache_cfg(struct ixgbe_hw * hw,struct ixgbe_fwlog_cfg * cfg)3839 static void ixgbe_fwlog_cache_cfg(struct ixgbe_hw *hw,
3840 				  struct ixgbe_fwlog_cfg *cfg)
3841 {
3842 	hw->fwlog_cfg = *cfg;
3843 }
3844 
3845 /**
3846  * ixgbe_fwlog_valid_module_entries - validate all the module entry IDs and
3847  * log levels
3848  * @hw: pointer to the HW structure
3849  * @entries: entries to validate
3850  * @num_entries: number of entries to validate
3851  *
3852  * Checks if all the module entry IDs and log levels are valid.
3853  *
3854  * Return: true if all the module entry IDs and log levels are valid,
3855  * otherwise false.
3856  */
ixgbe_fwlog_valid_module_entries(struct ixgbe_hw * hw,struct ixgbe_fwlog_module_entry * entries,u16 num_entries)3857 static bool ixgbe_fwlog_valid_module_entries(struct ixgbe_hw *hw,
3858 			struct ixgbe_fwlog_module_entry *entries,
3859 			u16 num_entries)
3860 {
3861 	u16 i;
3862 
3863 	UNREFERENCED_1PARAMETER(hw);
3864 
3865 	if (!entries) {
3866 		return false;
3867 	}
3868 
3869 	if (!num_entries) {
3870 		return false;
3871 	}
3872 
3873 	for (i = 0; i < num_entries; i++) {
3874 		struct ixgbe_fwlog_module_entry *entry = &entries[i];
3875 
3876 		if (entry->module_id >= IXGBE_ACI_FW_LOG_ID_MAX) {
3877 			return false;
3878 		}
3879 
3880 		if (entry->log_level >= IXGBE_FWLOG_LEVEL_INVALID) {
3881 			return false;
3882 		}
3883 	}
3884 
3885 	return true;
3886 }
3887 
3888 /**
3889  * ixgbe_fwlog_valid_cfg - validate configuration
3890  * @hw: pointer to the HW structure
3891  * @cfg: config to validate
3892  *
3893  * Validate the entire configuration.
3894  *
3895  * Return: true if the entire configuration is valid, otherwise false.
3896  */
ixgbe_fwlog_valid_cfg(struct ixgbe_hw * hw,struct ixgbe_fwlog_cfg * cfg)3897 static bool ixgbe_fwlog_valid_cfg(struct ixgbe_hw *hw,
3898 				  struct ixgbe_fwlog_cfg *cfg)
3899 {
3900 	if (!cfg) {
3901 		return false;
3902 	}
3903 
3904 	if (cfg->log_resolution < IXGBE_ACI_FW_LOG_MIN_RESOLUTION ||
3905 	    cfg->log_resolution > IXGBE_ACI_FW_LOG_MAX_RESOLUTION) {
3906 		return false;
3907 	}
3908 
3909 	if (!ixgbe_fwlog_valid_module_entries(hw, cfg->module_entries,
3910 				  IXGBE_ACI_FW_LOG_ID_MAX))
3911 		return false;
3912 
3913 	return true;
3914 }
3915 
3916 /**
3917  * ixgbe_fwlog_init - Initialize cached structures for tracking FW logging
3918  * @hw: pointer to the HW structure
3919  * @cfg: config used to initialize the cached structures
3920  *
3921  * Initialize cached structures for tracking FW logging
3922  * Called on driver initialization and before calling
3923  * ixgbe_init_hw(). Firmware logging will be configured based on these settings
3924  * and also the PF will be registered on init.
3925  *
3926  * Return: the exit code of the operation.
3927  */
ixgbe_fwlog_init(struct ixgbe_hw * hw,struct ixgbe_fwlog_cfg * cfg)3928 s32 ixgbe_fwlog_init(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
3929 {
3930 	if (!ixgbe_fwlog_valid_cfg(hw, cfg))
3931 		return IXGBE_ERR_PARAM;
3932 
3933 	ixgbe_fwlog_cache_cfg(hw, cfg);
3934 
3935 	return IXGBE_SUCCESS;
3936 }
3937 
3938 /**
3939  * ixgbe_aci_fwlog_set - Set FW logging configuration
3940  * @hw: pointer to the HW structure
3941  * @entries: entries to configure
3942  * @num_entries: number of @entries
3943  * @options: options from ixgbe_fwlog_cfg->options structure
3944  * @log_resolution: logging resolution
3945  *
3946  * Set FW logging configuration using ACI command (0xFF30).
3947  *
3948  * Return: the exit code of the operation.
3949  */
ixgbe_aci_fwlog_set(struct ixgbe_hw * hw,struct ixgbe_fwlog_module_entry * entries,u16 num_entries,u16 options,u16 log_resolution)3950 static s32 ixgbe_aci_fwlog_set(struct ixgbe_hw *hw,
3951 			       struct ixgbe_fwlog_module_entry *entries,
3952 			       u16 num_entries, u16 options, u16 log_resolution)
3953 {
3954 	struct ixgbe_aci_cmd_fw_log_cfg_resp fw_modules[IXGBE_ACI_FW_LOG_ID_MAX];
3955 	struct ixgbe_aci_cmd_fw_log *cmd;
3956 	struct ixgbe_aci_desc desc;
3957 	s32 status;
3958 	u16 i;
3959 
3960 	if (num_entries > IXGBE_ACI_FW_LOG_ID_MAX)
3961 		return IXGBE_ERR_PARAM;
3962 
3963 	for (i = 0; i < num_entries; i++) {
3964 		fw_modules[i].module_identifier =
3965 			IXGBE_CPU_TO_LE16(entries[i].module_id);
3966 		fw_modules[i].log_level = entries[i].log_level;
3967 	}
3968 
3969 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_fw_logs_config);
3970 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
3971 
3972 	cmd = &desc.params.fw_log;
3973 
3974 	cmd->cmd_flags = IXGBE_ACI_FW_LOG_CONF_SET_VALID;
3975 	cmd->ops.cfg.log_resolution = IXGBE_CPU_TO_LE16(log_resolution);
3976 	cmd->ops.cfg.mdl_cnt = IXGBE_CPU_TO_LE16(num_entries);
3977 
3978 	if (options & IXGBE_FWLOG_OPTION_ARQ_ENA)
3979 		cmd->cmd_flags |= IXGBE_ACI_FW_LOG_CONF_AQ_EN;
3980 	if (options & IXGBE_FWLOG_OPTION_UART_ENA)
3981 		cmd->cmd_flags |= IXGBE_ACI_FW_LOG_CONF_UART_EN;
3982 
3983 	status = ixgbe_aci_send_cmd(hw, &desc, fw_modules,
3984 				 sizeof(*fw_modules) * num_entries);
3985 
3986 	return status;
3987 }
3988 
3989 /**
3990  * ixgbe_fwlog_supported - Cached for whether FW supports FW logging or not
3991  * @hw: pointer to the HW structure
3992  *
3993  * This will always return false if called before ixgbe_init_hw(), so it must be
3994  * called after ixgbe_init_hw().
3995  *
3996  * Return: true if FW supports FW logging.
3997  * If this function is called before ixgbe_init_hw(), return false.
3998  */
ixgbe_fwlog_supported(struct ixgbe_hw * hw)3999 bool ixgbe_fwlog_supported(struct ixgbe_hw *hw)
4000 {
4001 	return hw->fwlog_support_ena;
4002 }
4003 
4004 /**
4005  * ixgbe_fwlog_set - Set the firmware logging settings
4006  * @hw: pointer to the HW structure
4007  * @cfg: config used to set firmware logging
4008  *
4009  * Call this function whenever the driver needs to set the firmware
4010  * logging configuration. It can be called on initialization, reset, or during
4011  * runtime.
4012  *
4013  * If the PF wishes to receive FW logging then it must register via
4014  * ixgbe_fwlog_register. Note, that ixgbe_fwlog_register does not need to
4015  * be called for init.
4016  *
4017  * Return: the exit code of the operation.
4018  */
ixgbe_fwlog_set(struct ixgbe_hw * hw,struct ixgbe_fwlog_cfg * cfg)4019 s32 ixgbe_fwlog_set(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
4020 {
4021 	s32 status;
4022 
4023 	if (!ixgbe_fwlog_supported(hw))
4024 		return IXGBE_ERR_NOT_SUPPORTED;
4025 
4026 	if (!ixgbe_fwlog_valid_cfg(hw, cfg))
4027 		return IXGBE_ERR_PARAM;
4028 
4029 	status = ixgbe_aci_fwlog_set(hw, cfg->module_entries,
4030 				  IXGBE_ACI_FW_LOG_ID_MAX, cfg->options,
4031 				  cfg->log_resolution);
4032 	if (!status)
4033 		ixgbe_fwlog_cache_cfg(hw, cfg);
4034 
4035 	return status;
4036 }
4037 
4038 /**
4039  * ixgbe_fwlog_update_cached_entries - Update module entries in cached
4040  * FW logging config
4041  * @hw: pointer to the HW structure
4042  * @entries: entries to cache
4043  * @num_entries: number of @entries
4044  *
4045  * Update module entries in cached FW logging config.
4046  */
ixgbe_fwlog_update_cached_entries(struct ixgbe_hw * hw,struct ixgbe_fwlog_module_entry * entries,u16 num_entries)4047 static void ixgbe_fwlog_update_cached_entries(struct ixgbe_hw *hw,
4048 			struct ixgbe_fwlog_module_entry *entries,
4049 			u16 num_entries)
4050 {
4051 	u16 i;
4052 
4053 	for (i = 0; i < num_entries; i++) {
4054 		struct ixgbe_fwlog_module_entry *updated = &entries[i];
4055 		u16 j;
4056 
4057 		for (j = 0; j < IXGBE_ACI_FW_LOG_ID_MAX; j++) {
4058 			struct ixgbe_fwlog_module_entry *cached =
4059 				&hw->fwlog_cfg.module_entries[j];
4060 
4061 			if (cached->module_id == updated->module_id) {
4062 				cached->log_level = updated->log_level;
4063 				break;
4064 			}
4065 		}
4066 	}
4067 }
4068 
4069 /**
4070  * ixgbe_fwlog_update_modules - Update the log level 1 or more
4071  * FW logging modules
4072  * @hw: pointer to the HW structure
4073  * @entries: array of ixgbe_fwlog_module_entry(s)
4074  * @num_entries: number of entries
4075  *
4076  * Update the log level of 1 or more FW logging modules via module ID.
4077  *
4078  * Only the entries passed in will be affected. All other firmware logging
4079  * settings will be unaffected.
4080  *
4081  * Return: the exit code of the operation.
4082  */
ixgbe_fwlog_update_modules(struct ixgbe_hw * hw,struct ixgbe_fwlog_module_entry * entries,u16 num_entries)4083 s32 ixgbe_fwlog_update_modules(struct ixgbe_hw *hw,
4084 			       struct ixgbe_fwlog_module_entry *entries,
4085 			       u16 num_entries)
4086 {
4087 	struct ixgbe_fwlog_cfg cfg;
4088 	s32 status;
4089 
4090 	if (!ixgbe_fwlog_supported(hw))
4091 		return IXGBE_ERR_NOT_SUPPORTED;
4092 
4093 	if (num_entries > IXGBE_ACI_FW_LOG_ID_MAX)
4094 		return IXGBE_ERR_PARAM;
4095 
4096 	if (!ixgbe_fwlog_valid_module_entries(hw, entries, num_entries))
4097 		return IXGBE_ERR_PARAM;
4098 
4099 	status = ixgbe_fwlog_get(hw, &cfg);
4100 	if (status)
4101 		goto status_out;
4102 
4103 	status = ixgbe_aci_fwlog_set(hw, entries, num_entries, cfg.options,
4104 				     cfg.log_resolution);
4105 	if (!status)
4106 		ixgbe_fwlog_update_cached_entries(hw, entries, num_entries);
4107 
4108 status_out:
4109 	return status;
4110 }
4111 
4112 /**
4113  * ixgbe_aci_fwlog_register - Register PF for firmware logging events.
4114  * @hw: pointer to the HW structure
4115  * @reg: true to register and false to unregister
4116  *
4117  * Register a PF for firmware logging events using ACI command (0xFF31).
4118  *
4119  * Return: the exit code of the operation.
4120  */
ixgbe_aci_fwlog_register(struct ixgbe_hw * hw,bool reg)4121 static s32 ixgbe_aci_fwlog_register(struct ixgbe_hw *hw, bool reg)
4122 {
4123 	struct ixgbe_aci_desc desc;
4124 
4125 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_fw_logs_register);
4126 
4127 	if (reg)
4128 		desc.params.fw_log.cmd_flags = IXGBE_ACI_FW_LOG_AQ_REGISTER;
4129 
4130 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
4131 }
4132 
4133 /**
4134  * ixgbe_fwlog_register - Register the PF for firmware logging
4135  * @hw: pointer to the HW structure
4136  *
4137  * After this call the PF will start to receive firmware logging based on the
4138  * configuration set in ixgbe_fwlog_set.
4139  *
4140  * Return: the exit code of the operation.
4141  */
ixgbe_fwlog_register(struct ixgbe_hw * hw)4142 s32 ixgbe_fwlog_register(struct ixgbe_hw *hw)
4143 {
4144 	s32 status;
4145 
4146 	if (!ixgbe_fwlog_supported(hw))
4147 		return IXGBE_ERR_NOT_SUPPORTED;
4148 
4149 	status = ixgbe_aci_fwlog_register(hw, true);
4150 
4151 	if (!status)
4152 		hw->fwlog_cfg.options |= IXGBE_FWLOG_OPTION_IS_REGISTERED;
4153 
4154 	return status;
4155 }
4156 
4157 /**
4158  * ixgbe_fwlog_unregister - Unregister the PF from firmware logging
4159  * @hw: pointer to the HW structure
4160  *
4161  * Make an attempt to unregister the PF from firmware logging.
4162  *
4163  * Return: the exit code of the operation.
4164  */
ixgbe_fwlog_unregister(struct ixgbe_hw * hw)4165 s32 ixgbe_fwlog_unregister(struct ixgbe_hw *hw)
4166 {
4167 	s32 status;
4168 
4169 	if (!ixgbe_fwlog_supported(hw))
4170 		return IXGBE_ERR_NOT_SUPPORTED;
4171 
4172 	status = ixgbe_aci_fwlog_register(hw, false);
4173 	if (!status)
4174 		hw->fwlog_cfg.options &= ~IXGBE_FWLOG_OPTION_IS_REGISTERED;
4175 
4176 	return status;
4177 }
4178 
4179 /**
4180  * ixgbe_aci_fwlog_get - Get the current firmware logging configuration
4181  * @hw: pointer to the HW structure
4182  * @cfg: firmware logging configuration to populate
4183  *
4184  * Make an attempt to get the current firmware logging
4185  * configuration using ACI command (0xFF32).
4186  *
4187  * Return: the exit code of the operation.
4188  */
ixgbe_aci_fwlog_get(struct ixgbe_hw * hw,struct ixgbe_fwlog_cfg * cfg)4189 static s32 ixgbe_aci_fwlog_get(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
4190 {
4191 	struct ixgbe_aci_cmd_fw_log_cfg_resp *fw_modules;
4192 	struct ixgbe_aci_cmd_fw_log *cmd;
4193 	struct ixgbe_aci_desc desc;
4194 	u16 i, module_id_cnt;
4195 	u8 *buf = NULL;
4196 	s32 status;
4197 
4198 	memset(cfg, 0, sizeof(*cfg));
4199 
4200 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_fw_logs_query);
4201 	cmd = &desc.params.fw_log;
4202 
4203 	cmd->cmd_flags = IXGBE_ACI_FW_LOG_AQ_QUERY;
4204 
4205 	buf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
4206 	if (!buf)
4207 		return IXGBE_ERR_OUT_OF_MEM;
4208 
4209 	status = ixgbe_aci_send_cmd(hw, &desc, buf, IXGBE_ACI_MAX_BUFFER_SIZE);
4210 	if (status) {
4211 		goto status_out;
4212 	}
4213 
4214 	module_id_cnt = IXGBE_LE16_TO_CPU(cmd->ops.cfg.mdl_cnt);
4215 	if (module_id_cnt > IXGBE_ACI_FW_LOG_ID_MAX) {
4216 		module_id_cnt = IXGBE_ACI_FW_LOG_ID_MAX;
4217 	}
4218 
4219 	cfg->log_resolution = (u8)IXGBE_LE16_TO_CPU(cmd->ops.cfg.log_resolution);
4220 	if (cmd->cmd_flags & IXGBE_ACI_FW_LOG_CONF_AQ_EN)
4221 		cfg->options |= IXGBE_FWLOG_OPTION_ARQ_ENA;
4222 	if (cmd->cmd_flags & IXGBE_ACI_FW_LOG_CONF_UART_EN)
4223 		cfg->options |= IXGBE_FWLOG_OPTION_UART_ENA;
4224 	if (cmd->cmd_flags & IXGBE_ACI_FW_LOG_QUERY_REGISTERED)
4225 		cfg->options |= IXGBE_FWLOG_OPTION_IS_REGISTERED;
4226 
4227 	fw_modules = (struct ixgbe_aci_cmd_fw_log_cfg_resp *)buf;
4228 
4229 	for (i = 0; i < module_id_cnt; i++) {
4230 		struct ixgbe_aci_cmd_fw_log_cfg_resp *fw_module = &fw_modules[i];
4231 
4232 		cfg->module_entries[i].module_id =
4233 			IXGBE_LE16_TO_CPU(fw_module->module_identifier);
4234 		cfg->module_entries[i].log_level = fw_module->log_level;
4235 	}
4236 
4237 status_out:
4238 	if (buf)
4239 		ixgbe_free(hw, buf);
4240 	return status;
4241 }
4242 
4243 /**
4244  * ixgbe_fwlog_set_support_ena - Set if FW logging is supported by FW
4245  * @hw: pointer to the HW struct
4246  *
4247  * If FW returns success to the ixgbe_aci_fwlog_get call then it supports FW
4248  * logging, else it doesn't. Set the fwlog_support_ena flag accordingly.
4249  *
4250  * This function is only meant to be called during driver init to determine if
4251  * the FW support FW logging.
4252  *
4253  * Return: the exit code of the operation.
4254  */
ixgbe_fwlog_set_support_ena(struct ixgbe_hw * hw)4255 void ixgbe_fwlog_set_support_ena(struct ixgbe_hw *hw)
4256 {
4257 	struct ixgbe_fwlog_cfg cfg;
4258 	s32 status;
4259 
4260 	hw->fwlog_support_ena = false;
4261 
4262 	/* don't call ixgbe_fwlog_get() because that would overwrite the cached
4263 	 * configuration from the call to ixgbe_fwlog_init(), which is expected
4264 	 * to be called prior to this function
4265 	 */
4266 	status = ixgbe_aci_fwlog_get(hw, &cfg);
4267 	if (!status)
4268 		hw->fwlog_support_ena = true;
4269 }
4270 
4271 /**
4272  * ixgbe_fwlog_get - Get the firmware logging settings
4273  * @hw: pointer to the HW structure
4274  * @cfg: config to populate based on current firmware logging settings
4275  *
4276  * Get the current firmware logging settings.
4277  *
4278  * Return: the exit code of the operation.
4279  */
ixgbe_fwlog_get(struct ixgbe_hw * hw,struct ixgbe_fwlog_cfg * cfg)4280 s32 ixgbe_fwlog_get(struct ixgbe_hw *hw, struct ixgbe_fwlog_cfg *cfg)
4281 {
4282 	s32 status;
4283 
4284 	if (!ixgbe_fwlog_supported(hw))
4285 		return IXGBE_ERR_NOT_SUPPORTED;
4286 
4287 	if (!cfg)
4288 		return IXGBE_ERR_PARAM;
4289 
4290 	status = ixgbe_aci_fwlog_get(hw, cfg);
4291 	if (status)
4292 		return status;
4293 
4294 	ixgbe_fwlog_cache_cfg(hw, cfg);
4295 
4296 	return IXGBE_SUCCESS;
4297 }
4298 
4299 /**
4300  * ixgbe_fwlog_event_dump - Dump the event received over the Admin Receive Queue
4301  * @hw: pointer to the HW structure
4302  * @desc: Admin Receive Queue descriptor
4303  * @buf: buffer that contains the FW log event data
4304  *
4305  * If the driver receives the ixgbe_aci_opc_fw_logs_event on the Admin Receive
4306  * Queue, then it should call this function to dump the FW log data.
4307  */
ixgbe_fwlog_event_dump(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf)4308 void ixgbe_fwlog_event_dump(struct ixgbe_hw *hw,
4309 			    struct ixgbe_aci_desc *desc, void *buf)
4310 {
4311 	if (!ixgbe_fwlog_supported(hw))
4312 		return;
4313 
4314 	ixgbe_info_fwlog(hw, 32, 1, (u8 *)buf,
4315 			 IXGBE_LE16_TO_CPU(desc->datalen));
4316 }
4317 
4318 /**
4319  * ixgbe_aci_set_health_status_config - Configure FW health events
4320  * @hw: pointer to the HW struct
4321  * @event_source: type of diagnostic events to enable
4322  *
4323  * Configure the health status event types that the firmware will send to this
4324  * PF using ACI command (0xFF20). The supported event types are: PF-specific,
4325  * all PFs, and global.
4326  *
4327  * Return: the exit code of the operation.
4328  */
ixgbe_aci_set_health_status_config(struct ixgbe_hw * hw,u8 event_source)4329 s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
4330 {
4331 	struct ixgbe_aci_cmd_set_health_status_config *cmd;
4332 	struct ixgbe_aci_desc desc;
4333 
4334 	cmd = &desc.params.set_health_status_config;
4335 
4336 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
4337 				      ixgbe_aci_opc_set_health_status_config);
4338 
4339 	cmd->event_source = event_source;
4340 
4341 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
4342 }
4343 
4344 /**
4345  * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
4346  * @hw: pointer to hardware structure
4347  *
4348  * Initialize the function pointers and assign the MAC type for E610.
4349  * Does not touch the hardware.
4350  *
4351  * Return: the exit code of the operation.
4352  */
ixgbe_init_ops_E610(struct ixgbe_hw * hw)4353 s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
4354 {
4355 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
4356 	struct ixgbe_mac_info *mac = &hw->mac;
4357 	struct ixgbe_phy_info *phy = &hw->phy;
4358 	s32 ret_val;
4359 
4360 	ret_val = ixgbe_init_ops_X550(hw);
4361 
4362 	/* MAC */
4363 	mac->ops.reset_hw = ixgbe_reset_hw_E610;
4364 	mac->ops.start_hw = ixgbe_start_hw_E610;
4365 	mac->ops.get_media_type = ixgbe_get_media_type_E610;
4366 	mac->ops.get_supported_physical_layer =
4367 		ixgbe_get_supported_physical_layer_E610;
4368 	mac->ops.get_san_mac_addr = NULL;
4369 	mac->ops.set_san_mac_addr = NULL;
4370 	mac->ops.get_wwn_prefix = NULL;
4371 	mac->ops.setup_link = ixgbe_setup_link_E610;
4372 	mac->ops.check_link = ixgbe_check_link_E610;
4373 	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
4374 	mac->ops.setup_fc = ixgbe_setup_fc_E610;
4375 	mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
4376 	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
4377 	mac->ops.disable_rx = ixgbe_disable_rx_E610;
4378 	mac->ops.setup_eee = ixgbe_setup_eee_E610;
4379 	mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
4380 	mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
4381 	mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
4382 	mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
4383 	mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
4384        mac->ops.get_thermal_sensor_data = NULL;
4385        mac->ops.init_thermal_sensor_thresh = NULL;
4386 
4387 	/* PHY */
4388 	phy->ops.init = ixgbe_init_phy_ops_E610;
4389 	phy->ops.identify = ixgbe_identify_phy_E610;
4390 
4391 	if (hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
4392 		phy->eee_speeds_supported = IXGBE_LINK_SPEED_2_5GB_FULL;
4393 	else
4394 		phy->eee_speeds_supported = IXGBE_LINK_SPEED_2_5GB_FULL |
4395 					    IXGBE_LINK_SPEED_5GB_FULL |
4396 					    IXGBE_LINK_SPEED_10GB_FULL;
4397 
4398 	phy->eee_speeds_advertised = phy->eee_speeds_supported;
4399 
4400 	/* Additional ops overrides for e610 to go here */
4401 	eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
4402 	eeprom->ops.read = ixgbe_read_ee_aci_E610;
4403 	eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
4404 	eeprom->ops.write = ixgbe_write_ee_aci_E610;
4405 	eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
4406 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
4407 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
4408 	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
4409 	eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
4410 
4411 	/* Initialize bus function number */
4412 	hw->mac.ops.set_lan_id(hw);
4413 
4414 	return ret_val;
4415 }
4416 
/**
 * ixgbe_reset_hw_E610 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, and perform a reset.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	u32 ctrl, i;
	s32 status;

	DEBUGFUNC("ixgbe_reset_hw_E610");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops init failure is logged but deliberately non-fatal:
	 * the MAC reset below proceeds regardless.
	 */
	status = hw->phy.ops.init(hw);
	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
			  status);
mac_reset_top:
	/* Take the SW/FW semaphore around the reset-trigger write;
	 * released again before polling for completion.
	 */
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status != IXGBE_SUCCESS) {
		ERROR_REPORT2(IXGBE_ERROR_CAUTION,
			      "semaphore failed with %d", status);
		return IXGBE_ERR_SWFW_SYNC;
	}
	/* Set the reset bit while preserving the other CTRL bits */
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* Timed out: record the failure but still fall through to the
	 * post-reset settling delay and re-initialization below.
	 */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "Reset polling failed to complete.\n");
	}
	msec_delay(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		/* Clear the flag first so the second pass does not loop */
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	return status;
}
4501 
4502 /**
4503  * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
4504  * @hw: pointer to hardware structure
4505  *
4506  * Gets firmware version and if API version matches it
4507  * starts the hardware using the generic start_hw function
4508  * and the generation start_hw function.
4509  * Then performs revision-specific operations, if any.
4510  **/
ixgbe_start_hw_E610(struct ixgbe_hw * hw)4511 s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
4512 {
4513 	s32 ret_val = IXGBE_SUCCESS;
4514 
4515 	ret_val = hw->mac.ops.get_fw_version(hw);
4516 	if (ret_val)
4517 		goto out;
4518 
4519 	ret_val = ixgbe_start_hw_generic(hw);
4520 	if (ret_val != IXGBE_SUCCESS)
4521 		goto out;
4522 
4523 	ixgbe_start_hw_gen2(hw);
4524 
4525 out:
4526 	return ret_val;
4527 }
4528 
4529 /**
4530  * ixgbe_get_media_type_E610 - Gets media type
4531  * @hw: pointer to the HW struct
4532  *
4533  * In order to get the media type, the function gets PHY
4534  * capabilities and later on use them to identify the PHY type
4535  * checking phy_type_high and phy_type_low.
4536  *
4537  * Return: the type of media in form of ixgbe_media_type enum
4538  * or ixgbe_media_type_unknown in case of an error.
4539  */
ixgbe_get_media_type_E610(struct ixgbe_hw * hw)4540 enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
4541 {
4542 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4543 	u64 phy_mask = 0;
4544 	s32 rc;
4545 	u8 i;
4546 
4547 	rc = ixgbe_update_link_info(hw);
4548 	if (rc) {
4549 		return ixgbe_media_type_unknown;
4550 	}
4551 
4552 	/* If there is no link but PHY (dongle) is available SW should use
4553 	 * Get PHY Caps admin command instead of Get Link Status, find most
4554 	 * significant bit that is set in PHY types reported by the command
4555 	 * and use it to discover media type.
4556 	 */
4557 	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
4558 	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
4559 		/* Get PHY Capabilities */
4560 		rc = ixgbe_aci_get_phy_caps(hw, false,
4561 					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4562 					    &pcaps);
4563 		if (rc) {
4564 			return ixgbe_media_type_unknown;
4565 		}
4566 
4567 		/* Check if there is some bit set in phy_type_high */
4568 		for (i = 64; i > 0; i--) {
4569 			phy_mask = (u64)((u64)1 << (i - 1));
4570 			if ((pcaps.phy_type_high & phy_mask) != 0) {
4571 				/* If any bit is set treat it as PHY type */
4572 				hw->link.link_info.phy_type_high = phy_mask;
4573 				hw->link.link_info.phy_type_low = 0;
4574 				break;
4575 			}
4576 			phy_mask = 0;
4577 		}
4578 
4579 		/* If nothing found in phy_type_high search in phy_type_low */
4580 		if (phy_mask == 0) {
4581 			for (i = 64; i > 0; i--) {
4582 				phy_mask = (u64)((u64)1 << (i - 1));
4583 				if ((pcaps.phy_type_low & phy_mask) != 0) {
4584 					/* If any bit is set treat it as PHY type */
4585 					hw->link.link_info.phy_type_high = 0;
4586 					hw->link.link_info.phy_type_low = phy_mask;
4587 					break;
4588 				}
4589 			}
4590 		}
4591 
4592 	}
4593 
4594 	/* Based on link status or search above try to discover media type */
4595 	hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
4596 
4597 	return hw->phy.media_type;
4598 }
4599 
4600 /**
4601  * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
4602  * @hw: pointer to hardware structure
4603  *
4604  * Determines physical layer capabilities of the current configuration.
4605  *
4606  * Return: the exit code of the operation.
4607  **/
ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw * hw)4608 u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
4609 {
4610 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
4611 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4612 	u64 phy_type;
4613 	s32 rc;
4614 
4615 	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4616 				    &pcaps);
4617 	if (rc)
4618 		return IXGBE_PHYSICAL_LAYER_UNKNOWN;
4619 
4620 	phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
4621 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
4622 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
4623 	if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
4624 		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
4625 	if(phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
4626 		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
4627 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
4628 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
4629 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
4630 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
4631 	if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
4632 		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
4633 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
4634 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
4635 	if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
4636 		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
4637 	if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
4638 		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
4639 	if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
4640 		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
4641 	if(phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
4642 		physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
4643 
4644 	phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
4645 	if(phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
4646 		physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
4647 
4648 	return physical_layer;
4649 }
4650 
4651 /**
4652  * ixgbe_setup_link_E610 - Set up link
4653  * @hw: pointer to hardware structure
4654  * @speed: new link speed
4655  * @autoneg_wait: true when waiting for completion is needed
4656  *
4657  * Set up the link with the specified speed.
4658  *
4659  * Return: the exit code of the operation.
4660  */
ixgbe_setup_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait)4661 s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
4662 			  bool autoneg_wait)
4663 {
4664 	/* Simply request FW to perform proper PHY setup */
4665 	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
4666 }
4667 
4668 /**
4669  * ixgbe_check_link_E610 - Determine link and speed status
4670  * @hw: pointer to hardware structure
4671  * @speed: pointer to link speed
4672  * @link_up: true when link is up
4673  * @link_up_wait_to_complete: bool used to wait for link up or not
4674  *
4675  * Determine if the link is up and the current link speed
4676  * using ACI command (0x0607).
4677  *
4678  * Return: the exit code of the operation.
4679  */
ixgbe_check_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)4680 s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4681 			  bool *link_up, bool link_up_wait_to_complete)
4682 {
4683 	s32 rc;
4684 	u32 i;
4685 
4686 	if (!speed || !link_up)
4687 		return IXGBE_ERR_PARAM;
4688 
4689 	/* Set get_link_info flag to ensure that fresh
4690 	 * link information will be obtained from FW
4691 	 * by sending Get Link Status admin command. */
4692 	hw->link.get_link_info = true;
4693 
4694 	/* Update link information in adapter context. */
4695 	rc = ixgbe_get_link_status(hw, link_up);
4696 	if (rc)
4697 		return rc;
4698 
4699 	/* Wait for link up if it was requested. */
4700 	if (link_up_wait_to_complete && *link_up == false) {
4701 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
4702 			msec_delay(100);
4703 			hw->link.get_link_info = true;
4704 			rc = ixgbe_get_link_status(hw, link_up);
4705 			if (rc)
4706 				return rc;
4707 			if (*link_up)
4708 				break;
4709 		}
4710 	}
4711 
4712 	/* Use link information in adapter context updated by the call
4713 	 * to ixgbe_get_link_status() to determine current link speed.
4714 	 * Link speed information is valid only when link up was
4715 	 * reported by FW. */
4716 	if (*link_up) {
4717 		switch (hw->link.link_info.link_speed) {
4718 		case IXGBE_ACI_LINK_SPEED_10MB:
4719 			*speed = IXGBE_LINK_SPEED_10_FULL;
4720 			break;
4721 		case IXGBE_ACI_LINK_SPEED_100MB:
4722 			*speed = IXGBE_LINK_SPEED_100_FULL;
4723 			break;
4724 		case IXGBE_ACI_LINK_SPEED_1000MB:
4725 			*speed = IXGBE_LINK_SPEED_1GB_FULL;
4726 			break;
4727 		case IXGBE_ACI_LINK_SPEED_2500MB:
4728 			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4729 			break;
4730 		case IXGBE_ACI_LINK_SPEED_5GB:
4731 			*speed = IXGBE_LINK_SPEED_5GB_FULL;
4732 			break;
4733 		case IXGBE_ACI_LINK_SPEED_10GB:
4734 			*speed = IXGBE_LINK_SPEED_10GB_FULL;
4735 			break;
4736 		default:
4737 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
4738 			break;
4739 		}
4740 	} else {
4741 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4742 	}
4743 
4744 	return IXGBE_SUCCESS;
4745 }
4746 
4747 /**
4748  * ixgbe_get_link_capabilities_E610 - Determine link capabilities
4749  * @hw: pointer to hardware structure
4750  * @speed: pointer to link speed
4751  * @autoneg: true when autoneg or autotry is enabled
4752  *
4753  * Determine speed and AN parameters of a link.
4754  *
4755  * Return: the exit code of the operation.
4756  */
ixgbe_get_link_capabilities_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)4757 s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
4758 				     ixgbe_link_speed *speed,
4759 				     bool *autoneg)
4760 {
4761 	if (!speed || !autoneg)
4762 		return IXGBE_ERR_PARAM;
4763 
4764 	*autoneg = true;
4765 	*speed = hw->phy.speeds_supported;
4766 
4767 	return IXGBE_SUCCESS;
4768 }
4769 
4770 /**
4771  * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
4772  * @hw: pointer to hardware structure
4773  * @cfg: PHY configuration data to set FC mode
4774  * @req_mode: FC mode to configure
4775  *
4776  * Configures PHY Flow Control according to the provided configuration.
4777  *
4778  * Return: the exit code of the operation.
4779  */
ixgbe_cfg_phy_fc(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg,enum ixgbe_fc_mode req_mode)4780 s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
4781 		     struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
4782 		     enum ixgbe_fc_mode req_mode)
4783 {
4784 	struct ixgbe_aci_cmd_get_phy_caps_data* pcaps = NULL;
4785 	s32 status = IXGBE_SUCCESS;
4786 	u8 pause_mask = 0x0;
4787 
4788 	if (!cfg)
4789 		return IXGBE_ERR_PARAM;
4790 
4791 	switch (req_mode) {
4792 	case ixgbe_fc_auto:
4793 	{
4794 		pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
4795 			ixgbe_malloc(hw, sizeof(*pcaps));
4796 		if (!pcaps) {
4797 			status = IXGBE_ERR_OUT_OF_MEM;
4798 			goto out;
4799 		}
4800 
4801 		/* Query the value of FC that both the NIC and the attached
4802 		 * media can do. */
4803 		status = ixgbe_aci_get_phy_caps(hw, false,
4804 			IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
4805 		if (status)
4806 			goto out;
4807 
4808 		pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4809 		pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4810 
4811 		break;
4812 	}
4813 	case ixgbe_fc_full:
4814 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4815 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4816 		break;
4817 	case ixgbe_fc_rx_pause:
4818 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4819 		break;
4820 	case ixgbe_fc_tx_pause:
4821 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4822 		break;
4823 	default:
4824 		break;
4825 	}
4826 
4827 	/* clear the old pause settings */
4828 	cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
4829 		IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
4830 
4831 	/* set the new capabilities */
4832 	cfg->caps |= pause_mask;
4833 
4834 out:
4835 	if (pcaps)
4836 		ixgbe_free(hw, pcaps);
4837 	return status;
4838 }
4839 
4840 /**
4841  * ixgbe_setup_fc_E610 - Set up flow control
4842  * @hw: pointer to hardware structure
4843  *
4844  * Set up flow control. This has to be done during init time.
4845  *
4846  * Return: the exit code of the operation.
4847  */
ixgbe_setup_fc_E610(struct ixgbe_hw * hw)4848 s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
4849 {
4850 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
4851 	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
4852 	s32 status;
4853 
4854 	/* Get the current PHY config */
4855 	status = ixgbe_aci_get_phy_caps(hw, false,
4856 		IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
4857 	if (status)
4858 		return status;
4859 
4860 	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
4861 
4862 	/* Configure the set PHY data */
4863 	status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
4864 	if (status)
4865 		return status;
4866 
4867 	/* If the capabilities have changed, then set the new config */
4868 	if (cfg.caps != pcaps.caps) {
4869 		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4870 
4871 		status = ixgbe_aci_set_phy_cfg(hw, &cfg);
4872 		if (status)
4873 			return status;
4874 	}
4875 
4876 	return status;
4877 }
4878 
4879 /**
4880  * ixgbe_fc_autoneg_E610 - Configure flow control
4881  * @hw: pointer to hardware structure
4882  *
4883  * Configure Flow Control.
4884  */
ixgbe_fc_autoneg_E610(struct ixgbe_hw * hw)4885 void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
4886 {
4887 	s32 status;
4888 
4889 	/* Get current link status.
4890 	 * Current FC mode will be stored in the hw context. */
4891 	status = ixgbe_aci_get_link_info(hw, false, NULL);
4892 	if (status) {
4893 		goto out;
4894 	}
4895 
4896 	/* Check if the link is up */
4897 	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
4898 		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4899 		goto out;
4900 	}
4901 
4902 	/* Check if auto-negotiation has completed */
4903 	if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
4904 		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4905 		goto out;
4906 	}
4907 
4908 out:
4909 	if (status == IXGBE_SUCCESS) {
4910 		hw->fc.fc_was_autonegged = true;
4911 	} else {
4912 		hw->fc.fc_was_autonegged = false;
4913 		hw->fc.current_mode = hw->fc.requested_mode;
4914 	}
4915 }
4916 
4917 /**
4918  * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
4919  * @hw: pointer to the HW structure
4920  * @maj: driver version major number
4921  * @minor: driver version minor number
4922  * @build: driver version build number
4923  * @sub: driver version sub build number
4924  * @len: length of driver_ver string
4925  * @driver_ver: driver string
4926  *
4927  * Send driver version number to Firmware using ACI command (0x0002).
4928  *
4929  * Return: the exit code of the operation.
4930  * IXGBE_SUCCESS - OK
4931  * IXGBE_ERR_PARAM - incorrect parameters were given
4932  * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
4933  * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
4934  * IXGBE_ERR_OUT_OF_MEM - ran out of memory
4935  */
ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw * hw,u8 maj,u8 minor,u8 build,u8 sub,u16 len,const char * driver_ver)4936 s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
4937 			      u8 sub, u16 len, const char *driver_ver)
4938 {
4939 	size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
4940 	struct ixgbe_driver_ver dv;
4941 
4942 	DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
4943 
4944 	if (!len || !driver_ver)
4945 		return IXGBE_ERR_PARAM;
4946 
4947 	dv.major_ver = maj;
4948 	dv.minor_ver = minor;
4949 	dv.build_ver = build;
4950 	dv.subbuild_ver = sub;
4951 
4952 	memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
4953 	memcpy(dv.driver_string, driver_ver, limited_len);
4954 
4955 	return ixgbe_aci_send_driver_ver(hw, &dv);
4956 }
4957 
4958 /**
4959  * ixgbe_disable_rx_E610 - Disable RX unit
4960  * @hw: pointer to hardware structure
4961  *
4962  * Disable RX DMA unit on E610 with use of ACI command (0x000C).
4963  *
4964  * Return: the exit code of the operation.
4965  */
ixgbe_disable_rx_E610(struct ixgbe_hw * hw)4966 void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
4967 {
4968 	u32 rxctrl;
4969 
4970 	DEBUGFUNC("ixgbe_disable_rx_E610");
4971 
4972 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4973 	if (rxctrl & IXGBE_RXCTRL_RXEN) {
4974 		u32 pfdtxgswc;
4975 		s32 status;
4976 
4977 		pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4978 		if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
4979 			pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4980 			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4981 			hw->mac.set_lben = true;
4982 		} else {
4983 			hw->mac.set_lben = false;
4984 		}
4985 
4986 		status = ixgbe_aci_disable_rxen(hw);
4987 
4988 		/* If we fail - disable RX using register write */
4989 		if (status) {
4990 			rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4991 			if (rxctrl & IXGBE_RXCTRL_RXEN) {
4992 				rxctrl &= ~IXGBE_RXCTRL_RXEN;
4993 				IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
4994 			}
4995 		}
4996 	}
4997 }
4998 
4999 /**
5000  * ixgbe_setup_eee_E610 - Enable/disable EEE support
5001  * @hw: pointer to the HW structure
5002  * @enable_eee: boolean flag to enable EEE
5003  *
5004  * Enables/disable EEE based on enable_eee flag.
5005  *
5006  * Return: the exit code of the operation.
5007  */
ixgbe_setup_eee_E610(struct ixgbe_hw * hw,bool enable_eee)5008 s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
5009 {
5010 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5011 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5012 	u16 eee_cap = 0;
5013 	s32 status;
5014 
5015 	status = ixgbe_aci_get_phy_caps(hw, false,
5016 		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5017 	if (status != IXGBE_SUCCESS)
5018 		return status;
5019 
5020 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5021 
5022 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
5023 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
5024 
5025 	/* setup only speeds which are defined for [0x0601/0x0600].eee_cap */
5026 	if (enable_eee) {
5027 		if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_100_FULL)
5028 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
5029 		if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_1GB_FULL)
5030 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
5031 		if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
5032 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_2_5GBASE_T;
5033 		if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_5GB_FULL)
5034 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_5GBASE_T;
5035 		if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_10GB_FULL)
5036 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
5037 	}
5038 
5039 	/* Set EEE capability for particular PHY types */
5040 	phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
5041 
5042 	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5043 
5044 	return status;
5045 }
5046 
5047 /**
5048  * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
5049  * @hw: pointer to hardware structure
5050  *
5051  * Checks FW NVM recovery mode by
5052  * reading the value of the dedicated register.
5053  *
5054  * Return: true if FW is in recovery mode, otherwise false.
5055  */
ixgbe_fw_recovery_mode_E610(struct ixgbe_hw * hw)5056 bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
5057 {
5058 	u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
5059 
5060 	return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
5061 }
5062 
5063 /**
5064  * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
5065  * @hw: pointer to hardware structure
5066  *
5067  * Checks FW NVM Rollback mode by reading the
5068  * value of the dedicated register.
5069  *
5070  * Return: true if FW is in Rollback mode, otherwise false.
5071  */
ixgbe_fw_rollback_mode_E610(struct ixgbe_hw * hw)5072 bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
5073 {
5074 	u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
5075 
5076 	return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
5077 }
5078 
5079 /**
5080  * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
5081  * @hw: pointer to hardware structure
5082  *
5083  * Checks Thermal Sensor Autonomous Mode by reading the
5084  * value of the dedicated register.
5085  *
5086  * Return: true if FW is in TSAM, otherwise false.
5087  */
ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw * hw)5088 bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
5089 {
5090 	u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
5091 
5092 	return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
5093 }
5094 
5095 /**
5096  * ixgbe_init_phy_ops_E610 - PHY specific init
5097  * @hw: pointer to hardware structure
5098  *
5099  * Initialize any function pointers that were not able to be
5100  * set during init_shared_code because the PHY type was not known.
5101  *
5102  * Return: the exit code of the operation.
5103  */
ixgbe_init_phy_ops_E610(struct ixgbe_hw * hw)5104 s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
5105 {
5106 	struct ixgbe_mac_info *mac = &hw->mac;
5107 	struct ixgbe_phy_info *phy = &hw->phy;
5108 	s32 ret_val;
5109 
5110 	phy->ops.identify_sfp = ixgbe_identify_module_E610;
5111 	phy->ops.read_reg = NULL; /* PHY reg access is not required */
5112 	phy->ops.write_reg = NULL;
5113 	phy->ops.read_reg_mdi = NULL;
5114 	phy->ops.write_reg_mdi = NULL;
5115 	phy->ops.setup_link = ixgbe_setup_phy_link_E610;
5116 	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
5117 	phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
5118 	phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
5119 	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
5120 	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
5121 	phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
5122 	phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation  */
5123 	phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
5124 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
5125 		phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
5126 	else
5127 		phy->ops.set_phy_power = NULL;
5128 	phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
5129 	phy->ops.handle_lasi = NULL; /* no implementation for E610 */
5130 	phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
5131 	phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
5132 
5133 	/* TODO: Set functions pointers based on device ID */
5134 
5135 	/* Identify the PHY */
5136 	ret_val = phy->ops.identify(hw);
5137 	if (ret_val != IXGBE_SUCCESS)
5138 		return ret_val;
5139 
5140 	/* TODO: Set functions pointers based on PHY type */
5141 
5142 	return ret_val;
5143 }
5144 
5145 /**
5146  * ixgbe_identify_phy_E610 - Identify PHY
5147  * @hw: pointer to hardware structure
5148  *
5149  * Determine PHY type, supported speeds and PHY ID.
5150  *
5151  * Return: the exit code of the operation.
5152  */
ixgbe_identify_phy_E610(struct ixgbe_hw * hw)5153 s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
5154 {
5155 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
5156 	s32 rc;
5157 
5158 	/* Set PHY type */
5159 	hw->phy.type = ixgbe_phy_fw;
5160 
5161 	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
5162 				    &pcaps);
5163 	if (rc)
5164 		return rc;
5165 
5166 	if (!(pcaps.module_compliance_enforcement &
5167 	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
5168 		/* Handle lenient mode */
5169 		rc = ixgbe_aci_get_phy_caps(hw, false,
5170 					    IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
5171 					    &pcaps);
5172 		if (rc)
5173 			return rc;
5174 	}
5175 
5176 	/* Determine supported speeds */
5177 	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
5178 
5179 	if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
5180 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
5181 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
5182 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
5183 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
5184 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
5185 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
5186 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_T  ||
5187 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
5188 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
5189 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
5190 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1G_SGMII    ||
5191 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
5192 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
5193 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_T       ||
5194 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_DA      ||
5195 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_SR      ||
5196 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_LR      ||
5197 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1  ||
5198 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
5199 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C     ||
5200 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
5201 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
5202 
5203 	/* 2.5 and 5 Gbps link speeds must be excluded from the
5204 	 * auto-negotiation set used during driver initialization due to
5205 	 * compatibility issues with certain switches. Those issues do not
5206 	 * exist in case of E610 2.5G SKU device (0x57b1).
5207 	 */
5208 	if (!hw->phy.autoneg_advertised &&
5209 	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
5210 		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
5211 
5212 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_T   ||
5213 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_X   ||
5214 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_KX  ||
5215 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
5216 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
5217 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
5218 
5219 	if (!hw->phy.autoneg_advertised &&
5220 	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
5221 		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
5222 
5223 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_T  ||
5224 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
5225 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
5226 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
5227 
5228 	/* Set PHY ID */
5229 	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
5230 
5231 	return IXGBE_SUCCESS;
5232 }
5233 
5234 /**
5235  * ixgbe_identify_module_E610 - Identify SFP module type
5236  * @hw: pointer to hardware structure
5237  *
5238  * Identify the SFP module type.
5239  *
5240  * Return: the exit code of the operation.
5241  */
ixgbe_identify_module_E610(struct ixgbe_hw * hw)5242 s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
5243 {
5244 	bool media_available;
5245 	u8 module_type;
5246 	s32 rc;
5247 
5248 	rc = ixgbe_update_link_info(hw);
5249 	if (rc)
5250 		goto err;
5251 
5252 	media_available =
5253 		(hw->link.link_info.link_info &
5254 		 IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
5255 
5256 	if (media_available) {
5257 		hw->phy.sfp_type = ixgbe_sfp_type_unknown;
5258 
5259 		/* Get module type from hw context updated by ixgbe_update_link_info() */
5260 		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
5261 
5262 		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
5263 		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
5264 			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
5265 		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
5266 			hw->phy.sfp_type = ixgbe_sfp_type_sr;
5267 		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
5268 			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
5269 			hw->phy.sfp_type = ixgbe_sfp_type_lr;
5270 		}
5271 		rc = IXGBE_SUCCESS;
5272 	} else {
5273 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
5274 		rc = IXGBE_ERR_SFP_NOT_PRESENT;
5275 	}
5276 err:
5277 	return rc;
5278 }
5279 
5280 /**
5281  * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
5282  * @hw: pointer to hardware structure
5283  *
5284  * Set the parameters for the firmware-controlled PHYs.
5285  *
5286  * Return: the exit code of the operation.
5287  */
ixgbe_setup_phy_link_E610(struct ixgbe_hw * hw)5288 s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
5289 {
5290 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
5291 	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
5292 	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
5293 	u64 sup_phy_type_low, sup_phy_type_high;
5294 	s32 rc;
5295 
5296 	rc = ixgbe_aci_get_link_info(hw, false, NULL);
5297 	if (rc) {
5298 		goto err;
5299 	}
5300 
5301 	/* If media is not available get default config */
5302 	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
5303 		rmode = IXGBE_ACI_REPORT_DFLT_CFG;
5304 
5305 	rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
5306 	if (rc) {
5307 		goto err;
5308 	}
5309 
5310 	sup_phy_type_low = pcaps.phy_type_low;
5311 	sup_phy_type_high = pcaps.phy_type_high;
5312 
5313 	/* Get Active configuration to avoid unintended changes */
5314 	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
5315 				    &pcaps);
5316 	if (rc) {
5317 		goto err;
5318 	}
5319 	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
5320 
5321 	/* Set default PHY types for a given speed */
5322 	pcfg.phy_type_low = 0;
5323 	pcfg.phy_type_high = 0;
5324 
5325 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
5326 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
5327 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
5328 	}
5329 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
5330 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
5331 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
5332 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
5333 	}
5334 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
5335 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
5336 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
5337 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
5338 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
5339 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
5340 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
5341 	}
5342 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
5343 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
5344 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
5345 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
5346 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
5347 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
5348 	}
5349 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
5350 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
5351 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
5352 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
5353 	}
5354 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
5355 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
5356 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
5357 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
5358 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
5359 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
5360 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
5361 		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
5362 		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
5363 	}
5364 
5365 	/* Mask the set values to avoid requesting unsupported link types */
5366 	pcfg.phy_type_low &= sup_phy_type_low;
5367 	pcfg.phy_type_high &= sup_phy_type_high;
5368 
5369 	if (pcfg.phy_type_high != pcaps.phy_type_high ||
5370 	    pcfg.phy_type_low != pcaps.phy_type_low ||
5371 	    pcfg.caps != pcaps.caps) {
5372 		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
5373 		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
5374 
5375 		rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
5376 	}
5377 
5378 err:
5379 	return rc;
5380 }
5381 
5382 /**
5383  * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
5384  * @hw: pointer to hardware structure
5385  * @firmware_version: pointer to the PHY Firmware Version
5386  *
5387  * Determines PHY FW version based on response to Get PHY Capabilities
5388  * admin command (0x0600).
5389  *
5390  * Return: the exit code of the operation.
5391  */
ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw * hw,u16 * firmware_version)5392 s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
5393 					u16 *firmware_version)
5394 {
5395 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
5396 	s32 status;
5397 
5398 	if (!firmware_version)
5399 		return IXGBE_ERR_PARAM;
5400 
5401 	status = ixgbe_aci_get_phy_caps(hw, false,
5402 					IXGBE_ACI_REPORT_ACTIVE_CFG,
5403 					&pcaps);
5404 	if (status)
5405 		return status;
5406 
5407 	/* TODO: determine which bytes of the 8-byte phy_fw_ver
5408 	 * field should be written to the 2-byte firmware_version
5409 	 * output argument. */
5410 	memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
5411 
5412 	return IXGBE_SUCCESS;
5413 }
5414 
5415 /**
5416  * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
5417  * @hw: pointer to hardware structure
5418  * @byte_offset: byte offset at address 0xA2
5419  * @sff8472_data: value read
5420  *
5421  * Performs byte read operation from SFP module's SFF-8472 data over I2C.
5422  *
5423  * Return: the exit code of the operation.
5424  **/
ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 * sff8472_data)5425 s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
5426 				u8 *sff8472_data)
5427 {
5428 	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
5429 				    byte_offset, 0,
5430 				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
5431 				    sff8472_data, 1, false);
5432 }
5433 
5434 /**
5435  * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
5436  * @hw: pointer to hardware structure
5437  * @byte_offset: EEPROM byte offset to read
5438  * @eeprom_data: value read
5439  *
5440  * Performs byte read operation from SFP module's EEPROM over I2C interface.
5441  *
5442  * Return: the exit code of the operation.
5443  **/
ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 * eeprom_data)5444 s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
5445 			       u8 *eeprom_data)
5446 {
5447 	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
5448 				    byte_offset, 0,
5449 				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
5450 				    eeprom_data, 1, false);
5451 }
5452 
5453 /**
5454  * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
5455  * @hw: pointer to hardware structure
5456  * @byte_offset: EEPROM byte offset to write
5457  * @eeprom_data: value to write
5458  *
5459  * Performs byte write operation to SFP module's EEPROM over I2C interface.
5460  *
5461  * Return: the exit code of the operation.
5462  **/
ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 eeprom_data)5463 s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
5464 				u8 eeprom_data)
5465 {
5466 	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
5467 				    byte_offset, 0,
5468 				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
5469 				    &eeprom_data, 1, true);
5470 }
5471 
5472 /**
5473  * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
5474  * @hw: pointer to hardware structure
5475  *
5476  * Get the link status and check if the PHY temperature alarm detected.
5477  *
5478  * Return: the exit code of the operation.
5479  */
ixgbe_check_overtemp_E610(struct ixgbe_hw * hw)5480 s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
5481 {
5482 	struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
5483 	struct ixgbe_aci_cmd_get_link_status *resp;
5484 	struct ixgbe_aci_desc desc;
5485 	s32 status = IXGBE_SUCCESS;
5486 
5487 	if (!hw)
5488 		return IXGBE_ERR_PARAM;
5489 
5490 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
5491 	resp = &desc.params.get_link_status;
5492 	resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
5493 
5494 	status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
5495 	if (status != IXGBE_SUCCESS)
5496 		return status;
5497 
5498 	if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
5499 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
5500 			      "PHY Temperature Alarm detected");
5501 		status = IXGBE_ERR_OVERTEMP;
5502 	}
5503 
5504 	return status;
5505 }
5506 
5507 /**
5508  * ixgbe_set_phy_power_E610 - Control power for copper PHY
5509  * @hw: pointer to hardware structure
5510  * @on: true for on, false for off
5511  *
5512  * Set the power on/off of the PHY
5513  * by getting its capabilities and setting the appropriate
5514  * configuration parameters.
5515  *
5516  * Return: the exit code of the operation.
5517  */
ixgbe_set_phy_power_E610(struct ixgbe_hw * hw,bool on)5518 s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
5519 {
5520 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5521 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5522 	s32 status;
5523 
5524 	status = ixgbe_aci_get_phy_caps(hw, false,
5525 		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5526 	if (status != IXGBE_SUCCESS)
5527 		return status;
5528 
5529 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5530 
5531 	if (on) {
5532 		phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
5533 	} else {
5534 		phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
5535 	}
5536 
5537 	/* PHY is already in requested power mode */
5538 	if (phy_caps.caps == phy_cfg.caps)
5539 		return IXGBE_SUCCESS;
5540 
5541 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
5542 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
5543 
5544 	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5545 
5546 	return status;
5547 }
5548 
5549 /**
5550  * ixgbe_enter_lplu_E610 - Transition to low power states
5551  * @hw: pointer to hardware structure
5552  *
5553  * Configures Low Power Link Up on transition to low power states
5554  * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
5555  * X557 PHY immediately prior to entering LPLU.
5556  *
5557  * Return: the exit code of the operation.
5558  */
ixgbe_enter_lplu_E610(struct ixgbe_hw * hw)5559 s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
5560 {
5561 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5562 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5563 	s32 status;
5564 
5565 	status = ixgbe_aci_get_phy_caps(hw, false,
5566 		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5567 	if (status != IXGBE_SUCCESS)
5568 		return status;
5569 
5570 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5571 
5572 	phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
5573 
5574 	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5575 
5576 	return status;
5577 }
5578 
5579 /**
5580  * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
5581  * @hw: pointer to hardware structure
5582  *
5583  * Initializes the EEPROM parameters ixgbe_eeprom_info within the
5584  * ixgbe_hw struct in order to set up EEPROM access.
5585  *
5586  * Return: the exit code of the operation.
5587  */
ixgbe_init_eeprom_params_E610(struct ixgbe_hw * hw)5588 s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
5589 {
5590 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5591 	u32 gens_stat;
5592 	u8 sr_size;
5593 
5594 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
5595 		eeprom->type = ixgbe_flash;
5596 
5597 		gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
5598 		sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
5599 			  GLNVM_GENS_SR_SIZE_S;
5600 
5601 		/* Switching to words (sr_size contains power of 2) */
5602 		eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
5603 
5604 		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
5605 			  eeprom->type, eeprom->word_size);
5606 	}
5607 
5608 	return IXGBE_SUCCESS;
5609 }
5610 
5611 /**
5612  * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
5613  * @hw: pointer to hardware structure
5614  * @offset: offset of  word in the EEPROM to read
5615  * @data: word read from the EEPROM
5616  *
5617  * Reads a 16 bit word from the EEPROM using the ACI.
5618  * If the EEPROM params are not initialized, the function
5619  * initialize them before proceeding with reading.
5620  * The function acquires and then releases the NVM ownership.
5621  *
5622  * Return: the exit code of the operation.
5623  */
ixgbe_read_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 * data)5624 s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
5625 {
5626 	s32 status;
5627 
5628 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5629 		status = ixgbe_init_eeprom_params(hw);
5630 		if (status)
5631 			return status;
5632 	}
5633 
5634 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5635 	if (status)
5636 		return status;
5637 
5638 	status = ixgbe_read_sr_word_aci(hw, offset, data);
5639 	ixgbe_release_nvm(hw);
5640 
5641 	return status;
5642 }
5643 
5644 /**
5645  * ixgbe_read_ee_aci_buffer_E610- Read EEPROM word(s) using admin commands.
5646  * @hw: pointer to hardware structure
5647  * @offset: offset of  word in the EEPROM to read
5648  * @words: number of words
5649  * @data: word(s) read from the EEPROM
5650  *
5651  * Reads a 16 bit word(s) from the EEPROM using the ACI.
5652  * If the EEPROM params are not initialized, the function
5653  * initialize them before proceeding with reading.
5654  * The function acquires and then releases the NVM ownership.
5655  *
5656  * Return: the exit code of the operation.
5657  */
ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5658 s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5659 				  u16 words, u16 *data)
5660 {
5661 	s32 status;
5662 
5663 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5664 		status = ixgbe_init_eeprom_params(hw);
5665 		if (status)
5666 			return status;
5667 	}
5668 
5669 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5670 	if (status)
5671 		return status;
5672 
5673 	status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
5674 	ixgbe_release_nvm(hw);
5675 
5676 	return status;
5677 }
5678 
5679 /**
5680  * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
5681  * @hw: pointer to hardware structure
5682  * @offset: offset of  word in the EEPROM to write
5683  * @data: word write to the EEPROM
5684  *
5685  * Write a 16 bit word to the EEPROM using the ACI.
5686  * If the EEPROM params are not initialized, the function
5687  * initialize them before proceeding with writing.
5688  * The function acquires and then releases the NVM ownership.
5689  *
5690  * Return: the exit code of the operation.
5691  */
ixgbe_write_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 data)5692 s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
5693 {
5694 	s32 status;
5695 
5696 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5697 		status = ixgbe_init_eeprom_params(hw);
5698 		if (status)
5699 			return status;
5700 	}
5701 
5702 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5703 	if (status)
5704 		return status;
5705 
5706 	status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
5707 	ixgbe_release_nvm(hw);
5708 
5709 	return status;
5710 }
5711 
5712 /**
5713  * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
5714  * @hw: pointer to hardware structure
5715  * @offset: offset of  word in the EEPROM to write
5716  * @words: number of words
5717  * @data: word(s) write to the EEPROM
5718  *
5719  * Write a 16 bit word(s) to the EEPROM using the ACI.
5720  * If the EEPROM params are not initialized, the function
5721  * initialize them before proceeding with writing.
5722  * The function acquires and then releases the NVM ownership.
5723  *
5724  * Return: the exit code of the operation.
5725  */
ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5726 s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5727 				   u16 words, u16 *data)
5728 {
5729 	s32 status;
5730 
5731 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5732 		status = ixgbe_init_eeprom_params(hw);
5733 		if (status)
5734 			return status;
5735 	}
5736 
5737 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5738 	if (status)
5739 		return status;
5740 
5741 	status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
5742 	ixgbe_release_nvm(hw);
5743 
5744 	return status;
5745 }
5746 
5747 /**
5748  * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
5749  * @hw: pointer to hardware structure
5750  *
5751  * Calculate SW Checksum that covers the whole 64kB shadow RAM
5752  * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
5753  * is customer specific and unknown. Therefore, this function skips all maximum
5754  * possible size of VPD (1kB).
5755  * If the EEPROM params are not initialized, the function
5756  * initializes them before proceeding.
5757  * The function acquires and then releases the NVM ownership.
5758  *
5759  * Return: the negative error code on error, or the 16-bit checksum
5760  */
ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw * hw)5761 s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
5762 {
5763 	bool nvm_acquired = false;
5764 	u16 pcie_alt_module = 0;
5765 	u16 checksum_local = 0;
5766 	u16 checksum = 0;
5767 	u16 vpd_module;
5768 	void *vmem;
5769 	s32 status;
5770 	u16 *data;
5771 	u16 i;
5772 
5773 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5774 		status = ixgbe_init_eeprom_params(hw);
5775 		if (status)
5776 			return status;
5777 	}
5778 
5779 	vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
5780 	if (!vmem)
5781 		return IXGBE_ERR_OUT_OF_MEM;
5782 	data = (u16 *)vmem;
5783 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5784 	if (status)
5785 		goto ixgbe_calc_sr_checksum_exit;
5786 	nvm_acquired = true;
5787 
5788 	/* read pointer to VPD area */
5789 	status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
5790 	if (status)
5791 		goto ixgbe_calc_sr_checksum_exit;
5792 
5793 	/* read pointer to PCIe Alt Auto-load module */
5794 	status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
5795 					&pcie_alt_module);
5796 	if (status)
5797 		goto ixgbe_calc_sr_checksum_exit;
5798 
5799 	/* Calculate SW checksum that covers the whole 64kB shadow RAM
5800 	 * except the VPD and PCIe ALT Auto-load modules
5801 	 */
5802 	for (i = 0; i < hw->eeprom.word_size; i++) {
5803 		/* Read SR page */
5804 		if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
5805 			u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
5806 
5807 			status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
5808 			if (status != IXGBE_SUCCESS)
5809 				goto ixgbe_calc_sr_checksum_exit;
5810 		}
5811 
5812 		/* Skip Checksum word */
5813 		if (i == E610_SR_SW_CHECKSUM_WORD)
5814 			continue;
5815 		/* Skip VPD module (convert byte size to word count) */
5816 		if (i >= (u32)vpd_module &&
5817 		    i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
5818 			continue;
5819 		/* Skip PCIe ALT module (convert byte size to word count) */
5820 		if (i >= (u32)pcie_alt_module &&
5821 		    i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
5822 			continue;
5823 
5824 		checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
5825 	}
5826 
5827 	checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
5828 
5829 ixgbe_calc_sr_checksum_exit:
5830 	if(nvm_acquired)
5831 		ixgbe_release_nvm(hw);
5832 	ixgbe_free(hw, vmem);
5833 
5834 	if(!status)
5835 		return (s32)checksum;
5836 	else
5837 		return status;
5838 }
5839 
5840 /**
5841  * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
5842  * @hw: pointer to hardware structure
5843  *
5844  * After writing EEPROM to Shadow RAM, software sends the admin command
5845  * to recalculate and update EEPROM checksum and instructs the hardware
5846  * to update the flash.
5847  * If the EEPROM params are not initialized, the function
5848  * initialize them before proceeding.
5849  * The function acquires and then releases the NVM ownership.
5850  *
5851  * Return: the exit code of the operation.
5852  */
ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw * hw)5853 s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
5854 {
5855 	s32 status;
5856 
5857 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5858 		status = ixgbe_init_eeprom_params(hw);
5859 		if (status)
5860 			return status;
5861 	}
5862 
5863 	status = ixgbe_nvm_recalculate_checksum(hw);
5864 	if (status)
5865 		return status;
5866 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5867 	if (status)
5868 		return status;
5869 
5870 	status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
5871 					  NULL);
5872 	ixgbe_release_nvm(hw);
5873 
5874 	return status;
5875 }
5876 
5877 /**
5878  * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
5879  * @hw: pointer to hardware structure
5880  * @checksum_val: calculated checksum
5881  *
5882  * Performs checksum calculation and validates the EEPROM checksum. If the
5883  * caller does not need checksum_val, the value can be NULL.
5884  * If the EEPROM params are not initialized, the function
5885  * initialize them before proceeding.
5886  * The function acquires and then releases the NVM ownership.
5887  *
5888  * Return: the exit code of the operation.
5889  */
ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw * hw,u16 * checksum_val)5890 s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
5891 {
5892 	u32 status;
5893 
5894 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5895 		status = ixgbe_init_eeprom_params(hw);
5896 		if (status)
5897 			return status;
5898 	}
5899 
5900 	status = ixgbe_nvm_validate_checksum(hw);
5901 
5902 	if (status)
5903 		return status;
5904 
5905 	if (checksum_val) {
5906 		u16 tmp_checksum;
5907 		status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5908 		if (status)
5909 			return status;
5910 
5911 		status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
5912 						&tmp_checksum);
5913 		ixgbe_release_nvm(hw);
5914 
5915 		if (!status)
5916 			*checksum_val = tmp_checksum;
5917 	}
5918 
5919 	return status;
5920 }
5921 
5922 /**
5923  * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
5924  * @hw: pointer to hardware structure
5925  * @module_tlv: pointer to module TLV to return
5926  * @module_tlv_len: pointer to module TLV length to return
5927  * @module_type: module type requested
5928  *
5929  * Finds the requested sub module TLV type from the Preserved Field
5930  * Area (PFA) and returns the TLV pointer and length. The caller can
5931  * use these to read the variable length TLV value.
5932  *
5933  * Return: the exit code of the operation.
5934  */
ixgbe_get_pfa_module_tlv(struct ixgbe_hw * hw,u16 * module_tlv,u16 * module_tlv_len,u16 module_type)5935 static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
5936 				    u16 *module_tlv_len, u16 module_type)
5937 {
5938 	u16 pfa_len, pfa_ptr, pfa_end_ptr;
5939 	u16 next_tlv;
5940 	s32 status;
5941 
5942 	status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
5943 	if (status != IXGBE_SUCCESS) {
5944 		return status;
5945 	}
5946 	status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
5947 	if (status != IXGBE_SUCCESS) {
5948 		return status;
5949 	}
5950 	/* Starting with first TLV after PFA length, iterate through the list
5951 	 * of TLVs to find the requested one.
5952 	 */
5953 	next_tlv = pfa_ptr + 1;
5954 	pfa_end_ptr = pfa_ptr + pfa_len;
5955 	while (next_tlv < pfa_end_ptr) {
5956 		u16 tlv_sub_module_type, tlv_len;
5957 
5958 		/* Read TLV type */
5959 		status = ixgbe_read_ee_aci_E610(hw, next_tlv,
5960 						&tlv_sub_module_type);
5961 		if (status != IXGBE_SUCCESS) {
5962 			break;
5963 		}
5964 		/* Read TLV length */
5965 		status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
5966 		if (status != IXGBE_SUCCESS) {
5967 			break;
5968 		}
5969 		if (tlv_sub_module_type == module_type) {
5970 			if (tlv_len) {
5971 				*module_tlv = next_tlv;
5972 				*module_tlv_len = tlv_len;
5973 				return IXGBE_SUCCESS;
5974 			}
5975 			return IXGBE_ERR_INVAL_SIZE;
5976 		}
5977 		/* Check next TLV, i.e. current TLV pointer + length + 2 words
5978 		 * (for current TLV's type and length)
5979 		 */
5980 		next_tlv = next_tlv + tlv_len + 2;
5981 	}
5982 	/* Module does not exist */
5983 	return IXGBE_ERR_DOES_NOT_EXIST;
5984 }
5985 
5986 /**
5987  * ixgbe_read_pba_string_E610 - Reads part number string from NVM
5988  * @hw: pointer to hardware structure
5989  * @pba_num: stores the part number string from the NVM
5990  * @pba_num_size: part number string buffer length
5991  *
5992  * Reads the part number string from the NVM.
5993  *
5994  * Return: the exit code of the operation.
5995  */
ixgbe_read_pba_string_E610(struct ixgbe_hw * hw,u8 * pba_num,u32 pba_num_size)5996 s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
5997 			       u32 pba_num_size)
5998 {
5999 	u16 pba_tlv, pba_tlv_len;
6000 	u16 pba_word, pba_size;
6001 	s32 status;
6002 	u16 i;
6003 
6004 	status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
6005 					E610_SR_PBA_BLOCK_PTR);
6006 	if (status != IXGBE_SUCCESS) {
6007 		return status;
6008 	}
6009 
6010 	/* pba_size is the next word */
6011 	status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
6012 	if (status != IXGBE_SUCCESS) {
6013 		return status;
6014 	}
6015 
6016 	if (pba_tlv_len < pba_size) {
6017 		return IXGBE_ERR_INVAL_SIZE;
6018 	}
6019 
6020 	/* Subtract one to get PBA word count (PBA Size word is included in
6021 	 * total size)
6022 	 */
6023 	pba_size--;
6024 	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
6025 		return IXGBE_ERR_PARAM;
6026 	}
6027 
6028 	for (i = 0; i < pba_size; i++) {
6029 		status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
6030 						&pba_word);
6031 		if (status != IXGBE_SUCCESS) {
6032 			return status;
6033 		}
6034 
6035 		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
6036 		pba_num[(i * 2) + 1] = pba_word & 0xFF;
6037 	}
6038 	pba_num[(pba_size * 2)] = '\0';
6039 
6040 	return status;
6041 }
6042