xref: /freebsd/sys/dev/ixgbe/ixgbe_e610.c (revision dea5f973d0c8d29a79b433283d0a2de8f4615957)
1 /******************************************************************************
2   SPDX-License-Identifier: BSD-3-Clause
3 
4   Copyright (c) 2025, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 
35 #include "ixgbe_type.h"
36 #include "ixgbe_e610.h"
37 #include "ixgbe_x550.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 #include "ixgbe_api.h"
41 
/**
 * ixgbe_init_aci - initialization routine for Admin Command Interface
 * @hw: pointer to the hardware structure
 *
 * Initialize the ACI lock, which serializes access to the Admin Command
 * Interface registers (acquired/released around every command send).
 * Must be called before any ACI command is issued.
 */
void ixgbe_init_aci(struct ixgbe_hw *hw)
{
	ixgbe_init_lock(&hw->aci.lock);
}
52 
/**
 * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
 * @hw: pointer to the hardware structure
 *
 * Destroy the ACI lock created by ixgbe_init_aci(). No ACI commands may
 * be issued after this call.
 */
void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
{
	ixgbe_destroy_lock(&hw->aci.lock);
}
63 
64 /**
65  * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
66  * be resent
67  * @opcode: ACI opcode
68  *
69  * Check if ACI command should be sent again depending on the provided opcode.
70  *
71  * Return: true if the sending command routine should be repeated,
72  * otherwise false.
73  */
ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)74 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
75 {
76 	switch (opcode) {
77 	case ixgbe_aci_opc_disable_rxen:
78 	case ixgbe_aci_opc_get_phy_caps:
79 	case ixgbe_aci_opc_get_link_status:
80 	case ixgbe_aci_opc_get_link_topo:
81 		return true;
82 	}
83 
84 	return false;
85 }
86 
/**
 * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
 * Command Interface
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 *
 * Admin Command is sent using CSR by setting descriptor and buffer in specific
 * registers. The caller is expected to hold hw->aci.lock.
 *
 * Return: the exit code of the operation.
 * * - IXGBE_SUCCESS - success.
 * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
 * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
 * * - IXGBE_ERR_PARAM - buf_size is too big or
 * invalid argument buf or buf_size.
 * * - IXGBE_ERR_ACI_TIMEOUT - Admin Command X command timeout.
 * * - IXGBE_ERR_ACI_ERROR - Admin Command X invalid state of HICR register or
 * Admin Command failed because of bad opcode was returned or
 * Admin Command failed with error Y.
 */
static s32
ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
			   void *buf, u16 buf_size)
{
	u32 hicr = 0, tmp_buf_size = 0, i = 0;
	u32 *raw_desc = (u32 *)desc;
	s32 status = IXGBE_SUCCESS;
	bool valid_buf = false;
	u32 *tmp_buf = NULL;
	u16 opcode = 0;

	/* do { } while (0) lets every error path 'break' to the common
	 * cleanup (freeing tmp_buf) at the bottom of the function.
	 */
	do {
		hw->aci.last_status = IXGBE_ACI_RC_OK;

		/* It's necessary to check if mechanism is enabled */
		hicr = IXGBE_READ_REG(hw, PF_HICR);
		if (!(hicr & PF_HICR_EN)) {
			status = IXGBE_ERR_ACI_DISABLED;
			break;
		}
		/* PF_HICR_C still set means the previous command has not
		 * completed yet.
		 */
		if (hicr & PF_HICR_C) {
			hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
			status = IXGBE_ERR_ACI_BUSY;
			break;
		}
		/* Save the request opcode (still little-endian) so the
		 * response descriptor can be matched against it below.
		 */
		opcode = desc->opcode;

		if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
			status = IXGBE_ERR_PARAM;
			break;
		}

		if (buf)
			desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);

		/* Check if buf and buf_size are proper params */
		if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
			if ((buf && buf_size == 0) ||
			    (buf == NULL && buf_size)) {
				status = IXGBE_ERR_PARAM;
				break;
			}
			if (buf && buf_size)
				valid_buf = true;
		}

		if (valid_buf == true) {
			/* Round the staging size up to a multiple of four
			 * bytes; the buffer registers are written one dword
			 * at a time.
			 */
			if (buf_size % 4 == 0)
				tmp_buf_size = buf_size;
			else
				tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;

			tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
			if (!tmp_buf)
				return IXGBE_ERR_OUT_OF_MEM;

			/* tmp_buf will be firstly filled with 0xFF and after
			 * that the content of buf will be written into it.
			 * This approach lets us use valid buf_size and
			 * prevents us from reading past buf area
			 * when buf_size mod 4 not equal to 0.
			 */
			memset(tmp_buf, 0xFF, tmp_buf_size);
			memcpy(tmp_buf, buf, buf_size);

			if (tmp_buf_size > IXGBE_ACI_LG_BUF)
				desc->flags |=
				IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);

			desc->datalen = IXGBE_CPU_TO_LE16(buf_size);

			/* When the RD flag is set the command carries a
			 * payload; push it to the device via the PF_HIBA
			 * buffer registers.
			 */
			if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
				for (i = 0; i < tmp_buf_size / 4; i++) {
					IXGBE_WRITE_REG(hw, PF_HIBA(i),
						IXGBE_LE32_TO_CPU(tmp_buf[i]));
				}
			}
		}

		/* Descriptor is written to specific registers */
		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
			IXGBE_WRITE_REG(hw, PF_HIDA(i),
					IXGBE_LE32_TO_CPU(raw_desc[i]));

		/* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
		 * PF_HICR_EV
		 */
		hicr = IXGBE_READ_REG(hw, PF_HICR);
		hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
		IXGBE_WRITE_REG(hw, PF_HICR, hicr);

		/* Wait for sync Admin Command response */
		for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
			hicr = IXGBE_READ_REG(hw, PF_HICR);
			if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
				break;

			msec_delay(1);
		}

		/* Wait for async Admin Command response */
		if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
			for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
			     i += 1) {
				hicr = IXGBE_READ_REG(hw, PF_HICR);
				if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
					break;

				msec_delay(1);
			}
		}

		/* Read sync Admin Command response */
		if ((hicr & PF_HICR_SV)) {
			for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
				raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
				raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
			}
		}

		/* Read async Admin Command response */
		if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
			for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
				raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
				raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
			}
		}

		/* Handle timeout and invalid state of HICR register */
		if (hicr & PF_HICR_C) {
			status = IXGBE_ERR_ACI_TIMEOUT;
			break;
		} else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
			status = IXGBE_ERR_ACI_ERROR;
			break;
		}

		/* For every command other than 0x0014 treat opcode mismatch
		 * as an error. Response to 0x0014 command read from HIDA_2
		 * is a descriptor of an event which is expected to contain
		 * different opcode than the command.
		 */
		if (desc->opcode != opcode &&
		    opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
			status = IXGBE_ERR_ACI_ERROR;
			break;
		}

		/* Record the firmware's return code for the caller. */
		if (desc->retval != IXGBE_ACI_RC_OK) {
			hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
			status = IXGBE_ERR_ACI_ERROR;
			break;
		}

		/* Write a response values to a buf */
		if (valid_buf && (desc->flags &
				  IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
			for (i = 0; i < tmp_buf_size / 4; i++) {
				tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
				tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
			}
			memcpy(buf, tmp_buf, buf_size);
		}
	} while (0);

	/* tmp_buf is only allocated on the valid_buf path above. */
	if (tmp_buf)
		ixgbe_free(hw, tmp_buf);

	return status;
}
279 
280 /**
281  * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
282  * @hw: pointer to the HW struct
283  * @desc: descriptor describing the command
284  * @buf: buffer to use for indirect commands (NULL for direct commands)
285  * @buf_size: size of buffer for indirect commands (0 for direct commands)
286  *
287  * Helper function to send FW Admin Commands to the FW Admin Command Interface.
288  *
289  * Retry sending the FW Admin Command multiple times to the FW ACI
290  * if the EBUSY Admin Command error is returned.
291  *
292  * Return: the exit code of the operation.
293  */
ixgbe_aci_send_cmd(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)294 s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
295 		       void *buf, u16 buf_size)
296 {
297 	struct ixgbe_aci_desc desc_cpy;
298 	enum ixgbe_aci_err last_status;
299 	bool is_cmd_for_retry;
300 	u8 *buf_cpy = NULL;
301 	s32 status;
302 	u16 opcode;
303 	u8 idx = 0;
304 
305 	opcode = IXGBE_LE16_TO_CPU(desc->opcode);
306 	is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
307 	memset(&desc_cpy, 0, sizeof(desc_cpy));
308 
309 	if (is_cmd_for_retry) {
310 		if (buf) {
311 			buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
312 			if (!buf_cpy)
313 				return IXGBE_ERR_OUT_OF_MEM;
314 		}
315 		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
316 	}
317 
318 	do {
319 		ixgbe_acquire_lock(&hw->aci.lock);
320 		status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
321 		last_status = hw->aci.last_status;
322 		ixgbe_release_lock(&hw->aci.lock);
323 
324 		if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
325 		    (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
326 			break;
327 
328 		if (buf)
329 			memcpy(buf, buf_cpy, buf_size);
330 		memcpy(desc, &desc_cpy, sizeof(desc_cpy));
331 
332 		msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
333 	} while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
334 
335 	if (buf_cpy)
336 		ixgbe_free(hw, buf_cpy);
337 
338 	return status;
339 }
340 
341 /**
342  * ixgbe_aci_check_event_pending - check if there are any pending events
343  * @hw: pointer to the HW struct
344  *
345  * Determine if there are any pending events.
346  *
347  * Return: true if there are any currently pending events
348  * otherwise false.
349  */
ixgbe_aci_check_event_pending(struct ixgbe_hw * hw)350 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
351 {
352 	u32 ep_bit_mask;
353 	u32 fwsts;
354 
355 	ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
356 
357 	/* Check state of Event Pending (EP) bit */
358 	fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
359 	return (fwsts & ep_bit_mask) ? true : false;
360 }
361 
/**
 * ixgbe_aci_get_event - get an event from ACI
 * @hw: pointer to the HW struct
 * @e: event information structure
 * @pending: optional flag signaling that there are more pending events
 *
 * Obtain an event from ACI and return its content
 * through 'e' using ACI command (0x0014).
 * Provide information if there are more events
 * to retrieve through 'pending'.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
			bool *pending)
{
	struct ixgbe_aci_desc desc;
	s32 status;

	/* msg_buf and buf_len must be both set or both unset */
	if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
		return IXGBE_ERR_PARAM;

	/* Hold the ACI lock across the pending check and the event fetch */
	ixgbe_acquire_lock(&hw->aci.lock);

	/* Check if there are any events pending */
	if (!ixgbe_aci_check_event_pending(hw)) {
		status = IXGBE_ERR_ACI_NO_EVENTS;
		goto aci_get_event_exit;
	}

	/* Obtain pending event */
	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
	status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
	if (status)
		goto aci_get_event_exit;

	/* Returned 0x0014 opcode indicates that no event was obtained */
	if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
		status = IXGBE_ERR_ACI_NO_EVENTS;
		goto aci_get_event_exit;
	}

	/* Determine size of event data; never exceed the caller's buffer */
	e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
	/* Write event descriptor to event info structure */
	memcpy(&e->desc, &desc, sizeof(e->desc));

	/* Check if there are any further events pending */
	if (pending) {
		*pending = ixgbe_aci_check_event_pending(hw);
	}

aci_get_event_exit:
	ixgbe_release_lock(&hw->aci.lock);

	return status;
}
419 
/**
 * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Helper function to fill the descriptor desc with default values
 * and the provided opcode.
 */
void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	/* descriptor fields are stored little-endian */
	desc->opcode = IXGBE_CPU_TO_LE16(opcode);
	/* only the SI flag is set by default; callers OR in additional
	 * flags (e.g. RD, BUF) as needed for their command
	 */
	desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
}
435 
436 /**
437  * ixgbe_aci_get_fw_ver - get the firmware version
438  * @hw: pointer to the HW struct
439  *
440  * Get the firmware version using ACI command (0x0001).
441  *
442  * Return: the exit code of the operation.
443  */
ixgbe_aci_get_fw_ver(struct ixgbe_hw * hw)444 s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
445 {
446 	struct ixgbe_aci_cmd_get_ver *resp;
447 	struct ixgbe_aci_desc desc;
448 	s32 status;
449 
450 	resp = &desc.params.get_ver;
451 
452 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
453 
454 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
455 
456 	if (!status) {
457 		hw->fw_branch = resp->fw_branch;
458 		hw->fw_maj_ver = resp->fw_major;
459 		hw->fw_min_ver = resp->fw_minor;
460 		hw->fw_patch = resp->fw_patch;
461 		hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
462 		hw->api_branch = resp->api_branch;
463 		hw->api_maj_ver = resp->api_major;
464 		hw->api_min_ver = resp->api_minor;
465 		hw->api_patch = resp->api_patch;
466 	}
467 
468 	return status;
469 }
470 
471 /**
472  * ixgbe_aci_send_driver_ver - send the driver version to firmware
473  * @hw: pointer to the HW struct
474  * @dv: driver's major, minor version
475  *
476  * Send the driver version to the firmware
477  * using the ACI command (0x0002).
478  *
479  * Return: the exit code of the operation.
480  * Returns IXGBE_ERR_PARAM, if dv is NULL.
481  */
ixgbe_aci_send_driver_ver(struct ixgbe_hw * hw,struct ixgbe_driver_ver * dv)482 s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
483 {
484 	struct ixgbe_aci_cmd_driver_ver *cmd;
485 	struct ixgbe_aci_desc desc;
486 	u16 len;
487 
488 	cmd = &desc.params.driver_ver;
489 
490 	if (!dv)
491 		return IXGBE_ERR_PARAM;
492 
493 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
494 
495 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
496 	cmd->major_ver = dv->major_ver;
497 	cmd->minor_ver = dv->minor_ver;
498 	cmd->build_ver = dv->build_ver;
499 	cmd->subbuild_ver = dv->subbuild_ver;
500 
501 	len = 0;
502 	while (len < sizeof(dv->driver_string) &&
503 	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
504 		len++;
505 
506 	return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
507 }
508 
/**
 * ixgbe_aci_req_res - request a common resource
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 *
 * Requests a common resource using the ACI command (0x0008).
 * Specifies the maximum time the driver may hold the resource.
 * If the requested resource is currently occupied by some other driver,
 * a busy return value is returned and the timeout field value indicates the
 * maximum time the current owner has to free it.
 *
 * Return: the exit code of the operation.
 */
static s32
ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
		  enum ixgbe_aci_res_access_type access, u8 sdp_number,
		  u32 *timeout)
{
	struct ixgbe_aci_cmd_req_res *cmd_resp;
	struct ixgbe_aci_desc desc;
	s32 status;

	cmd_resp = &desc.params.res_owner;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);

	cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
	cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
	cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
	/* Zero the out-param so it is only meaningful when set from the
	 * completion below.
	 */
	*timeout = 0;

	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 * If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
		*timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);

	return status;
}
557 
558 /**
559  * ixgbe_aci_release_res - release a common resource using ACI
560  * @hw: pointer to the HW struct
561  * @res: resource ID
562  * @sdp_number: resource number
563  *
564  * Release a common resource using ACI command (0x0009).
565  *
566  * Return: the exit code of the operation.
567  */
568 static s32
ixgbe_aci_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,u8 sdp_number)569 ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
570 		      u8 sdp_number)
571 {
572 	struct ixgbe_aci_cmd_req_res *cmd;
573 	struct ixgbe_aci_desc desc;
574 
575 	cmd = &desc.params.res_owner;
576 
577 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
578 
579 	cmd->res_id = IXGBE_CPU_TO_LE16(res);
580 	cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
581 
582 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
583 }
584 
585 /**
586  * ixgbe_acquire_res - acquire the ownership of a resource
587  * @hw: pointer to the HW structure
588  * @res: resource ID
589  * @access: access type (read or write)
590  * @timeout: timeout in milliseconds
591  *
592  * Make an attempt to acquire the ownership of a resource using
593  * the ixgbe_aci_req_res to utilize ACI.
594  * In case if some other driver has previously acquired the resource and
595  * performed any necessary updates, the IXGBE_ERR_ACI_NO_WORK is returned,
596  * and the caller does not obtain the resource and has no further work to do.
597  * If needed, the function will poll until the current lock owner timeouts.
598  *
599  * Return: the exit code of the operation.
600  */
ixgbe_acquire_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u32 timeout)601 s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
602 		      enum ixgbe_aci_res_access_type access, u32 timeout)
603 {
604 #define IXGBE_RES_POLLING_DELAY_MS	10
605 	u32 delay = IXGBE_RES_POLLING_DELAY_MS;
606 	u32 res_timeout = timeout;
607 	u32 retry_timeout = 0;
608 	s32 status;
609 
610 	status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
611 
612 	/* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
613 	 * previously acquired the resource and performed any necessary updates;
614 	 * in this case the caller does not obtain the resource and has no
615 	 * further work to do.
616 	 */
617 	if (status == IXGBE_ERR_ACI_NO_WORK)
618 		goto ixgbe_acquire_res_exit;
619 
620 	/* If necessary, poll until the current lock owner timeouts.
621 	 * Set retry_timeout to the timeout value reported by the FW in the
622 	 * response to the "Request Resource Ownership" (0x0008) Admin Command
623 	 * as it indicates the maximum time the current owner of the resource
624 	 * is allowed to hold it.
625 	 */
626 	retry_timeout = res_timeout;
627 	while (status && retry_timeout && res_timeout) {
628 		msec_delay(delay);
629 		retry_timeout = (retry_timeout > delay) ?
630 			retry_timeout - delay : 0;
631 		status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
632 
633 		if (status == IXGBE_ERR_ACI_NO_WORK)
634 			/* lock free, but no work to do */
635 			break;
636 
637 		if (!status)
638 			/* lock acquired */
639 			break;
640 	}
641 
642 ixgbe_acquire_res_exit:
643 	return status;
644 }
645 
646 /**
647  * ixgbe_release_res - release a common resource
648  * @hw: pointer to the HW structure
649  * @res: resource ID
650  *
651  * Release a common resource using ixgbe_aci_release_res.
652  */
ixgbe_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res)653 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
654 {
655 	u32 total_delay = 0;
656 	s32 status;
657 
658 	status = ixgbe_aci_release_res(hw, res, 0);
659 
660 	/* There are some rare cases when trying to release the resource
661 	 * results in an admin command timeout, so handle them correctly.
662 	 */
663 	while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
664 	       (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
665 		msec_delay(1);
666 		status = ixgbe_aci_release_res(hw, res, 0);
667 		total_delay++;
668 	}
669 }
670 
/**
 * ixgbe_parse_common_caps - Parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Return: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
			struct ixgbe_aci_cmd_list_caps_elem *elem,
			const char *prefix)
{
	/* Capability element fields arrive little-endian; convert once. */
	u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
	u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
	u32 number = IXGBE_LE32_TO_CPU(elem->number);
	u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
	bool found = true;

	UNREFERENCED_1PARAMETER(hw);

	switch (cap) {
	case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		break;
	case IXGBE_ACI_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		break;
	case IXGBE_ACI_CAPS_VMDQ:
		caps->vmdq = (number == 1);
		break;
	case IXGBE_ACI_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		break;
	case IXGBE_ACI_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		break;
	case IXGBE_ACI_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_NVM_VER:
		/* recognized, but no common-cap fields to record */
		break;
	case IXGBE_ACI_CAPS_NVM_MGMT:
		/* 'number' is a bitmask of NVM management features */
		caps->sec_rev_disabled =
			(number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
			true : false;
		caps->update_disabled =
			(number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
			true : false;
		caps->nvm_unified_update =
			(number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		caps->netlist_auth =
			(number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
			true : false;
		break;
	case IXGBE_ACI_CAPS_MAX_MTU:
		caps->max_mtu = number;
		break;
	case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		break;
	case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		break;
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
	{
		/* capability ids IMG0..IMG3 map to array indices 0..3 */
		u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;

		caps->ext_topo_dev_img_ver_high[index] = number;
		caps->ext_topo_dev_img_ver_low[index] = logical_id;
		caps->ext_topo_dev_img_part_num[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
			IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
		caps->ext_topo_dev_img_load_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
		caps->ext_topo_dev_img_prog_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
		break;
	}
	case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
		caps->orom_recovery_update = (number == 1);
		break;
	case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
		caps->next_cluster_id_support = (number == 1);
		DEBUGOUT2("%s: next_cluster_id_support = %d\n",
			  prefix, caps->next_cluster_id_support);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}
786 
787 /**
788  * ixgbe_hweight8 - count set bits among the 8 lowest bits
789  * @w: variable storing set bits to count
790  *
791  * Return: the number of set bits among the 8 lowest bits in the provided value.
792  */
ixgbe_hweight8(u32 w)793 static u8 ixgbe_hweight8(u32 w)
794 {
795 	u8 hweight = 0, i;
796 
797 	for (i = 0; i < 8; i++)
798 		if (w & (1 << i))
799 			hweight++;
800 
801 	return hweight;
802 }
803 
804 /**
805  * ixgbe_hweight32 - count set bits among the 32 lowest bits
806  * @w: variable storing set bits to count
807  *
808  * Return: the number of set bits among the 32 lowest bits in the
809  * provided value.
810  */
ixgbe_hweight32(u32 w)811 static u8 ixgbe_hweight32(u32 w)
812 {
813 	u32 bitMask = 0x1, i;
814 	u8  bitCnt = 0;
815 
816 	for (i = 0; i < 32; i++)
817 	{
818 		if (w & bitMask)
819 			bitCnt++;
820 
821 		bitMask = bitMask << 0x1;
822 	}
823 
824 	return bitCnt;
825 }
826 
827 /**
828  * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
829  * @hw: pointer to the HW struct
830  * @dev_p: pointer to device capabilities structure
831  * @cap: capability element to parse
832  *
833  * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
834  */
835 static void
ixgbe_parse_valid_functions_cap(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)836 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
837 				struct ixgbe_hw_dev_caps *dev_p,
838 				struct ixgbe_aci_cmd_list_caps_elem *cap)
839 {
840 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
841 
842 	UNREFERENCED_1PARAMETER(hw);
843 
844 	dev_p->num_funcs = ixgbe_hweight32(number);
845 }
846 
847 /**
848  * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
849  * @hw: pointer to the HW struct
850  * @dev_p: pointer to device capabilities structure
851  * @cap: capability element to parse
852  *
853  * Parse IXGBE_ACI_CAPS_VF for device capabilities.
854  */
ixgbe_parse_vf_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)855 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
856 				    struct ixgbe_hw_dev_caps *dev_p,
857 				    struct ixgbe_aci_cmd_list_caps_elem *cap)
858 {
859 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
860 
861 	UNREFERENCED_1PARAMETER(hw);
862 
863 	dev_p->num_vfs_exposed = number;
864 }
865 
866 /**
867  * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
868  * @hw: pointer to the HW struct
869  * @dev_p: pointer to device capabilities structure
870  * @cap: capability element to parse
871  *
872  * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
873  */
ixgbe_parse_vsi_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)874 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
875 				     struct ixgbe_hw_dev_caps *dev_p,
876 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
877 {
878 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
879 
880 	UNREFERENCED_1PARAMETER(hw);
881 
882 	dev_p->num_vsi_allocd_to_host = number;
883 }
884 
885 /**
886  * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
887  * @hw: pointer to the HW struct
888  * @dev_p: pointer to device capabilities structure
889  * @cap: capability element to parse
890  *
891  * Parse IXGBE_ACI_CAPS_FD for device capabilities.
892  */
ixgbe_parse_fdir_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)893 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
894 				      struct ixgbe_hw_dev_caps *dev_p,
895 				      struct ixgbe_aci_cmd_list_caps_elem *cap)
896 {
897 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
898 
899 	UNREFERENCED_1PARAMETER(hw);
900 
901 	dev_p->num_flow_director_fltr = number;
902 }
903 
/**
 * ixgbe_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper device to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ixgbe_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structured.
 */
static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
				 struct ixgbe_hw_dev_caps *dev_p,
				 void *buf, u32 cap_count)
{
	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
	u32 i;

	/* buf holds an array of capability elements */
	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;

	/* start from a clean slate before accumulating capabilities */
	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
		bool found;

		/* first try the capabilities shared with function caps */
		found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		/* then handle the device-only capabilities */
		switch (cap) {
		case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
			ixgbe_parse_valid_functions_cap(hw, dev_p,
							&cap_resp[i]);
			break;
		case IXGBE_ACI_CAPS_VF:
			ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case IXGBE_ACI_CAPS_VSI:
			ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case  IXGBE_ACI_CAPS_FD:
			ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				break;
		}
	}

}
958 
959 /**
960  * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
961  * @hw: pointer to the HW struct
962  * @func_p: pointer to function capabilities structure
963  * @cap: pointer to the capability element to parse
964  *
965  * Extract function capabilities for IXGBE_ACI_CAPS_VF.
966  */
ixgbe_parse_vf_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)967 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
968 				     struct ixgbe_hw_func_caps *func_p,
969 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
970 {
971 	u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
972 	u32 number = IXGBE_LE32_TO_CPU(cap->number);
973 
974 	UNREFERENCED_1PARAMETER(hw);
975 
976 	func_p->num_allocd_vfs = number;
977 	func_p->vf_base_id = logical_id;
978 }
979 
980 /**
981  * ixgbe_get_num_per_func - determine number of resources per PF
982  * @hw: pointer to the HW structure
983  * @max: value to be evenly split between each PF
984  *
985  * Determine the number of valid functions by going through the bitmap returned
986  * from parsing capabilities and use this to calculate the number of resources
987  * per PF based on the max value passed in.
988  *
989  * Return: the number of resources per PF or 0, if no PH are available.
990  */
ixgbe_get_num_per_func(struct ixgbe_hw * hw,u32 max)991 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
992 {
993 	u8 funcs;
994 
995 #define IXGBE_CAPS_VALID_FUNCS_M	0xFF
996 	funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
997 			     IXGBE_CAPS_VALID_FUNCS_M);
998 
999 	if (!funcs)
1000 		return 0;
1001 
1002 	return max / funcs;
1003 }
1004 
1005 /**
1006  * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
1007  * @hw: pointer to the HW struct
1008  * @func_p: pointer to function capabilities structure
1009  * @cap: pointer to the capability element to parse
1010  *
1011  * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
1012  */
ixgbe_parse_vsi_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)1013 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
1014 				      struct ixgbe_hw_func_caps *func_p,
1015 				      struct ixgbe_aci_cmd_list_caps_elem *cap)
1016 {
1017 	func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
1018 }
1019 
1020 /**
1021  * ixgbe_parse_func_caps - Parse function capabilities
1022  * @hw: pointer to the HW struct
1023  * @func_p: pointer to function capabilities structure
1024  * @buf: buffer containing the function capability records
1025  * @cap_count: the number of capabilities
1026  *
1027  * Helper function to parse function (0x000A) capabilities list. For
1028  * capabilities shared between device and function, this relies on
1029  * ixgbe_parse_common_caps.
1030  *
1031  * Loop through the list of provided capabilities and extract the relevant
1032  * data into the function capabilities structured.
1033  */
ixgbe_parse_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,void * buf,u32 cap_count)1034 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
1035 				  struct ixgbe_hw_func_caps *func_p,
1036 				  void *buf, u32 cap_count)
1037 {
1038 	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
1039 	u32 i;
1040 
1041 	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
1042 
1043 	memset(func_p, 0, sizeof(*func_p));
1044 
1045 	for (i = 0; i < cap_count; i++) {
1046 		u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
1047 		ixgbe_parse_common_caps(hw, &func_p->common_cap,
1048 					&cap_resp[i], "func caps");
1049 
1050 		switch (cap) {
1051 		case IXGBE_ACI_CAPS_VF:
1052 			ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
1053 			break;
1054 		case IXGBE_ACI_CAPS_VSI:
1055 			ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
1056 			break;
1057 		default:
1058 			/* Don't list common capabilities as unknown */
1059 			break;
1060 		}
1061 	}
1062 
1063 }
1064 
1065 /**
1066  * ixgbe_aci_list_caps - query function/device capabilities
1067  * @hw: pointer to the HW struct
1068  * @buf: a buffer to hold the capabilities
1069  * @buf_size: size of the buffer
1070  * @cap_count: if not NULL, set to the number of capabilities reported
1071  * @opc: capabilities type to discover, device or function
1072  *
1073  * Get the function (0x000A) or device (0x000B) capabilities description from
1074  * firmware and store it in the buffer.
1075  *
1076  * If the cap_count pointer is not NULL, then it is set to the number of
1077  * capabilities firmware will report. Note that if the buffer size is too
1078  * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
1079  * cap_count will still be updated in this case. It is recommended that the
1080  * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
1081  * buffer that firmware could return) to avoid this.
1082  *
1083  * Return: the exit code of the operation.
1084  * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
1085  */
ixgbe_aci_list_caps(struct ixgbe_hw * hw,void * buf,u16 buf_size,u32 * cap_count,enum ixgbe_aci_opc opc)1086 s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
1087 			u32 *cap_count, enum ixgbe_aci_opc opc)
1088 {
1089 	struct ixgbe_aci_cmd_list_caps *cmd;
1090 	struct ixgbe_aci_desc desc;
1091 	s32 status;
1092 
1093 	cmd = &desc.params.get_cap;
1094 
1095 	if (opc != ixgbe_aci_opc_list_func_caps &&
1096 	    opc != ixgbe_aci_opc_list_dev_caps)
1097 		return IXGBE_ERR_PARAM;
1098 
1099 	ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
1100 	status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
1101 
1102 	if (cap_count)
1103 		*cap_count = IXGBE_LE32_TO_CPU(cmd->count);
1104 
1105 	return status;
1106 }
1107 
1108 /**
1109  * ixgbe_discover_dev_caps - Read and extract device capabilities
1110  * @hw: pointer to the hardware structure
1111  * @dev_caps: pointer to device capabilities structure
1112  *
1113  * Read the device capabilities and extract them into the dev_caps structure
1114  * for later use.
1115  *
1116  * Return: the exit code of the operation.
1117  */
ixgbe_discover_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_caps)1118 s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
1119 			    struct ixgbe_hw_dev_caps *dev_caps)
1120 {
1121 	u32 status, cap_count = 0;
1122 	u8 *cbuf = NULL;
1123 
1124 	cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1125 	if (!cbuf)
1126 		return IXGBE_ERR_OUT_OF_MEM;
1127 	/* Although the driver doesn't know the number of capabilities the
1128 	 * device will return, we can simply send a 4KB buffer, the maximum
1129 	 * possible size that firmware can return.
1130 	 */
1131 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1132 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1133 
1134 	status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1135 				     &cap_count,
1136 				     ixgbe_aci_opc_list_dev_caps);
1137 	if (!status)
1138 		ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
1139 
1140 	if (cbuf)
1141 		ixgbe_free(hw, cbuf);
1142 
1143 	return status;
1144 }
1145 
1146 /**
1147  * ixgbe_discover_func_caps - Read and extract function capabilities
1148  * @hw: pointer to the hardware structure
1149  * @func_caps: pointer to function capabilities structure
1150  *
1151  * Read the function capabilities and extract them into the func_caps structure
1152  * for later use.
1153  *
1154  * Return: the exit code of the operation.
1155  */
ixgbe_discover_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_caps)1156 s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
1157 			     struct ixgbe_hw_func_caps *func_caps)
1158 {
1159 	u32 cap_count = 0;
1160 	u8 *cbuf = NULL;
1161 	s32 status;
1162 
1163 	cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1164 	if(!cbuf)
1165 		return IXGBE_ERR_OUT_OF_MEM;
1166 	/* Although the driver doesn't know the number of capabilities the
1167 	 * device will return, we can simply send a 4KB buffer, the maximum
1168 	 * possible size that firmware can return.
1169 	 */
1170 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1171 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1172 
1173 	status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1174 				     &cap_count,
1175 				     ixgbe_aci_opc_list_func_caps);
1176 	if (!status)
1177 		ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
1178 
1179 	if (cbuf)
1180 		ixgbe_free(hw, cbuf);
1181 
1182 	return status;
1183 }
1184 
1185 /**
1186  * ixgbe_get_caps - get info about the HW
1187  * @hw: pointer to the hardware structure
1188  *
1189  * Retrieve both device and function capabilities.
1190  *
1191  * Return: the exit code of the operation.
1192  */
ixgbe_get_caps(struct ixgbe_hw * hw)1193 s32 ixgbe_get_caps(struct ixgbe_hw *hw)
1194 {
1195 	s32 status;
1196 
1197 	status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
1198 	if (status)
1199 		return status;
1200 
1201 	return ixgbe_discover_func_caps(hw, &hw->func_caps);
1202 }
1203 
1204 /**
1205  * ixgbe_aci_disable_rxen - disable RX
1206  * @hw: pointer to the HW struct
1207  *
1208  * Request a safe disable of Receive Enable using ACI command (0x000C).
1209  *
1210  * Return: the exit code of the operation.
1211  */
ixgbe_aci_disable_rxen(struct ixgbe_hw * hw)1212 s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
1213 {
1214 	struct ixgbe_aci_cmd_disable_rxen *cmd;
1215 	struct ixgbe_aci_desc desc;
1216 
1217 	UNREFERENCED_1PARAMETER(hw);
1218 
1219 	cmd = &desc.params.disable_rxen;
1220 
1221 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
1222 
1223 	cmd->lport_num = (u8)hw->bus.func;
1224 
1225 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1226 }
1227 
/**
 * ixgbe_aci_get_phy_caps - returns PHY capabilities
 * @hw: pointer to the HW struct
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 *
 * Returns the various PHY capabilities supported on the Port
 * using ACI command (0x0600).
 *
 * As a side effect, when @report_mode is IXGBE_ACI_REPORT_TOPO_CAP_MEDIA
 * and the command succeeds, the cached PHY type and module type in @hw
 * are refreshed from the response.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
			   struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
{
	struct ixgbe_aci_cmd_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ixgbe_aci_desc desc;
	s32 status;

	cmd = &desc.params.get_phy;

	/* Reject a NULL output buffer or report_mode bits outside the mask */
	if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
		return IXGBE_ERR_PARAM;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);

	/* Ask firmware to report qualified modules when requested */
	if (qual_mods)
		cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);

	cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
	status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);

	/* Cache PHY and module info only for a successful media query */
	if (status == IXGBE_SUCCESS &&
	    report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
		hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
		hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
		memcpy(hw->link.link_info.module_type, &pcaps->module_type,
			   sizeof(hw->link.link_info.module_type));
	}

	return status;
}
1271 
1272 /**
1273  * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
1274  * @phy_caps: PHY capabilities
1275  * @phy_cfg: PHY configuration
1276  *
1277  * Helper function to determine if PHY capabilities match PHY
1278  * configuration
1279  *
1280  * Return: true if PHY capabilities match PHY configuration.
1281  */
1282 bool
ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * phy_caps,struct ixgbe_aci_cmd_set_phy_cfg_data * phy_cfg)1283 ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
1284 			  struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
1285 {
1286 	u8 caps_mask, cfg_mask;
1287 
1288 	if (!phy_caps || !phy_cfg)
1289 		return false;
1290 
1291 	/* These bits are not common between capabilities and configuration.
1292 	 * Do not use them to determine equality.
1293 	 */
1294 	caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
1295 					      IXGBE_ACI_PHY_EN_MOD_QUAL);
1296 	cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
1297 		   ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
1298 
1299 	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
1300 	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
1301 	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
1302 	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
1303 	    phy_caps->eee_cap != phy_cfg->eee_cap ||
1304 	    phy_caps->eeer_value != phy_cfg->eeer_value ||
1305 	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
1306 		return false;
1307 
1308 	return true;
1309 }
1310 
1311 /**
1312  * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
1313  * @caps: PHY ability structure to copy data from
1314  * @cfg: PHY configuration structure to copy data to
1315  *
1316  * Helper function to copy data from PHY capabilities data structure
1317  * to PHY configuration data structure
1318  */
ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * caps,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1319 void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
1320 				struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1321 {
1322 	if (!caps || !cfg)
1323 		return;
1324 
1325 	memset(cfg, 0, sizeof(*cfg));
1326 	cfg->phy_type_low = caps->phy_type_low;
1327 	cfg->phy_type_high = caps->phy_type_high;
1328 	cfg->caps = caps->caps;
1329 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
1330 	cfg->eee_cap = caps->eee_cap;
1331 	cfg->eeer_value = caps->eeer_value;
1332 	cfg->link_fec_opt = caps->link_fec_options;
1333 	cfg->module_compliance_enforcement =
1334 		caps->module_compliance_enforcement;
1335 }
1336 
1337 /**
1338  * ixgbe_aci_set_phy_cfg - set PHY configuration
1339  * @hw: pointer to the HW struct
1340  * @cfg: structure with PHY configuration data to be set
1341  *
1342  * Set the various PHY configuration parameters supported on the Port
1343  * using ACI command (0x0601).
1344  * One or more of the Set PHY config parameters may be ignored in an MFP
1345  * mode as the PF may not have the privilege to set some of the PHY Config
1346  * parameters.
1347  *
1348  * Return: the exit code of the operation.
1349  */
ixgbe_aci_set_phy_cfg(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1350 s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
1351 			  struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1352 {
1353 	struct ixgbe_aci_desc desc;
1354 	s32 status;
1355 
1356 	if (!cfg)
1357 		return IXGBE_ERR_PARAM;
1358 
1359 	/* Ensure that only valid bits of cfg->caps can be turned on. */
1360 	if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK) {
1361 		cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
1362 	}
1363 
1364 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
1365 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
1366 
1367 	status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
1368 
1369 	if (!status)
1370 		hw->phy.curr_user_phy_cfg = *cfg;
1371 
1372 	return status;
1373 }
1374 
/**
 * ixgbe_aci_set_link_restart_an - set up link and restart AN
 * @hw: pointer to the HW struct
 * @ena_link: if true: enable link, if false: disable link
 *
 * Function sets up the link and restarts the Auto-Negotiation over the link
 * using ACI opcode restart_an.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
{
	struct ixgbe_aci_cmd_restart_an *cmd;
	struct ixgbe_aci_desc desc;

	cmd = &desc.params.restart_an;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);

	/* Always request an AN restart; optionally enable the link too */
	cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
	if (ena_link)
		cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
	else
		/* NOTE(review): the ENABLE bit is already clear after the
		 * assignment above, so this clear is a defensive no-op.
		 */
		cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;

	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
}
1401 
1402 /**
1403  * ixgbe_is_media_cage_present - check if media cage is present
1404  * @hw: pointer to the HW struct
1405  *
1406  * Identify presence of media cage using the ACI command (0x06E0).
1407  *
1408  * Return: true if media cage is present, else false. If no cage, then
1409  * media type is backplane or BASE-T.
1410  */
ixgbe_is_media_cage_present(struct ixgbe_hw * hw)1411 static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
1412 {
1413 	struct ixgbe_aci_cmd_get_link_topo *cmd;
1414 	struct ixgbe_aci_desc desc;
1415 
1416 	cmd = &desc.params.get_link_topo;
1417 
1418 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1419 
1420 	cmd->addr.topo_params.node_type_ctx =
1421 		(IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT <<
1422 		 IXGBE_ACI_LINK_TOPO_NODE_CTX_S);
1423 
1424 	/* set node type */
1425 	cmd->addr.topo_params.node_type_ctx |=
1426 		(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M &
1427 		 IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
1428 
1429 	/* Node type cage can be used to determine if cage is present. If AQC
1430 	 * returns error (ENOENT), then no cage present. If no cage present then
1431 	 * connection type is backplane or BASE-T.
1432 	 */
1433 	return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
1434 }
1435 
1436 /**
1437  * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
1438  * @hw: pointer to the HW struct
1439  *
1440  * Try to identify the media type based on the phy type.
1441  * If more than one media type, the ixgbe_media_type_unknown is returned.
1442  * First, phy_type_low is checked, then phy_type_high.
1443  * If none are identified, the ixgbe_media_type_unknown is returned
1444  *
1445  * Return: type of a media based on phy type in form of enum.
1446  */
1447 static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw * hw)1448 ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
1449 {
1450 	struct ixgbe_link_status *hw_link_info;
1451 
1452 	if (!hw)
1453 		return ixgbe_media_type_unknown;
1454 
1455 	hw_link_info = &hw->link.link_info;
1456 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
1457 		/* If more than one media type is selected, report unknown */
1458 		return ixgbe_media_type_unknown;
1459 
1460 	if (hw_link_info->phy_type_low) {
1461 		/* 1G SGMII is a special case where some DA cable PHYs
1462 		 * may show this as an option when it really shouldn't
1463 		 * be since SGMII is meant to be between a MAC and a PHY
1464 		 * in a backplane. Try to detect this case and handle it
1465 		 */
1466 		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
1467 		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
1468 		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
1469 		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
1470 		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
1471 			return ixgbe_media_type_da;
1472 
1473 		switch (hw_link_info->phy_type_low) {
1474 		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
1475 		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
1476 		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
1477 		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
1478 			return ixgbe_media_type_fiber;
1479 		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1480 			return ixgbe_media_type_fiber;
1481 		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
1482 		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
1483 		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
1484 		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
1485 		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
1486 			return ixgbe_media_type_copper;
1487 		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
1488 			return ixgbe_media_type_da;
1489 		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
1490 		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
1491 		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
1492 		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
1493 		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1494 		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
1495 			return ixgbe_media_type_backplane;
1496 		}
1497 	} else {
1498 		switch (hw_link_info->phy_type_high) {
1499 		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
1500 			return ixgbe_media_type_copper;
1501 		}
1502 	}
1503 	return ixgbe_media_type_unknown;
1504 }
1505 
1506 /**
1507  * ixgbe_update_link_info - update status of the HW network link
1508  * @hw: pointer to the HW struct
1509  *
1510  * Update the status of the HW network link.
1511  *
1512  * Return: the exit code of the operation.
1513  */
ixgbe_update_link_info(struct ixgbe_hw * hw)1514 s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
1515 {
1516 	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
1517 	struct ixgbe_link_status *li;
1518 	s32 status;
1519 
1520 	if (!hw)
1521 		return IXGBE_ERR_PARAM;
1522 
1523 	li = &hw->link.link_info;
1524 
1525 	status = ixgbe_aci_get_link_info(hw, true, NULL);
1526 	if (status)
1527 		return status;
1528 
1529 	if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
1530 		pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
1531 			ixgbe_malloc(hw, sizeof(*pcaps));
1532 		if (!pcaps)
1533 			return IXGBE_ERR_OUT_OF_MEM;
1534 
1535 		status = ixgbe_aci_get_phy_caps(hw, false,
1536 						IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1537 						pcaps);
1538 
1539 		if (status == IXGBE_SUCCESS)
1540 			memcpy(li->module_type, &pcaps->module_type,
1541 			       sizeof(li->module_type));
1542 
1543 		ixgbe_free(hw, pcaps);
1544 	}
1545 
1546 	return status;
1547 }
1548 
1549 /**
1550  * ixgbe_get_link_status - get status of the HW network link
1551  * @hw: pointer to the HW struct
1552  * @link_up: pointer to bool (true/false = linkup/linkdown)
1553  *
1554  * Variable link_up is true if link is up, false if link is down.
1555  * The variable link_up is invalid if status is non zero. As a
1556  * result of this call, link status reporting becomes enabled
1557  *
1558  * Return: the exit code of the operation.
1559  */
ixgbe_get_link_status(struct ixgbe_hw * hw,bool * link_up)1560 s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
1561 {
1562 	s32 status = IXGBE_SUCCESS;
1563 
1564 	if (!hw || !link_up)
1565 		return IXGBE_ERR_PARAM;
1566 
1567 	if (hw->link.get_link_info) {
1568 		status = ixgbe_update_link_info(hw);
1569 		if (status) {
1570 			return status;
1571 		}
1572 	}
1573 
1574 	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
1575 
1576 	return status;
1577 }
1578 
/**
 * ixgbe_aci_get_link_info - get the link status
 * @hw: pointer to the HW struct
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 *
 * Get the current Link Status using ACI command (0x607).
 * The current link can be optionally provided to update
 * the status.
 *
 * Side effects on success: hw->link.link_info_old receives the previous
 * link state, hw->link.link_info and hw->fc.current_mode are refreshed
 * from the response, and hw->link.get_link_info is cleared.
 *
 * Return: the link status of the adapter.
 */
s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
			    struct ixgbe_link_status *link)
{
	struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
	struct ixgbe_aci_cmd_get_link_status *resp;
	struct ixgbe_link_status *li_old, *li;
	struct ixgbe_fc_info *hw_fc_info;
	struct ixgbe_aci_desc desc;
	bool tx_pause, rx_pause;
	u8 cmd_flags;
	s32 status;

	if (!hw)
		return IXGBE_ERR_PARAM;

	li_old = &hw->link.link_info_old;
	li = &hw->link.link_info;
	hw_fc_info = &hw->fc;

	/* Request link status; also enables/disables link status events */
	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cmd_flags;

	status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));

	if (status != IXGBE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
				      IXGBE_ACI_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_full;
	else if (tx_pause)
		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
	else if (rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
	else
		hw_fc_info->current_mode = ixgbe_fc_none;

	/* firmware echoes back whether LSE ended up enabled */
	li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	hw->link.get_link_info = false;

	return IXGBE_SUCCESS;
}
1660 
1661 /**
1662  * ixgbe_aci_set_event_mask - set event mask
1663  * @hw: pointer to the HW struct
1664  * @port_num: port number of the physical function
1665  * @mask: event mask to be set
1666  *
1667  * Set the event mask using ACI command (0x0613).
1668  *
1669  * Return: the exit code of the operation.
1670  */
ixgbe_aci_set_event_mask(struct ixgbe_hw * hw,u8 port_num,u16 mask)1671 s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
1672 {
1673 	struct ixgbe_aci_cmd_set_event_mask *cmd;
1674 	struct ixgbe_aci_desc desc;
1675 
1676 	cmd = &desc.params.set_event_mask;
1677 
1678 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
1679 
1680 	cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
1681 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1682 }
1683 
1684 /**
1685  * ixgbe_configure_lse - enable/disable link status events
1686  * @hw: pointer to the HW struct
1687  * @activate: bool value deciding if lse should be enabled nor disabled
1688  * @mask: event mask to be set; a set bit means deactivation of the
1689  * corresponding event
1690  *
1691  * Set the event mask and then enable or disable link status events
1692  *
1693  * Return: the exit code of the operation.
1694  */
ixgbe_configure_lse(struct ixgbe_hw * hw,bool activate,u16 mask)1695 s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
1696 {
1697 	s32 rc;
1698 
1699 	rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
1700 	if (rc) {
1701 		return rc;
1702 	}
1703 
1704 	/* Enabling link status events generation by fw */
1705 	rc = ixgbe_aci_get_link_info(hw, activate, NULL);
1706 	if (rc) {
1707 		return rc;
1708 	}
1709 	return IXGBE_SUCCESS;
1710 }
1711 
1712 /**
1713  * ixgbe_aci_get_netlist_node - get a node handle
1714  * @hw: pointer to the hw struct
1715  * @cmd: get_link_topo AQ structure
1716  * @node_part_number: output node part number if node found
1717  * @node_handle: output node handle parameter if node found
1718  *
1719  * Get the netlist node and assigns it to
1720  * the provided handle using ACI command (0x06E0).
1721  *
1722  * Return: the exit code of the operation.
1723  */
ixgbe_aci_get_netlist_node(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_get_link_topo * cmd,u8 * node_part_number,u16 * node_handle)1724 s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
1725 			       struct ixgbe_aci_cmd_get_link_topo *cmd,
1726 			       u8 *node_part_number, u16 *node_handle)
1727 {
1728 	struct ixgbe_aci_desc desc;
1729 
1730 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1731 	desc.params.get_link_topo = *cmd;
1732 
1733 	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
1734 		return IXGBE_ERR_NOT_SUPPORTED;
1735 
1736 	if (node_handle)
1737 		*node_handle =
1738 			IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
1739 	if (node_part_number)
1740 		*node_part_number = desc.params.get_link_topo.node_part_num;
1741 
1742 	return IXGBE_SUCCESS;
1743 }
1744 
1745 /**
1746  * ixgbe_find_netlist_node - find a node handle
1747  * @hw: pointer to the hw struct
1748  * @node_type_ctx: type of netlist node to look for
1749  * @node_part_number: node part number to look for
1750  * @node_handle: output parameter if node found - optional
1751  *
1752  * Find and return the node handle for a given node type and part number in the
1753  * netlist. When found IXGBE_SUCCESS is returned, IXGBE_ERR_NOT_SUPPORTED
1754  * otherwise. If @node_handle provided, it would be set to found node handle.
1755  *
1756  * Return: the exit code of the operation.
1757  */
ixgbe_find_netlist_node(struct ixgbe_hw * hw,u8 node_type_ctx,u8 node_part_number,u16 * node_handle)1758 s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
1759 			    u8 node_part_number, u16 *node_handle)
1760 {
1761 	struct ixgbe_aci_cmd_get_link_topo cmd;
1762 	u8 rec_node_part_number;
1763 	u16 rec_node_handle;
1764 	s32 status;
1765 	u8 idx;
1766 
1767 	for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
1768 		memset(&cmd, 0, sizeof(cmd));
1769 
1770 		cmd.addr.topo_params.node_type_ctx =
1771 			(node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
1772 		cmd.addr.topo_params.index = idx;
1773 
1774 		status = ixgbe_aci_get_netlist_node(hw, &cmd,
1775 						    &rec_node_part_number,
1776 						    &rec_node_handle);
1777 		if (status)
1778 			return status;
1779 
1780 		if (rec_node_part_number == node_part_number) {
1781 			if (node_handle)
1782 				*node_handle = rec_node_handle;
1783 			return IXGBE_SUCCESS;
1784 		}
1785 	}
1786 
1787 	return IXGBE_ERR_NOT_SUPPORTED;
1788 }
1789 
1790 /**
1791  * ixgbe_aci_read_i2c - read I2C register value
1792  * @hw: pointer to the hw struct
1793  * @topo_addr: topology address for a device to communicate with
1794  * @bus_addr: 7-bit I2C bus address
1795  * @addr: I2C memory address (I2C offset) with up to 16 bits
1796  * @params: I2C parameters: bit [7] - Repeated start,
1797  *				      bits [6:5] data offset size,
1798  *			    bit [4] - I2C address type, bits [3:0] - data size
1799  *				      to read (0-16 bytes)
1800  * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
1801  *
1802  * Read the value of the I2C pin register using ACI command (0x06E2).
1803  *
1804  * Return: the exit code of the operation.
1805  */
ixgbe_aci_read_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1806 s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
1807 		       struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1808 		       u16 bus_addr, __le16 addr, u8 params, u8 *data)
1809 {
1810 	struct ixgbe_aci_desc desc = { 0 };
1811 	struct ixgbe_aci_cmd_i2c *cmd;
1812 	u8 data_size;
1813 	s32 status;
1814 
1815 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
1816 	cmd = &desc.params.read_write_i2c;
1817 
1818 	if (!data)
1819 		return IXGBE_ERR_PARAM;
1820 
1821 	data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1822 		    IXGBE_ACI_I2C_DATA_SIZE_S;
1823 
1824 	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1825 	cmd->topo_addr = topo_addr;
1826 	cmd->i2c_params = params;
1827 	cmd->i2c_addr = addr;
1828 
1829 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1830 	if (!status) {
1831 		struct ixgbe_aci_cmd_read_i2c_resp *resp;
1832 		u8 i;
1833 
1834 		resp = &desc.params.read_i2c_resp;
1835 		for (i = 0; i < data_size; i++) {
1836 			*data = resp->i2c_data[i];
1837 			data++;
1838 		}
1839 	}
1840 
1841 	return status;
1842 }
1843 
1844 /**
1845  * ixgbe_aci_write_i2c - write a value to I2C register
1846  * @hw: pointer to the hw struct
1847  * @topo_addr: topology address for a device to communicate with
1848  * @bus_addr: 7-bit I2C bus address
1849  * @addr: I2C memory address (I2C offset) with up to 16 bits
1850  * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
1851  *				      to write (0-7 bytes)
1852  * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
1853  *
1854  * Write a value to the I2C pin register using ACI command (0x06E3).
1855  *
1856  * Return: the exit code of the operation.
1857  */
ixgbe_aci_write_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1858 s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
1859 			struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1860 			u16 bus_addr, __le16 addr, u8 params, u8 *data)
1861 {
1862 	struct ixgbe_aci_desc desc = { 0 };
1863 	struct ixgbe_aci_cmd_i2c *cmd;
1864 	u8 i, data_size;
1865 
1866 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
1867 	cmd = &desc.params.read_write_i2c;
1868 
1869 	data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1870 		    IXGBE_ACI_I2C_DATA_SIZE_S;
1871 
1872 	/* data_size limited to 4 */
1873 	if (data_size > 4)
1874 		return IXGBE_ERR_PARAM;
1875 
1876 	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1877 	cmd->topo_addr = topo_addr;
1878 	cmd->i2c_params = params;
1879 	cmd->i2c_addr = addr;
1880 
1881 	for (i = 0; i < data_size; i++) {
1882 		cmd->i2c_data[i] = *data;
1883 		data++;
1884 	}
1885 
1886 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1887 }
1888 
1889 /**
1890  * ixgbe_aci_set_port_id_led - set LED value for the given port
1891  * @hw: pointer to the HW struct
1892  * @orig_mode: set LED original mode
1893  *
1894  * Set LED value for the given port (0x06E9)
1895  *
1896  * Return: the exit code of the operation.
1897  */
ixgbe_aci_set_port_id_led(struct ixgbe_hw * hw,bool orig_mode)1898 s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
1899 {
1900 	struct ixgbe_aci_cmd_set_port_id_led *cmd;
1901 	struct ixgbe_aci_desc desc;
1902 
1903 	cmd = &desc.params.set_port_id_led;
1904 
1905 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
1906 
1907 	cmd->lport_num = (u8)hw->bus.func;
1908 	cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
1909 
1910 	if (orig_mode)
1911 		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
1912 	else
1913 		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
1914 
1915 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1916 }
1917 
1918 /**
1919  * ixgbe_aci_set_gpio - set GPIO pin state
1920  * @hw: pointer to the hw struct
1921  * @gpio_ctrl_handle: GPIO controller node handle
1922  * @pin_idx: IO Number of the GPIO that needs to be set
1923  * @value: SW provide IO value to set in the LSB
1924  *
1925  * Set the GPIO pin state that is a part of the topology
1926  * using ACI command (0x06EC).
1927  *
1928  * Return: the exit code of the operation.
1929  */
ixgbe_aci_set_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool value)1930 s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1931 		       bool value)
1932 {
1933 	struct ixgbe_aci_cmd_gpio *cmd;
1934 	struct ixgbe_aci_desc desc;
1935 
1936 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
1937 	cmd = &desc.params.read_write_gpio;
1938 	cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1939 	cmd->gpio_num = pin_idx;
1940 	cmd->gpio_val = value ? 1 : 0;
1941 
1942 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1943 }
1944 
1945 /**
1946  * ixgbe_aci_get_gpio - get GPIO pin state
1947  * @hw: pointer to the hw struct
1948  * @gpio_ctrl_handle: GPIO controller node handle
1949  * @pin_idx: IO Number of the GPIO that needs to be set
1950  * @value: IO value read
1951  *
1952  * Get the value of a GPIO signal which is part of the topology
1953  * using ACI command (0x06ED).
1954  *
1955  * Return: the exit code of the operation.
1956  */
ixgbe_aci_get_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool * value)1957 s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1958 		       bool *value)
1959 {
1960 	struct ixgbe_aci_cmd_gpio *cmd;
1961 	struct ixgbe_aci_desc desc;
1962 	s32 status;
1963 
1964 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
1965 	cmd = &desc.params.read_write_gpio;
1966 	cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1967 	cmd->gpio_num = pin_idx;
1968 
1969 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1970 	if (status)
1971 		return status;
1972 
1973 	*value = !!cmd->gpio_val;
1974 	return IXGBE_SUCCESS;
1975 }
1976 
1977 /**
1978  * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
1979  * @hw: pointer to the HW struct
1980  * @lport: bits [7:0] = logical port, bit [8] = logical port valid
1981  * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
1982  * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
1983  * @page: QSFP page
1984  * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
1985  * @data: pointer to data buffer to be read/written to the I2C device.
1986  * @length: 1-16 for read, 1 for write.
1987  * @write: 0 read, 1 for write.
1988  *
1989  * Read/write SFF EEPROM using ACI command (0x06EE).
1990  *
1991  * Return: the exit code of the operation.
1992  */
ixgbe_aci_sff_eeprom(struct ixgbe_hw * hw,u16 lport,u8 bus_addr,u16 mem_addr,u8 page,u8 page_bank_ctrl,u8 * data,u8 length,bool write)1993 s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
1994 			 u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
1995 			 u8 length, bool write)
1996 {
1997 	struct ixgbe_aci_cmd_sff_eeprom *cmd;
1998 	struct ixgbe_aci_desc desc;
1999 	s32 status;
2000 
2001 	if (!data || (mem_addr & 0xff00))
2002 		return IXGBE_ERR_PARAM;
2003 
2004 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
2005 	cmd = &desc.params.read_write_sff_param;
2006 	desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2007 	cmd->lport_num = (u8)(lport & 0xff);
2008 	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
2009 	cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
2010 					 IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
2011 					((page_bank_ctrl <<
2012 					  IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
2013 					 IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
2014 	cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
2015 	cmd->module_page = page;
2016 	if (write)
2017 		cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
2018 
2019 	status = ixgbe_aci_send_cmd(hw, &desc, data, length);
2020 	return status;
2021 }
2022 
2023 /**
2024  * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
2025  * @hw: pointer to the hardware structure
2026  * @topo_params: pointer to structure storing topology parameters for a device
2027  *
2028  * Program Topology Device NVM using ACI command (0x06F2).
2029  *
2030  * Return: the exit code of the operation.
2031  */
ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params)2032 s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
2033 			struct ixgbe_aci_cmd_link_topo_params *topo_params)
2034 {
2035 	struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
2036 	struct ixgbe_aci_desc desc;
2037 
2038 	cmd = &desc.params.prog_topo_dev_nvm;
2039 
2040 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
2041 
2042 	memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2043 
2044 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2045 }
2046 
2047 /**
2048  * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
2049  * @hw: pointer to the hardware structure
2050  * @topo_params: pointer to structure storing topology parameters for a device
2051  * @start_address: byte offset in the topology device NVM
2052  * @data: pointer to data buffer
2053  * @data_size: number of bytes to be read from the topology device NVM
2054  * Read Topology Device NVM (0x06F3)
2055  *
2056  * Read Topology of Device NVM using ACI command (0x06F3).
2057  *
2058  * Return: the exit code of the operation.
2059  */
ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params,u32 start_address,u8 * data,u8 data_size)2060 s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
2061 			struct ixgbe_aci_cmd_link_topo_params *topo_params,
2062 			u32 start_address, u8 *data, u8 data_size)
2063 {
2064 	struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
2065 	struct ixgbe_aci_desc desc;
2066 	s32 status;
2067 
2068 	if (!data || data_size == 0 ||
2069 	    data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
2070 		return IXGBE_ERR_PARAM;
2071 
2072 	cmd = &desc.params.read_topo_dev_nvm;
2073 
2074 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
2075 
2076 	desc.datalen = IXGBE_CPU_TO_LE16(data_size);
2077 	memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2078 	cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
2079 
2080 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2081 	if (status)
2082 		return status;
2083 
2084 	memcpy(data, cmd->data_read, data_size);
2085 
2086 	return IXGBE_SUCCESS;
2087 }
2088 
2089 /**
2090  * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
2091  * @hw: pointer to the HW structure
2092  * @access: NVM access type (read or write)
2093  *
2094  * Request NVM ownership.
2095  *
2096  * Return: the exit code of the operation.
2097  */
ixgbe_acquire_nvm(struct ixgbe_hw * hw,enum ixgbe_aci_res_access_type access)2098 s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
2099 		      enum ixgbe_aci_res_access_type access)
2100 {
2101 	u32 fla;
2102 
2103 	/* Skip if we are in blank NVM programming mode */
2104 	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2105 	if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2106 		return IXGBE_SUCCESS;
2107 
2108 	return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
2109 				 IXGBE_NVM_TIMEOUT);
2110 }
2111 
2112 /**
2113  * ixgbe_release_nvm - Generic request for releasing the NVM ownership
2114  * @hw: pointer to the HW structure
2115  *
2116  * Release NVM ownership.
2117  */
ixgbe_release_nvm(struct ixgbe_hw * hw)2118 void ixgbe_release_nvm(struct ixgbe_hw *hw)
2119 {
2120 	u32 fla;
2121 
2122 	/* Skip if we are in blank NVM programming mode */
2123 	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2124 	if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2125 		return;
2126 
2127 	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
2128 }
2129 
2130 
2131 /**
2132  * ixgbe_aci_read_nvm - read NVM
2133  * @hw: pointer to the HW struct
2134  * @module_typeid: module pointer location in words from the NVM beginning
2135  * @offset: byte offset from the module beginning
2136  * @length: length of the section to be read (in bytes from the offset)
2137  * @data: command buffer (size [bytes] = length)
2138  * @last_command: tells if this is the last command in a series
2139  * @read_shadow_ram: tell if this is a shadow RAM read
2140  *
2141  * Read the NVM using ACI command (0x0701).
2142  *
2143  * Return: the exit code of the operation.
2144  */
s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
		       u16 length, void *data, bool last_command,
		       bool read_shadow_ram)
{
	struct ixgbe_aci_desc desc;
	struct ixgbe_aci_cmd_nvm *cmd;

	cmd = &desc.params.nvm;

	/* The descriptor carries only 24 offset bits (16 low + 8 high). */
	if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
		return IXGBE_ERR_PARAM;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);

	/* Reads from the NVM start point go to flash only, unless the
	 * caller explicitly asked for a Shadow RAM read.
	 */
	if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
		cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;

	/* If this is the last command in a series, set the proper flag. */
	if (last_command)
		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
	cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
	/* Split the 24-bit offset into its low word and high byte. */
	cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
	cmd->length = IXGBE_CPU_TO_LE16(length);

	return ixgbe_aci_send_cmd(hw, &desc, data, length);
}
2172 
2173 /**
2174  * ixgbe_aci_erase_nvm - erase NVM sector
2175  * @hw: pointer to the HW struct
2176  * @module_typeid: module pointer location in words from the NVM beginning
2177  *
2178  * Erase the NVM sector using the ACI command (0x0702).
2179  *
2180  * Return: the exit code of the operation.
2181  */
s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
{
	struct ixgbe_aci_desc desc;
	struct ixgbe_aci_cmd_nvm *cmd;
	s32 status;
	__le16 len;

	/* Fetch the module's size before erasing it:
	 * read a length value from SR, so module_typeid is equal to 0;
	 * calculate offset where module size is placed from bytes to words;
	 * set last command and read from SR values to true.
	 */
	status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
				 true);
	if (status)
		return status;

	cmd = &desc.params.nvm;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);

	/* Erase the whole module: offset 0, length as just read from SR.
	 * len is already little-endian, so no conversion is applied.
	 */
	cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
	cmd->length = len;
	cmd->offset_low = 0;
	cmd->offset_high = 0;

	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
}
2208 
2209 /**
2210  * ixgbe_aci_update_nvm - update NVM
2211  * @hw: pointer to the HW struct
2212  * @module_typeid: module pointer location in words from the NVM beginning
2213  * @offset: byte offset from the module beginning
2214  * @length: length of the section to be written (in bytes from the offset)
2215  * @data: command buffer (size [bytes] = length)
2216  * @last_command: tells if this is the last command in a series
2217  * @command_flags: command parameters
2218  *
2219  * Update the NVM using the ACI command (0x0703).
2220  *
2221  * Return: the exit code of the operation.
2222  */
ixgbe_aci_update_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,u8 command_flags)2223 s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
2224 			 u32 offset, u16 length, void *data,
2225 			 bool last_command, u8 command_flags)
2226 {
2227 	struct ixgbe_aci_desc desc;
2228 	struct ixgbe_aci_cmd_nvm *cmd;
2229 
2230 	cmd = &desc.params.nvm;
2231 
2232 	/* In offset the highest byte must be zeroed. */
2233 	if (offset & 0xFF000000)
2234 		return IXGBE_ERR_PARAM;
2235 
2236 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
2237 
2238 	cmd->cmd_flags |= command_flags;
2239 
2240 	/* If this is the last command in a series, set the proper flag. */
2241 	if (last_command)
2242 		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2243 	cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2244 	cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
2245 	cmd->offset_high = (offset >> 16) & 0xFF;
2246 	cmd->length = IXGBE_CPU_TO_LE16(length);
2247 
2248 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2249 
2250 	return ixgbe_aci_send_cmd(hw, &desc, data, length);
2251 }
2252 
2253 /**
2254  * ixgbe_aci_read_nvm_cfg - read an NVM config block
2255  * @hw: pointer to the HW struct
2256  * @cmd_flags: NVM access admin command bits
2257  * @field_id: field or feature ID
2258  * @data: buffer for result
2259  * @buf_size: buffer size
2260  * @elem_count: pointer to count of elements read by FW
2261  *
2262  * Reads a single or multiple feature/field ID and data using ACI command
2263  * (0x0704).
2264  *
2265  * Return: the exit code of the operation.
2266  */
ixgbe_aci_read_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,u16 field_id,void * data,u16 buf_size,u16 * elem_count)2267 s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2268 			   u16 field_id, void *data, u16 buf_size,
2269 			   u16 *elem_count)
2270 {
2271 	struct ixgbe_aci_cmd_nvm_cfg *cmd;
2272 	struct ixgbe_aci_desc desc;
2273 	s32 status;
2274 
2275 	cmd = &desc.params.nvm_cfg;
2276 
2277 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
2278 
2279 	cmd->cmd_flags = cmd_flags;
2280 	cmd->id = IXGBE_CPU_TO_LE16(field_id);
2281 
2282 	status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2283 	if (!status && elem_count)
2284 		*elem_count = IXGBE_LE16_TO_CPU(cmd->count);
2285 
2286 	return status;
2287 }
2288 
2289 /**
2290  * ixgbe_aci_write_nvm_cfg - write an NVM config block
2291  * @hw: pointer to the HW struct
2292  * @cmd_flags: NVM access admin command bits
2293  * @data: buffer for result
2294  * @buf_size: buffer size
2295  * @elem_count: count of elements to be written
2296  *
2297  * Writes a single or multiple feature/field ID and data using ACI command
2298  * (0x0705).
2299  *
2300  * Return: the exit code of the operation.
2301  */
ixgbe_aci_write_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,void * data,u16 buf_size,u16 elem_count)2302 s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2303 			    void *data, u16 buf_size, u16 elem_count)
2304 {
2305 	struct ixgbe_aci_cmd_nvm_cfg *cmd;
2306 	struct ixgbe_aci_desc desc;
2307 
2308 	cmd = &desc.params.nvm_cfg;
2309 
2310 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
2311 	desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2312 
2313 	cmd->count = IXGBE_CPU_TO_LE16(elem_count);
2314 	cmd->cmd_flags = cmd_flags;
2315 
2316 	return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2317 }
2318 
2319 /**
2320  * ixgbe_nvm_validate_checksum - validate checksum
2321  * @hw: pointer to the HW struct
2322  *
2323  * Verify NVM PFA checksum validity using ACI command (0x0706).
2324  * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
2325  * The function acquires and then releases the NVM ownership.
2326  *
2327  * Return: the exit code of the operation.
2328  */
ixgbe_nvm_validate_checksum(struct ixgbe_hw * hw)2329 s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
2330 {
2331 	struct ixgbe_aci_cmd_nvm_checksum *cmd;
2332 	struct ixgbe_aci_desc desc;
2333 	s32 status;
2334 
2335 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2336 	if (status)
2337 		return status;
2338 
2339 	cmd = &desc.params.nvm_checksum;
2340 
2341 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2342 	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
2343 
2344 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2345 
2346 	ixgbe_release_nvm(hw);
2347 
2348 	if (!status)
2349 		if (IXGBE_LE16_TO_CPU(cmd->checksum) !=
2350 		    IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
2351 			ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
2352 				      "Invalid Shadow Ram checksum");
2353 			status = IXGBE_ERR_NVM_CHECKSUM;
2354 		}
2355 
2356 	return status;
2357 }
2358 
2359 /**
2360  * ixgbe_nvm_recalculate_checksum - recalculate checksum
2361  * @hw: pointer to the HW struct
2362  *
2363  * Recalculate NVM PFA checksum using ACI command (0x0706).
2364  * The function acquires and then releases the NVM ownership.
2365  *
2366  * Return: the exit code of the operation.
2367  */
ixgbe_nvm_recalculate_checksum(struct ixgbe_hw * hw)2368 s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
2369 {
2370 	struct ixgbe_aci_cmd_nvm_checksum *cmd;
2371 	struct ixgbe_aci_desc desc;
2372 	s32 status;
2373 
2374 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2375 	if (status)
2376 		return status;
2377 
2378 	cmd = &desc.params.nvm_checksum;
2379 
2380 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2381 	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
2382 
2383 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2384 
2385 	ixgbe_release_nvm(hw);
2386 
2387 	return status;
2388 }
2389 
2390 /**
2391  * ixgbe_nvm_write_activate - NVM activate write
2392  * @hw: pointer to the HW struct
2393  * @cmd_flags: flags for write activate command
2394  * @response_flags: response indicators from firmware
2395  *
2396  * Update the control word with the required banks' validity bits
2397  * and dumps the Shadow RAM to flash using ACI command (0x0707).
2398  *
2399  * cmd_flags controls which banks to activate, the preservation level to use
2400  * when activating the NVM bank, and whether an EMP reset is required for
2401  * activation.
2402  *
2403  * Note that the 16bit cmd_flags value is split between two separate 1 byte
2404  * flag values in the descriptor.
2405  *
2406  * On successful return of the firmware command, the response_flags variable
2407  * is updated with the flags reported by firmware indicating certain status,
2408  * such as whether EMP reset is enabled.
2409  *
2410  * Return: the exit code of the operation.
2411  */
ixgbe_nvm_write_activate(struct ixgbe_hw * hw,u16 cmd_flags,u8 * response_flags)2412 s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
2413 			     u8 *response_flags)
2414 {
2415 	struct ixgbe_aci_desc desc;
2416 	struct ixgbe_aci_cmd_nvm *cmd;
2417 	s32 status;
2418 
2419 	cmd = &desc.params.nvm;
2420 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
2421 					ixgbe_aci_opc_nvm_write_activate);
2422 
2423 	cmd->cmd_flags = LO_BYTE(cmd_flags);
2424 	cmd->offset_high = HI_BYTE(cmd_flags);
2425 
2426 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2427 	if (!status && response_flags)
2428 		*response_flags = cmd->cmd_flags;
2429 
2430 	return status;
2431 }
2432 
2433 /**
2434  * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
2435  * @hw: pointer to the HW structure
2436  * @bank: whether to read from the active or inactive flash bank
2437  * @module: the module to read from
2438  *
2439  * Based on the module, lookup the module offset from the beginning of the
2440  * flash.
2441  *
2442  * Return: the flash offset. Note that a value of zero is invalid and must be
2443  * treated as an error.
2444  */
ixgbe_get_flash_bank_offset(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module)2445 static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
2446 				       enum ixgbe_bank_select bank,
2447 				       u16 module)
2448 {
2449 	struct ixgbe_bank_info *banks = &hw->flash.banks;
2450 	enum ixgbe_flash_bank active_bank;
2451 	bool second_bank_active;
2452 	u32 offset, size;
2453 
2454 	switch (module) {
2455 	case E610_SR_1ST_NVM_BANK_PTR:
2456 		offset = banks->nvm_ptr;
2457 		size = banks->nvm_size;
2458 		active_bank = banks->nvm_bank;
2459 		break;
2460 	case E610_SR_1ST_OROM_BANK_PTR:
2461 		offset = banks->orom_ptr;
2462 		size = banks->orom_size;
2463 		active_bank = banks->orom_bank;
2464 		break;
2465 	case E610_SR_NETLIST_BANK_PTR:
2466 		offset = banks->netlist_ptr;
2467 		size = banks->netlist_size;
2468 		active_bank = banks->netlist_bank;
2469 		break;
2470 	default:
2471 		return 0;
2472 	}
2473 
2474 	switch (active_bank) {
2475 	case IXGBE_1ST_FLASH_BANK:
2476 		second_bank_active = false;
2477 		break;
2478 	case IXGBE_2ND_FLASH_BANK:
2479 		second_bank_active = true;
2480 		break;
2481 	default:
2482 		return 0;
2483     }
2484 
2485 	/* The second flash bank is stored immediately following the first
2486 	 * bank. Based on whether the 1st or 2nd bank is active, and whether
2487 	 * we want the active or inactive bank, calculate the desired offset.
2488 	 */
2489 	switch (bank) {
2490 	case IXGBE_ACTIVE_FLASH_BANK:
2491 		return offset + (second_bank_active ? size : 0);
2492 	case IXGBE_INACTIVE_FLASH_BANK:
2493 		return offset + (second_bank_active ? 0 : size);
2494 	}
2495 
2496 	return 0;
2497 }
2498 
2499 /**
2500  * ixgbe_read_flash_module - Read a word from one of the main NVM modules
2501  * @hw: pointer to the HW structure
2502  * @bank: which bank of the module to read
2503  * @module: the module to read
2504  * @offset: the offset into the module in bytes
2505  * @data: storage for the word read from the flash
2506  * @length: bytes of data to read
2507  *
2508  * Read data from the specified flash module. The bank parameter indicates
2509  * whether or not to read from the active bank or the inactive bank of that
2510  * module.
2511  *
2512  * The word will be read using flat NVM access, and relies on the
2513  * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
2514  * during initialization.
2515  *
2516  * Return: the exit code of the operation.
2517  */
ixgbe_read_flash_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module,u32 offset,u8 * data,u32 length)2518 static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
2519 				   enum ixgbe_bank_select bank,
2520 				   u16 module, u32 offset, u8 *data, u32 length)
2521 {
2522 	s32 status;
2523 	u32 start;
2524 
2525 	start = ixgbe_get_flash_bank_offset(hw, bank, module);
2526 	if (!start) {
2527 		return IXGBE_ERR_PARAM;
2528 	}
2529 
2530 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2531 	if (status)
2532 		return status;
2533 
2534 	status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
2535 
2536 	ixgbe_release_nvm(hw);
2537 
2538 	return status;
2539 }
2540 
2541 /**
2542  * ixgbe_read_netlist_module - Read data from the netlist module area
2543  * @hw: pointer to the HW structure
2544  * @bank: whether to read from the active or inactive module
2545  * @offset: offset into the netlist to read from
2546  * @data: storage for returned word value
2547  *
2548  * Read a word from the specified netlist bank.
2549  *
2550  * Return: the exit code of the operation.
2551  */
ixgbe_read_netlist_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2552 static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
2553 				     enum ixgbe_bank_select bank,
2554 				     u32 offset, u16 *data)
2555 {
2556 	__le16 data_local;
2557 	s32 status;
2558 
2559 	status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
2560 					 offset * sizeof(u16),
2561 					 (u8 *)&data_local,
2562 					 sizeof(u16));
2563 	if (!status)
2564 		*data = IXGBE_LE16_TO_CPU(data_local);
2565 
2566 	return status;
2567 }
2568 
2569 /**
2570  * ixgbe_read_nvm_module - Read from the active main NVM module
2571  * @hw: pointer to the HW structure
2572  * @bank: whether to read from active or inactive NVM module
2573  * @offset: offset into the NVM module to read, in words
2574  * @data: storage for returned word value
2575  *
2576  * Read the specified word from the active NVM module. This includes the CSS
2577  * header at the start of the NVM module.
2578  *
2579  * Return: the exit code of the operation.
2580  */
ixgbe_read_nvm_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2581 static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
2582 				 enum ixgbe_bank_select bank,
2583 				  u32 offset, u16 *data)
2584 {
2585 	__le16 data_local;
2586 	s32 status;
2587 
2588 	status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
2589 					 offset * sizeof(u16),
2590 					 (u8 *)&data_local,
2591 					 sizeof(u16));
2592 	if (!status)
2593 		*data = IXGBE_LE16_TO_CPU(data_local);
2594 
2595 	return status;
2596 }
2597 
2598 /**
2599  * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
2600  * NVM CSS header
2601  * @hw: pointer to the HW struct
2602  * @bank: whether to read from the active or inactive flash bank
2603  * @hdr_len: storage for header length in words
2604  *
2605  * Read the CSS header length from the NVM CSS header and add the
2606  * Authentication header size, and then convert to words.
2607  *
2608  * Return: the exit code of the operation.
2609  */
ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * hdr_len)2610 static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
2611 				     enum ixgbe_bank_select bank,
2612 				     u32 *hdr_len)
2613 {
2614 	u16 hdr_len_l, hdr_len_h;
2615 	u32 hdr_len_dword;
2616 	s32 status;
2617 
2618 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
2619 				       &hdr_len_l);
2620 	if (status)
2621 		return status;
2622 
2623 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
2624 				       &hdr_len_h);
2625 	if (status)
2626 		return status;
2627 
2628 	/* CSS header length is in DWORD, so convert to words and add
2629 	 * authentication header size
2630 	 */
2631 	hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
2632 	*hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
2633 
2634 	return IXGBE_SUCCESS;
2635 }
2636 
2637 /**
2638  * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
2639  * @hw: pointer to the HW structure
2640  * @bank: whether to read from the active or inactive NVM module
2641  * @offset: offset into the Shadow RAM copy to read, in words
2642  * @data: storage for returned word value
2643  *
2644  * Read the specified word from the copy of the Shadow RAM found in the
2645  * specified NVM module.
2646  *
2647  * Return: the exit code of the operation.
2648  */
ixgbe_read_nvm_sr_copy(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2649 static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
2650 				  enum ixgbe_bank_select bank,
2651 				  u32 offset, u16 *data)
2652 {
2653 	u32 hdr_len;
2654 	s32 status;
2655 
2656 	status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
2657 	if (status)
2658 		return status;
2659 
2660 	hdr_len = ROUND_UP(hdr_len, 32);
2661 
2662 	return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
2663 }
2664 
2665 /**
2666  * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
2667  * @hw: pointer to the HW struct
2668  * @minsrevs: structure to store NVM and OROM minsrev values
2669  *
2670  * Read the Minimum Security Revision TLV and extract
2671  * the revision values from the flash image
2672  * into a readable structure for processing.
2673  *
2674  * Return: the exit code of the operation.
2675  */
ixgbe_get_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2676 s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
2677 			   struct ixgbe_minsrev_info *minsrevs)
2678 {
2679 	struct ixgbe_aci_cmd_nvm_minsrev data;
2680 	s32 status;
2681 	u16 valid;
2682 
2683 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2684 	if (status)
2685 		return status;
2686 
2687 	status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
2688 				    0, sizeof(data), &data,
2689 				    true, false);
2690 
2691 	ixgbe_release_nvm(hw);
2692 
2693 	if (status)
2694 		return status;
2695 
2696 	valid = IXGBE_LE16_TO_CPU(data.validity);
2697 
2698 	/* Extract NVM minimum security revision */
2699 	if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
2700 		u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
2701 		u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
2702 
2703 		minsrevs->nvm = minsrev_h << 16 | minsrev_l;
2704 		minsrevs->nvm_valid = true;
2705 	}
2706 
2707 	/* Extract the OROM minimum security revision */
2708 	if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
2709 		u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
2710 		u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
2711 
2712 		minsrevs->orom = minsrev_h << 16 | minsrev_l;
2713 		minsrevs->orom_valid = true;
2714 	}
2715 
2716 	return IXGBE_SUCCESS;
2717 }
2718 
2719 /**
2720  * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
2721  * @hw: pointer to the HW struct
2722  * @minsrevs: minimum security revision information
2723  *
2724  * Update the NVM or Option ROM minimum security revision fields in the PFA
2725  * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
2726  * fields to determine what update is being requested. If the valid bit is not
2727  * set for that module, then the associated minsrev will be left as is.
2728  *
2729  * Return: the exit code of the operation.
2730  */
ixgbe_update_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2731 s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
2732 			      struct ixgbe_minsrev_info *minsrevs)
2733 {
2734 	struct ixgbe_aci_cmd_nvm_minsrev data;
2735 	s32 status;
2736 
2737 	if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
2738 		return IXGBE_ERR_PARAM;
2739 	}
2740 
2741 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2742 	if (status)
2743 		return status;
2744 
2745 	/* Get current data */
2746 	status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2747 				    sizeof(data), &data, true, false);
2748 	if (status)
2749 		goto exit_release_res;
2750 
2751 	if (minsrevs->nvm_valid) {
2752 		data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
2753 		data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
2754 		data.validity |=
2755 			IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
2756 	}
2757 
2758 	if (minsrevs->orom_valid) {
2759 		data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
2760 		data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
2761 		data.validity |=
2762 			IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
2763 	}
2764 
2765 	/* Update flash data */
2766 	status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2767 				      sizeof(data), &data, false,
2768 				      IXGBE_ACI_NVM_SPECIAL_UPDATE);
2769 	if (status)
2770 		goto exit_release_res;
2771 
2772 	/* Dump the Shadow RAM to the flash */
2773 	status = ixgbe_nvm_write_activate(hw, 0, NULL);
2774 
2775 exit_release_res:
2776 	ixgbe_release_nvm(hw);
2777 
2778 	return status;
2779 }
2780 
2781 /**
2782  * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
2783  * @hw: pointer to the HW struct
2784  * @bank: whether to read from the active or inactive flash bank
2785  * @srev: storage for security revision
2786  *
2787  * Read the security revision out of the CSS header of the active NVM module
2788  * bank.
2789  *
2790  * Return: the exit code of the operation.
2791  */
ixgbe_get_nvm_srev(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * srev)2792 static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
2793 			      enum ixgbe_bank_select bank, u32 *srev)
2794 {
2795 	u16 srev_l, srev_h;
2796 	s32 status;
2797 
2798 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
2799 	if (status)
2800 		return status;
2801 
2802 	status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
2803 	if (status)
2804 		return status;
2805 
2806 	*srev = srev_h << 16 | srev_l;
2807 
2808 	return IXGBE_SUCCESS;
2809 }
2810 
2811 /**
2812  * ixgbe_get_nvm_ver_info - Read NVM version information
2813  * @hw: pointer to the HW struct
2814  * @bank: whether to read from the active or inactive flash bank
2815  * @nvm: pointer to NVM info structure
2816  *
2817  * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
2818  * in the nvm info structure.
2819  *
2820  * Return: the exit code of the operation.
2821  */
ixgbe_get_nvm_ver_info(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,struct ixgbe_nvm_info * nvm)2822 static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
2823 				  enum ixgbe_bank_select bank,
2824 				  struct ixgbe_nvm_info *nvm)
2825 {
2826 	u16 eetrack_lo, eetrack_hi, ver;
2827 	s32 status;
2828 
2829 	status = ixgbe_read_nvm_sr_copy(hw, bank,
2830 					E610_SR_NVM_DEV_STARTER_VER, &ver);
2831 	if (status) {
2832 		return status;
2833 	}
2834 
2835 	nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
2836 	nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
2837 
2838 	status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
2839 					&eetrack_lo);
2840 	if (status) {
2841 		return status;
2842 	}
2843 	status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
2844 					&eetrack_hi);
2845 	if (status) {
2846 		return status;
2847 	}
2848 
2849 	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
2850 
2851 	status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
2852 
2853 	return IXGBE_SUCCESS;
2854 }
2855 
2856 /**
2857  * ixgbe_get_inactive_nvm_ver - Read Option ROM version from the inactive bank
2858  * @hw: pointer to the HW structure
2859  * @nvm: storage for Option ROM version information
2860  *
2861  * Reads the NVM EETRACK ID, Map version, and security revision of the
2862  * inactive NVM bank. Used to access version data for a pending update that
2863  * has not yet been activated.
2864  *
2865  * Return: the exit code of the operation.
2866  */
ixgbe_get_inactive_nvm_ver(struct ixgbe_hw * hw,struct ixgbe_nvm_info * nvm)2867 s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
2868 {
2869 	return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
2870 }
2871 
2872 /**
2873  * ixgbe_get_active_nvm_ver - Read Option ROM version from the active bank
2874  * @hw: pointer to the HW structure
2875  * @nvm: storage for Option ROM version information
2876  *
2877  * Reads the NVM EETRACK ID, Map version, and security revision of the
2878  * active NVM bank.
2879  *
2880  * Return: the exit code of the operation.
2881  */
ixgbe_get_active_nvm_ver(struct ixgbe_hw * hw,struct ixgbe_nvm_info * nvm)2882 s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
2883 {
2884 	return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
2885 }
2886 
2887 /**
2888  * ixgbe_get_netlist_info
2889  * @hw: pointer to the HW struct
2890  * @bank: whether to read from the active or inactive flash bank
2891  * @netlist: pointer to netlist version info structure
2892  *
2893  * Get the netlist version information from the requested bank. Reads the Link
2894  * Topology section to find the Netlist ID block and extract the relevant
2895  * information into the netlist version structure.
2896  *
2897  * Return: the exit code of the operation.
2898  */
ixgbe_get_netlist_info(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,struct ixgbe_netlist_info * netlist)2899 static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
2900 				  enum ixgbe_bank_select bank,
2901 				  struct ixgbe_netlist_info *netlist)
2902 {
2903 	u16 module_id, length, node_count, i;
2904 	u16 *id_blk;
2905 	s32 status;
2906 
2907 	status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
2908 					   &module_id);
2909 	if (status)
2910 		return status;
2911 
2912 	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID) {
2913 		return IXGBE_ERR_NVM;
2914 	}
2915 
2916 	status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
2917 					   &length);
2918 	if (status)
2919 		return status;
2920 
2921 	/* sanity check that we have at least enough words to store the
2922 	 * netlist ID block
2923 	 */
2924 	if (length < IXGBE_NETLIST_ID_BLK_SIZE) {
2925 		return IXGBE_ERR_NVM;
2926 	}
2927 
2928 	status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
2929 					   &node_count);
2930 	if (status)
2931 		return status;
2932 	node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
2933 
2934 	id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
2935 		     sizeof(*id_blk));
2936 	if (!id_blk)
2937 		return IXGBE_ERR_NO_SPACE;
2938 
2939 	/* Read out the entire Netlist ID Block at once. */
2940 	status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
2941 				         IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
2942 				         (u8 *)id_blk,
2943 					 IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
2944 	if (status)
2945 		goto exit_error;
2946 
2947 	for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
2948 		id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);
2949 
2950 	netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
2951 			 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
2952 	netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
2953 			 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
2954 	netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
2955 			id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
2956 	netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
2957 		       id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
2958 	netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
2959 	/* Read the left most 4 bytes of SHA */
2960 	netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
2961 			id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
2962 
2963 exit_error:
2964 	ixgbe_free(hw, id_blk);
2965 
2966 	return status;
2967 }
2968 
2969 /**
2970  * ixgbe_get_inactive_netlist_ver
2971  * @hw: pointer to the HW struct
2972  * @netlist: pointer to netlist version info structure
2973  *
2974  * Read the netlist version data from the inactive netlist bank. Used to
2975  * extract version data of a pending flash update in order to display the
2976  * version data.
2977  *
2978  * Return: the exit code of the operation.
2979  */
ixgbe_get_inactive_netlist_ver(struct ixgbe_hw * hw,struct ixgbe_netlist_info * netlist)2980 s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
2981 				   struct ixgbe_netlist_info *netlist)
2982 {
2983 	return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
2984 }
2985 
2986 /**
2987  * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
2988  * @hw: pointer to the HW structure
2989  * @offset: the word offset of the Shadow RAM word to read
2990  * @pointer: pointer value read from Shadow RAM
2991  *
2992  * Read the given Shadow RAM word, and convert it to a pointer value specified
2993  * in bytes. This function assumes the specified offset is a valid pointer
2994  * word.
2995  *
2996  * Each pointer word specifies whether it is stored in word size or 4KB
2997  * sector size by using the highest bit. The reported pointer value will be in
2998  * bytes, intended for flat NVM reads.
2999  *
3000  * Return: the exit code of the operation.
3001  */
ixgbe_read_sr_pointer(struct ixgbe_hw * hw,u16 offset,u32 * pointer)3002 static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
3003 {
3004 	s32 status;
3005 	u16 value;
3006 
3007 	status = ixgbe_read_ee_aci_E610(hw, offset, &value);
3008 	if (status)
3009 		return status;
3010 
3011 	/* Determine if the pointer is in 4KB or word units */
3012 	if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
3013 		*pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
3014 	else
3015 		*pointer = value * 2;
3016 
3017 	return IXGBE_SUCCESS;
3018 }
3019 
3020 /**
3021  * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
3022  * @hw: pointer to the HW structure
3023  * @offset: the word offset of the Shadow RAM to read
3024  * @size: size value read from the Shadow RAM
3025  *
3026  * Read the given Shadow RAM word, and convert it to an area size value
3027  * specified in bytes. This function assumes the specified offset is a valid
3028  * area size word.
3029  *
3030  * Each area size word is specified in 4KB sector units. This function reports
3031  * the size in bytes, intended for flat NVM reads.
3032  *
3033  * Return: the exit code of the operation.
3034  */
ixgbe_read_sr_area_size(struct ixgbe_hw * hw,u16 offset,u32 * size)3035 static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
3036 {
3037 	s32 status;
3038 	u16 value;
3039 
3040 	status = ixgbe_read_ee_aci_E610(hw, offset, &value);
3041 	if (status)
3042 		return status;
3043 
3044 	/* Area sizes are always specified in 4KB units */
3045 	*size = value * 4 * 1024;
3046 
3047 	return IXGBE_SUCCESS;
3048 }
3049 
3050 /**
3051  * ixgbe_discover_flash_size - Discover the available flash size.
3052  * @hw: pointer to the HW struct
3053  *
3054  * The device flash could be up to 16MB in size. However, it is possible that
3055  * the actual size is smaller. Use bisection to determine the accessible size
3056  * of flash memory.
3057  *
3058  * Return: the exit code of the operation.
3059  */
ixgbe_discover_flash_size(struct ixgbe_hw * hw)3060 static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
3061 {
3062 	u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
3063 	s32 status;
3064 
3065 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3066 	if (status)
3067 		return status;
3068 
3069 	while ((max_size - min_size) > 1) {
3070 		u32 offset = (max_size + min_size) / 2;
3071 		u32 len = 1;
3072 		u8 data;
3073 
3074 		status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
3075 		if (status == IXGBE_ERR_ACI_ERROR &&
3076 		    hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
3077 			status = IXGBE_SUCCESS;
3078 			max_size = offset;
3079 		} else if (!status) {
3080 			min_size = offset;
3081 		} else {
3082 			/* an unexpected error occurred */
3083 			goto err_read_flat_nvm;
3084 		}
3085 	}
3086 
3087 	hw->flash.flash_size = max_size;
3088 
3089 err_read_flat_nvm:
3090 	ixgbe_release_nvm(hw);
3091 
3092 	return status;
3093 }
3094 
3095 /**
3096  * ixgbe_determine_active_flash_banks - Discover active bank for each module
3097  * @hw: pointer to the HW struct
3098  *
3099  * Read the Shadow RAM control word and determine which banks are active for
3100  * the NVM, OROM, and Netlist modules. Also read and calculate the associated
3101  * pointer and size. These values are then cached into the ixgbe_flash_info
3102  * structure for later use in order to calculate the correct offset to read
3103  * from the active module.
3104  *
3105  * Return: the exit code of the operation.
3106  */
ixgbe_determine_active_flash_banks(struct ixgbe_hw * hw)3107 static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
3108 {
3109 	struct ixgbe_bank_info *banks = &hw->flash.banks;
3110 	u16 ctrl_word;
3111 	s32 status;
3112 
3113 	status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
3114 	if (status) {
3115 		return status;
3116 	}
3117 
3118 	/* Check that the control word indicates validity */
3119 	if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
3120 	    IXGBE_SR_CTRL_WORD_VALID) {
3121 		return IXGBE_ERR_CONFIG;
3122 	}
3123 
3124 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
3125 		banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
3126 	else
3127 		banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
3128 
3129 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
3130 		banks->orom_bank = IXGBE_1ST_FLASH_BANK;
3131 	else
3132 		banks->orom_bank = IXGBE_2ND_FLASH_BANK;
3133 
3134 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
3135 		banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
3136 	else
3137 		banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
3138 
3139 	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
3140 				       &banks->nvm_ptr);
3141 	if (status) {
3142 		return status;
3143 	}
3144 
3145 	status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
3146 					 &banks->nvm_size);
3147 	if (status) {
3148 		return status;
3149 	}
3150 
3151 	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
3152 				       &banks->orom_ptr);
3153 	if (status) {
3154 		return status;
3155 	}
3156 
3157 	status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
3158 					 &banks->orom_size);
3159 	if (status) {
3160 		return status;
3161 	}
3162 
3163 	status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
3164 				       &banks->netlist_ptr);
3165 	if (status) {
3166 		return status;
3167 	}
3168 
3169 	status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
3170 					 &banks->netlist_size);
3171 	if (status) {
3172 		return status;
3173 	}
3174 
3175 	return IXGBE_SUCCESS;
3176 }
3177 
3178 /**
3179  * ixgbe_init_nvm - initializes NVM setting
3180  * @hw: pointer to the HW struct
3181  *
3182  * Read and populate NVM settings such as Shadow RAM size,
3183  * max_timeout, and blank_nvm_mode
3184  *
3185  * Return: the exit code of the operation.
3186  */
ixgbe_init_nvm(struct ixgbe_hw * hw)3187 s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
3188 {
3189 	struct ixgbe_flash_info *flash = &hw->flash;
3190 	u32 fla, gens_stat, status;
3191 	u8 sr_size;
3192 
3193 	/* The SR size is stored regardless of the NVM programming mode
3194 	 * as the blank mode may be used in the factory line.
3195 	 */
3196 	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
3197 	sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
3198 
3199 	/* Switching to words (sr_size contains power of 2) */
3200 	flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
3201 
3202 	/* Check if we are in the normal or blank NVM programming mode */
3203 	fla = IXGBE_READ_REG(hw, GLNVM_FLA);
3204 	if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
3205 		flash->blank_nvm_mode = false;
3206 	} else {
3207 		/* Blank programming mode */
3208 		flash->blank_nvm_mode = true;
3209 		return IXGBE_ERR_NVM_BLANK_MODE;
3210 	}
3211 
3212 	status = ixgbe_discover_flash_size(hw);
3213 	if (status) {
3214 		return status;
3215 	}
3216 
3217 	status = ixgbe_determine_active_flash_banks(hw);
3218 	if (status) {
3219 		return status;
3220 	}
3221 
3222 	status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3223 					&flash->nvm);
3224 	if (status) {
3225 		return status;
3226 	}
3227 
3228 	/* read the netlist version information */
3229 	status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3230 					&flash->netlist);
3231 
3232 	return IXGBE_SUCCESS;
3233 }
3234 
3235 /**
3236  * ixgbe_sanitize_operate - Clear the user data
3237  * @hw: pointer to the HW struct
3238  *
3239  * Clear user data from NVM using ACI command (0x070C).
3240  *
3241  * Return: the exit code of the operation.
3242  */
ixgbe_sanitize_operate(struct ixgbe_hw * hw)3243 s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
3244 {
3245 	s32 status;
3246 	u8 values;
3247 
3248 	u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
3249 		       IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
3250 
3251 	status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
3252 	if (status)
3253 		return status;
3254 	if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3255 	     !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
3256 	    ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3257 	     !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
3258 	    ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
3259 	     !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
3260 		return IXGBE_ERR_ACI_ERROR;
3261 
3262 	return IXGBE_SUCCESS;
3263 }
3264 
3265 /**
3266  * ixgbe_sanitize_nvm - Sanitize NVM
3267  * @hw: pointer to the HW struct
3268  * @cmd_flags: flag to the ACI command
3269  * @values: values returned from the command
3270  *
3271  * Sanitize NVM using ACI command (0x070C).
3272  *
3273  * Return: the exit code of the operation.
3274  */
ixgbe_sanitize_nvm(struct ixgbe_hw * hw,u8 cmd_flags,u8 * values)3275 s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
3276 {
3277 	struct ixgbe_aci_desc desc;
3278 	struct ixgbe_aci_cmd_nvm_sanitization *cmd;
3279 	s32 status;
3280 
3281 	cmd = &desc.params.nvm_sanitization;
3282 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
3283 	cmd->cmd_flags = cmd_flags;
3284 
3285 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3286 	if (values)
3287 		*values = cmd->values;
3288 
3289 	return status;
3290 }
3291 
3292 /**
3293  * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
3294  * @hw: pointer to the HW structure
3295  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3296  * @data: word read from the Shadow RAM
3297  *
3298  * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
3299  *
3300  * Return: the exit code of the operation.
3301  */
ixgbe_read_sr_word_aci(struct ixgbe_hw * hw,u16 offset,u16 * data)3302 s32 ixgbe_read_sr_word_aci(struct ixgbe_hw  *hw, u16 offset, u16 *data)
3303 {
3304 	u32 bytes = sizeof(u16);
3305 	__le16 data_local;
3306 	s32 status;
3307 
3308 	status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
3309 				     (u8 *)&data_local, true);
3310 	if (status)
3311 		return status;
3312 
3313 	*data = IXGBE_LE16_TO_CPU(data_local);
3314 	return IXGBE_SUCCESS;
3315 }
3316 
3317 /**
3318  * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
3319  * @hw: pointer to the HW structure
3320  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3321  * @words: (in) number of words to read; (out) number of words actually read
3322  * @data: words read from the Shadow RAM
3323  *
3324  * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
3325  * taken before reading the buffer and later released.
3326  *
3327  * Return: the exit code of the operation.
3328  */
ixgbe_read_sr_buf_aci(struct ixgbe_hw * hw,u16 offset,u16 * words,u16 * data)3329 s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
3330 			  u16 *data)
3331 {
3332 	u32 bytes = *words * 2, i;
3333 	s32 status;
3334 
3335 	status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
3336 
3337 	*words = bytes / 2;
3338 
3339 	for (i = 0; i < *words; i++)
3340 		data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
3341 
3342 	return status;
3343 }
3344 
/**
 * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
 * from being exceeded in case of Shadow RAM read requests and ensures that no
 * single read request exceeds the maximum 4KB read for a single admin command.
 *
 * Returns a status code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_read_flat_nvm(struct ixgbe_hw  *hw, u32 offset, u32 *length,
			u8 *data, bool read_shadow_ram)
{
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;
	s32 status;

	/* Zero the output length up front so it reflects only completed
	 * reads even if we bail out early below.
	 */
	*length = 0;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) >
				(hw->eeprom.word_size * 2u))) {
		return IXGBE_ERR_PARAM;
	}

	do {
		u32 read_size, sector_offset;

		/* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also 4KB.
		 */
		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
		read_size = MIN_T(u32,
				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
				  inlen - bytes_read);

		/* This chunk is the last one exactly when it reaches the
		 * total requested length.
		 */
		last_cmd = !(bytes_read + read_size < inlen);

		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
		 * maximum size guarantees that it will fit within the 2 bytes.
		 */
		status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
					    offset, (u16)read_size,
					    data + bytes_read, last_cmd,
					    read_shadow_ram);
		if (status)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* Report how many bytes were successfully transferred, even when
	 * the loop above exited early on an error.
	 */
	*length = bytes_read;
	return status;
}
3411 
3412 /**
3413  * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
3414  * @hw: pointer to the HW structure
3415  * @offset: offset in words from module start
3416  * @words: number of words to access
3417  *
3418  * Check if all the parameters are valid
3419  * before performing any Shadow RAM read/write operations.
3420  *
3421  * Return: the exit code of the operation.
3422  * * - IXGBE_SUCCESS - success.
3423  * * - IXGBE_ERR_PARAM - NVM error: offset beyond SR limit or
3424  * NVM error: tried to access more words then the set limit or
3425  * NVM error: cannot spread over two sectors.
3426  */
ixgbe_check_sr_access_params(struct ixgbe_hw * hw,u32 offset,u16 words)3427 static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
3428 					u16 words)
3429 {
3430 	if ((offset + words) > hw->eeprom.word_size) {
3431 		return IXGBE_ERR_PARAM;
3432 	}
3433 
3434 	if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
3435 		/* We can access only up to 4KB (one sector),
3436 		 * in one Admin Command write
3437 		 */
3438 		return IXGBE_ERR_PARAM;
3439 	}
3440 
3441 	if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
3442 	    (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
3443 		/* A single access cannot spread over two sectors */
3444 		return IXGBE_ERR_PARAM;
3445 	}
3446 
3447 	return IXGBE_SUCCESS;
3448 }
3449 
3450 /**
3451  * ixgbe_write_sr_word_aci - Writes Shadow RAM word
3452  * @hw: pointer to the HW structure
3453  * @offset: offset of the Shadow RAM word to write
3454  * @data: word to write to the Shadow RAM
3455  *
3456  * Writes a 16 bit word to the Shadow RAM using the admin command.
3457  * NVM ownership must be acquired before calling this function and released
3458  * by a caller. To commit SR to NVM update checksum function should be called.
3459  *
3460  * Return: the exit code of the operation.
3461  */
ixgbe_write_sr_word_aci(struct ixgbe_hw * hw,u32 offset,const u16 * data)3462 s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
3463 {
3464 	__le16 data_local = IXGBE_CPU_TO_LE16(*data);
3465 	s32 status;
3466 
3467 	status = ixgbe_check_sr_access_params(hw, offset, 1);
3468 	if (!status)
3469 		status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3470 					      BYTES_PER_WORD, &data_local,
3471 					      false, 0);
3472 
3473 	return status;
3474 }
3475 
3476 /**
3477  * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
3478  * @hw: pointer to the HW structure
3479  * @offset: offset of the Shadow RAM buffer to write
3480  * @words: number of words to write
3481  * @data: words to write to the Shadow RAM
3482  *
3483  * Writes a 16 bit word to the Shadow RAM using the admin command.
3484  * NVM ownership must be acquired before calling this function and released
3485  * by a caller. To commit SR to NVM update checksum function should be called.
3486  *
3487  * Return: the exit code of the operation.
3488  */
ixgbe_write_sr_buf_aci(struct ixgbe_hw * hw,u32 offset,u16 words,const u16 * data)3489 s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
3490 			   const u16 *data)
3491 {
3492 	__le16 *data_local;
3493 	s32 status;
3494 	void *vmem;
3495 	u32 i;
3496 
3497 	vmem = ixgbe_calloc(hw, words, sizeof(u16));
3498 	if (!vmem)
3499 		return IXGBE_ERR_OUT_OF_MEM;
3500 	data_local = (__le16 *)vmem;
3501 
3502 	for (i = 0; i < words; i++)
3503 		data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
3504 
3505 	/* Here we will only write one buffer as the size of the modules
3506 	 * mirrored in the Shadow RAM is always less than 4K.
3507 	 */
3508 	status = ixgbe_check_sr_access_params(hw, offset, words);
3509 	if (!status)
3510 		status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3511 					      BYTES_PER_WORD * words,
3512 					      data_local, false, 0);
3513 
3514 	ixgbe_free(hw, vmem);
3515 
3516 	return status;
3517 }
3518 
3519 /**
3520  * ixgbe_aci_alternate_write - write to alternate structure
3521  * @hw: pointer to the hardware structure
3522  * @reg_addr0: address of first dword to be written
3523  * @reg_val0: value to be written under 'reg_addr0'
3524  * @reg_addr1: address of second dword to be written
3525  * @reg_val1: value to be written under 'reg_addr1'
3526  *
3527  * Write one or two dwords to alternate structure using ACI command (0x0900).
3528  * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3529  *
3530  * Return: 0 on success and error code on failure.
3531  */
ixgbe_aci_alternate_write(struct ixgbe_hw * hw,u32 reg_addr0,u32 reg_val0,u32 reg_addr1,u32 reg_val1)3532 s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
3533 			      u32 reg_val0, u32 reg_addr1, u32 reg_val1)
3534 {
3535 	struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3536 	struct ixgbe_aci_desc desc;
3537 	s32 status;
3538 
3539 	cmd = &desc.params.read_write_alt_direct;
3540 
3541 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
3542 	cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3543 	cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3544 	cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
3545 	cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
3546 
3547 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3548 
3549 	return status;
3550 }
3551 
3552 /**
3553  * ixgbe_aci_alternate_read - read from alternate structure
3554  * @hw: pointer to the hardware structure
3555  * @reg_addr0: address of first dword to be read
3556  * @reg_val0: pointer for data read from 'reg_addr0'
3557  * @reg_addr1: address of second dword to be read
3558  * @reg_val1: pointer for data read from 'reg_addr1'
3559  *
3560  * Read one or two dwords from alternate structure using ACI command (0x0902).
3561  * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3562  * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
3563  * is read.
3564  *
3565  * Return: 0 on success and error code on failure.
3566  */
ixgbe_aci_alternate_read(struct ixgbe_hw * hw,u32 reg_addr0,u32 * reg_val0,u32 reg_addr1,u32 * reg_val1)3567 s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
3568 			     u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
3569 {
3570 	struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3571 	struct ixgbe_aci_desc desc;
3572 	s32 status;
3573 
3574 	cmd = &desc.params.read_write_alt_direct;
3575 
3576 	if (!reg_val0)
3577 		return IXGBE_ERR_PARAM;
3578 
3579 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
3580 	cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3581 	cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3582 
3583 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3584 
3585 	if (status == IXGBE_SUCCESS) {
3586 		*reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
3587 
3588 		if (reg_val1)
3589 			*reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
3590 	}
3591 
3592 	return status;
3593 }
3594 
3595 /**
3596  * ixgbe_aci_alternate_write_done - check if writing to alternate structure
3597  * is done
3598  * @hw: pointer to the HW structure.
3599  * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
3600  * @reset_needed: indicates the SW should trigger GLOBAL reset
3601  *
3602  * Indicates to the FW that alternate structures have been changed.
3603  *
3604  * Return: 0 on success and error code on failure.
3605  */
ixgbe_aci_alternate_write_done(struct ixgbe_hw * hw,u8 bios_mode,bool * reset_needed)3606 s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
3607 				   bool *reset_needed)
3608 {
3609 	struct ixgbe_aci_cmd_done_alt_write *cmd;
3610 	struct ixgbe_aci_desc desc;
3611 	s32 status;
3612 
3613 	cmd = &desc.params.done_alt_write;
3614 
3615 	if (!reset_needed)
3616 		return IXGBE_ERR_PARAM;
3617 
3618 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
3619 	cmd->flags = bios_mode;
3620 
3621 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3622 	if (!status)
3623 		*reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
3624 				 IXGBE_ACI_RESP_RESET_NEEDED) != 0;
3625 
3626 	return status;
3627 }
3628 
3629 /**
3630  * ixgbe_aci_alternate_clear - clear alternate structure
3631  * @hw: pointer to the HW structure.
3632  *
3633  * Clear the alternate structures of the port from which the function
3634  * is called.
3635  *
3636  * Return: 0 on success and error code on failure.
3637  */
ixgbe_aci_alternate_clear(struct ixgbe_hw * hw)3638 s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
3639 {
3640 	struct ixgbe_aci_desc desc;
3641 	s32 status;
3642 
3643 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
3644 					ixgbe_aci_opc_clear_port_alt_write);
3645 
3646 	status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3647 
3648 	return status;
3649 }
3650 
3651 /**
3652  * ixgbe_aci_get_internal_data - get internal FW/HW data
3653  * @hw: pointer to the hardware structure
3654  * @cluster_id: specific cluster to dump
3655  * @table_id: table ID within cluster
3656  * @start: index of line in the block to read
3657  * @buf: dump buffer
3658  * @buf_size: dump buffer size
3659  * @ret_buf_size: return buffer size (returned by FW)
3660  * @ret_next_cluster: next cluster to read (returned by FW)
3661  * @ret_next_table: next block to read (returned by FW)
3662  * @ret_next_index: next index to read (returned by FW)
3663  *
3664  * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
3665  *
3666  * Return: the exit code of the operation.
3667  */
ixgbe_aci_get_internal_data(struct ixgbe_hw * hw,u16 cluster_id,u16 table_id,u32 start,void * buf,u16 buf_size,u16 * ret_buf_size,u16 * ret_next_cluster,u16 * ret_next_table,u32 * ret_next_index)3668 s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
3669 				u16 table_id, u32 start, void *buf,
3670 				u16 buf_size, u16 *ret_buf_size,
3671 				u16 *ret_next_cluster, u16 *ret_next_table,
3672 				u32 *ret_next_index)
3673 {
3674 	struct ixgbe_aci_cmd_debug_dump_internals *cmd;
3675 	struct ixgbe_aci_desc desc;
3676 	s32 status;
3677 
3678 	cmd = &desc.params.debug_dump;
3679 
3680 	if (buf_size == 0 || !buf)
3681 		return IXGBE_ERR_PARAM;
3682 
3683 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
3684 					ixgbe_aci_opc_debug_dump_internals);
3685 
3686 	cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
3687 	cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
3688 	cmd->idx = IXGBE_CPU_TO_LE32(start);
3689 
3690 	status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
3691 
3692 	if (!status) {
3693 		if (ret_buf_size)
3694 			*ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
3695 		if (ret_next_cluster)
3696 			*ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
3697 		if (ret_next_table)
3698 			*ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
3699 		if (ret_next_index)
3700 			*ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
3701 	}
3702 
3703 	return status;
3704 }
3705 
3706 /**
3707  * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
3708  * @cmd: NVM access command structure
3709  *
3710  * Validates that an NVM access structure is request to read or write a valid
3711  * register offset. First validates that the module and flags are correct, and
3712  * then ensures that the register offset is one of the accepted registers.
3713  *
3714  * Return: 0 if the register access is valid, out of range error code otherwise.
3715  */
3716 static s32
ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd * cmd)3717 ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
3718 {
3719 	u16 i;
3720 
3721 	switch (cmd->offset) {
3722 	case GL_HICR:
3723 	case GL_HICR_EN: /* Note, this register is read only */
3724 	case GL_FWSTS:
3725 	case GL_MNG_FWSM:
3726 	case GLNVM_GENS:
3727 	case GLNVM_FLA:
3728 	case GL_FWRESETCNT:
3729 		return 0;
3730 	default:
3731 		break;
3732 	}
3733 
3734 	for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
3735 		if (cmd->offset == (u32)GL_HIDA(i))
3736 			return 0;
3737 
3738 	for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
3739 		if (cmd->offset == (u32)GL_HIBA(i))
3740 			return 0;
3741 
3742 	/* All other register offsets are not valid */
3743 	return IXGBE_ERR_OUT_OF_RANGE;
3744 }
3745 
3746 /**
3747  * ixgbe_nvm_access_read - Handle an NVM read request
3748  * @hw: pointer to the HW struct
3749  * @cmd: NVM access command to process
3750  * @data: storage for the register value read
3751  *
3752  * Process an NVM access request to read a register.
3753  *
3754  * Return: 0 if the register read is valid and successful,
3755  * out of range error code otherwise.
3756  */
ixgbe_nvm_access_read(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3757 static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
3758 			struct ixgbe_nvm_access_cmd *cmd,
3759 			struct ixgbe_nvm_access_data *data)
3760 {
3761 	s32 status;
3762 
3763 	/* Always initialize the output data, even on failure */
3764 	memset(&data->regval, 0, cmd->data_size);
3765 
3766 	/* Make sure this is a valid read/write access request */
3767 	status = ixgbe_validate_nvm_rw_reg(cmd);
3768 	if (status)
3769 		return status;
3770 
3771 	DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
3772 
3773 	/* Read the register and store the contents in the data field */
3774 	data->regval = IXGBE_READ_REG(hw, cmd->offset);
3775 
3776 	return 0;
3777 }
3778 
3779 /**
3780  * ixgbe_nvm_access_write - Handle an NVM write request
3781  * @hw: pointer to the HW struct
3782  * @cmd: NVM access command to process
3783  * @data: NVM access data to write
3784  *
3785  * Process an NVM access request to write a register.
3786  *
3787  * Return: 0 if the register write is valid and successful,
3788  * out of range error code otherwise.
3789  */
ixgbe_nvm_access_write(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3790 static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
3791 			struct ixgbe_nvm_access_cmd *cmd,
3792 			struct ixgbe_nvm_access_data *data)
3793 {
3794 	s32 status;
3795 
3796 	/* Make sure this is a valid read/write access request */
3797 	status = ixgbe_validate_nvm_rw_reg(cmd);
3798 	if (status)
3799 		return status;
3800 
3801 	/* Reject requests to write to read-only registers */
3802 	switch (cmd->offset) {
3803 	case GL_HICR_EN:
3804 		return IXGBE_ERR_OUT_OF_RANGE;
3805 	default:
3806 		break;
3807 	}
3808 
3809 	DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
3810 		cmd->offset, data->regval);
3811 
3812 	/* Write the data field to the specified register */
3813 	IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
3814 
3815 	return 0;
3816 }
3817 
3818 /**
3819  * ixgbe_handle_nvm_access - Handle an NVM access request
3820  * @hw: pointer to the HW struct
3821  * @cmd: NVM access command info
3822  * @data: pointer to read or return data
3823  *
3824  * Process an NVM access request. Read the command structure information and
3825  * determine if it is valid. If not, report an error indicating the command
3826  * was invalid.
3827  *
3828  * For valid commands, perform the necessary function, copying the data into
3829  * the provided data buffer.
3830  *
3831  * Return: 0 if the nvm access request is valid and successful,
3832  * error code otherwise.
3833  */
ixgbe_handle_nvm_access(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3834 s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
3835 			struct ixgbe_nvm_access_cmd *cmd,
3836 			struct ixgbe_nvm_access_data *data)
3837 {
3838 	switch (cmd->command) {
3839 	case IXGBE_NVM_CMD_READ:
3840 		return ixgbe_nvm_access_read(hw, cmd, data);
3841 	case IXGBE_NVM_CMD_WRITE:
3842 		return ixgbe_nvm_access_write(hw, cmd, data);
3843 	default:
3844 		return IXGBE_ERR_PARAM;
3845 	}
3846 }
3847 
3848 /**
3849  * ixgbe_aci_set_health_status_config - Configure FW health events
3850  * @hw: pointer to the HW struct
3851  * @event_source: type of diagnostic events to enable
3852  *
3853  * Configure the health status event types that the firmware will send to this
3854  * PF using ACI command (0xFF20). The supported event types are: PF-specific,
3855  * all PFs, and global.
3856  *
3857  * Return: the exit code of the operation.
3858  */
ixgbe_aci_set_health_status_config(struct ixgbe_hw * hw,u8 event_source)3859 s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
3860 {
3861 	struct ixgbe_aci_cmd_set_health_status_config *cmd;
3862 	struct ixgbe_aci_desc desc;
3863 
3864 	cmd = &desc.params.set_health_status_config;
3865 
3866 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
3867 				      ixgbe_aci_opc_set_health_status_config);
3868 
3869 	cmd->event_source = event_source;
3870 
3871 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3872 }
3873 
3874 /**
3875  * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
3876  * @hw: pointer to hardware structure
3877  *
3878  * Initialize the function pointers and assign the MAC type for E610.
3879  * Does not touch the hardware.
3880  *
3881  * Return: the exit code of the operation.
3882  */
ixgbe_init_ops_E610(struct ixgbe_hw * hw)3883 s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
3884 {
3885 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
3886 	struct ixgbe_mac_info *mac = &hw->mac;
3887 	struct ixgbe_phy_info *phy = &hw->phy;
3888 	s32 ret_val;
3889 
3890 	ret_val = ixgbe_init_ops_X550(hw);
3891 
3892 	/* MAC */
3893 	mac->ops.reset_hw = ixgbe_reset_hw_E610;
3894 	mac->ops.start_hw = ixgbe_start_hw_E610;
3895 	mac->ops.get_media_type = ixgbe_get_media_type_E610;
3896 	mac->ops.get_supported_physical_layer =
3897 		ixgbe_get_supported_physical_layer_E610;
3898 	mac->ops.get_san_mac_addr = NULL;
3899 	mac->ops.set_san_mac_addr = NULL;
3900 	mac->ops.get_wwn_prefix = NULL;
3901 	mac->ops.setup_link = ixgbe_setup_link_E610;
3902 	mac->ops.check_link = ixgbe_check_link_E610;
3903 	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
3904 	mac->ops.setup_fc = ixgbe_setup_fc_E610;
3905 	mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
3906 	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
3907 	mac->ops.disable_rx = ixgbe_disable_rx_E610;
3908 	mac->ops.setup_eee = ixgbe_setup_eee_E610;
3909 	mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
3910 	mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
3911 	mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
3912 	mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
3913 	mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
3914        mac->ops.get_thermal_sensor_data = NULL;
3915        mac->ops.init_thermal_sensor_thresh = NULL;
3916 
3917 	/* PHY */
3918 	phy->ops.init = ixgbe_init_phy_ops_E610;
3919 	phy->ops.identify = ixgbe_identify_phy_E610;
3920 	phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
3921 				    IXGBE_LINK_SPEED_100_FULL |
3922 				    IXGBE_LINK_SPEED_1GB_FULL;
3923 	phy->eee_speeds_advertised = phy->eee_speeds_supported;
3924 
3925 	/* Additional ops overrides for e610 to go here */
3926 	eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
3927 	eeprom->ops.read = ixgbe_read_ee_aci_E610;
3928 	eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
3929 	eeprom->ops.write = ixgbe_write_ee_aci_E610;
3930 	eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
3931 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
3932 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
3933 	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
3934 	eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
3935 
3936 	/* Initialize bus function number */
3937 	hw->mac.ops.set_lan_id(hw);
3938 
3939 	return ret_val;
3940 }
3941 
/**
 * ixgbe_reset_hw_E610 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, and perform a reset.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	u32 ctrl, i;
	s32 status;

	DEBUGFUNC("ixgbe_reset_hw_E610");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY init failure is logged but deliberately non-fatal: the MAC
	 * reset below proceeds regardless and 'status' is overwritten. */
	status = hw->phy.ops.init(hw);
	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
			  status);
mac_reset_top:
	/* Hold the SW/FW semaphore across the CTRL.RST write so the reset
	 * does not race firmware accesses to shared resources. */
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status != IXGBE_SUCCESS) {
		ERROR_REPORT2(IXGBE_ERROR_CAUTION,
			      "semaphore failed with %d", status);
		return IXGBE_ERR_SWFW_SYNC;
	}
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* Still set after the poll loop means the reset never completed. */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "Reset polling failed to complete.\n");
	}
	msec_delay(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	/* NOTE(review): the get_mac_addr return value is ignored here —
	 * presumably it cannot fail post-reset; confirm. */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	return status;
}
4026 
4027 /**
4028  * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
4029  * @hw: pointer to hardware structure
4030  *
4031  * Gets firmware version and if API version matches it
4032  * starts the hardware using the generic start_hw function
4033  * and the generation start_hw function.
4034  * Then performs revision-specific operations, if any.
4035  **/
ixgbe_start_hw_E610(struct ixgbe_hw * hw)4036 s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
4037 {
4038 	s32 ret_val = IXGBE_SUCCESS;
4039 
4040 	ret_val = hw->mac.ops.get_fw_version(hw);
4041 	if (ret_val)
4042 		goto out;
4043 
4044 	ret_val = ixgbe_start_hw_generic(hw);
4045 	if (ret_val != IXGBE_SUCCESS)
4046 		goto out;
4047 
4048 	ixgbe_start_hw_gen2(hw);
4049 
4050 out:
4051 	return ret_val;
4052 }
4053 
/**
 * ixgbe_get_media_type_E610 - Gets media type
 * @hw: pointer to the HW struct
 *
 * In order to get the media type, the function gets PHY
 * capabilities and later on use them to identify the PHY type
 * checking phy_type_high and phy_type_low.
 *
 * Return: the type of media in form of ixgbe_media_type enum
 * or ixgbe_media_type_unknown in case of an error.
 */
enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	u64 phy_mask = 0;
	s32 rc;
	u8 i;

	/* Refresh hw->link.link_info from FW before inspecting it. */
	rc = ixgbe_update_link_info(hw);
	if (rc) {
		return ixgbe_media_type_unknown;
	}

	/* If there is no link but PHY (dongle) is available SW should use
	 * Get PHY Caps admin command instead of Get Link Status, find most
	 * significant bit that is set in PHY types reported by the command
	 * and use it to discover media type.
	 */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
		/* Get PHY Capabilities */
		rc = ixgbe_aci_get_phy_caps(hw, false,
					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
					    &pcaps);
		if (rc) {
			return ixgbe_media_type_unknown;
		}

		/* NOTE(review): pcaps.phy_type_high/low are tested here
		 * without IXGBE_LE64_TO_CPU, unlike
		 * ixgbe_get_supported_physical_layer_E610 below — correct on
		 * little-endian hosts only; confirm intended. */

		/* Check if there is some bit set in phy_type_high,
		 * scanning from the most significant bit down. */
		for (i = 64; i > 0; i--) {
			phy_mask = (u64)((u64)1 << (i - 1));
			if ((pcaps.phy_type_high & phy_mask) != 0) {
				/* If any bit is set treat it as PHY type */
				hw->link.link_info.phy_type_high = phy_mask;
				hw->link.link_info.phy_type_low = 0;
				break;
			}
			/* Reset so phy_mask == 0 signals "not found". */
			phy_mask = 0;
		}

		/* If nothing found in phy_type_high search in phy_type_low */
		if (phy_mask == 0) {
			for (i = 64; i > 0; i--) {
				phy_mask = (u64)((u64)1 << (i - 1));
				if ((pcaps.phy_type_low & phy_mask) != 0) {
					/* If any bit is set treat it as PHY type */
					hw->link.link_info.phy_type_high = 0;
					hw->link.link_info.phy_type_low = phy_mask;
					break;
				}
			}
		}

	}

	/* Based on link status or search above try to discover media type */
	hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);

	return hw->phy.media_type;
}
4124 
4125 /**
4126  * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
4127  * @hw: pointer to hardware structure
4128  *
4129  * Determines physical layer capabilities of the current configuration.
4130  *
4131  * Return: the exit code of the operation.
4132  **/
ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw * hw)4133 u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
4134 {
4135 	u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
4136 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4137 	u64 phy_type;
4138 	s32 rc;
4139 
4140 	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4141 				    &pcaps);
4142 	if (rc)
4143 		return IXGBE_PHYSICAL_LAYER_UNKNOWN;
4144 
4145 	phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
4146 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
4147 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
4148 	if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
4149 		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
4150 	if(phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
4151 		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
4152 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
4153 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
4154 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
4155 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
4156 	if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
4157 		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
4158 	if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
4159 		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
4160 	if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
4161 		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
4162 	if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
4163 		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
4164 	if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
4165 		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
4166 	if(phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
4167 		physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
4168 
4169 	phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
4170 	if(phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
4171 		physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
4172 
4173 	return physical_layer;
4174 }
4175 
4176 /**
4177  * ixgbe_setup_link_E610 - Set up link
4178  * @hw: pointer to hardware structure
4179  * @speed: new link speed
4180  * @autoneg_wait: true when waiting for completion is needed
4181  *
4182  * Set up the link with the specified speed.
4183  *
4184  * Return: the exit code of the operation.
4185  */
ixgbe_setup_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait)4186 s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
4187 			  bool autoneg_wait)
4188 {
4189 	/* Simply request FW to perform proper PHY setup */
4190 	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
4191 }
4192 
4193 /**
4194  * ixgbe_check_link_E610 - Determine link and speed status
4195  * @hw: pointer to hardware structure
4196  * @speed: pointer to link speed
4197  * @link_up: true when link is up
4198  * @link_up_wait_to_complete: bool used to wait for link up or not
4199  *
4200  * Determine if the link is up and the current link speed
4201  * using ACI command (0x0607).
4202  *
4203  * Return: the exit code of the operation.
4204  */
ixgbe_check_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)4205 s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4206 			  bool *link_up, bool link_up_wait_to_complete)
4207 {
4208 	s32 rc;
4209 	u32 i;
4210 
4211 	if (!speed || !link_up)
4212 		return IXGBE_ERR_PARAM;
4213 
4214 	/* Set get_link_info flag to ensure that fresh
4215 	 * link information will be obtained from FW
4216 	 * by sending Get Link Status admin command. */
4217 	hw->link.get_link_info = true;
4218 
4219 	/* Update link information in adapter context. */
4220 	rc = ixgbe_get_link_status(hw, link_up);
4221 	if (rc)
4222 		return rc;
4223 
4224 	/* Wait for link up if it was requested. */
4225 	if (link_up_wait_to_complete && *link_up == false) {
4226 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
4227 			msec_delay(100);
4228 			hw->link.get_link_info = true;
4229 			rc = ixgbe_get_link_status(hw, link_up);
4230 			if (rc)
4231 				return rc;
4232 			if (*link_up)
4233 				break;
4234 		}
4235 	}
4236 
4237 	/* Use link information in adapter context updated by the call
4238 	 * to ixgbe_get_link_status() to determine current link speed.
4239 	 * Link speed information is valid only when link up was
4240 	 * reported by FW. */
4241 	if (*link_up) {
4242 		switch (hw->link.link_info.link_speed) {
4243 		case IXGBE_ACI_LINK_SPEED_10MB:
4244 			*speed = IXGBE_LINK_SPEED_10_FULL;
4245 			break;
4246 		case IXGBE_ACI_LINK_SPEED_100MB:
4247 			*speed = IXGBE_LINK_SPEED_100_FULL;
4248 			break;
4249 		case IXGBE_ACI_LINK_SPEED_1000MB:
4250 			*speed = IXGBE_LINK_SPEED_1GB_FULL;
4251 			break;
4252 		case IXGBE_ACI_LINK_SPEED_2500MB:
4253 			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4254 			break;
4255 		case IXGBE_ACI_LINK_SPEED_5GB:
4256 			*speed = IXGBE_LINK_SPEED_5GB_FULL;
4257 			break;
4258 		case IXGBE_ACI_LINK_SPEED_10GB:
4259 			*speed = IXGBE_LINK_SPEED_10GB_FULL;
4260 			break;
4261 		default:
4262 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
4263 			break;
4264 		}
4265 	} else {
4266 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
4267 	}
4268 
4269 	return IXGBE_SUCCESS;
4270 }
4271 
4272 /**
4273  * ixgbe_get_link_capabilities_E610 - Determine link capabilities
4274  * @hw: pointer to hardware structure
4275  * @speed: pointer to link speed
4276  * @autoneg: true when autoneg or autotry is enabled
4277  *
4278  * Determine speed and AN parameters of a link.
4279  *
4280  * Return: the exit code of the operation.
4281  */
ixgbe_get_link_capabilities_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)4282 s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
4283 				     ixgbe_link_speed *speed,
4284 				     bool *autoneg)
4285 {
4286 	if (!speed || !autoneg)
4287 		return IXGBE_ERR_PARAM;
4288 
4289 	*autoneg = true;
4290 	*speed = hw->phy.speeds_supported;
4291 
4292 	return IXGBE_SUCCESS;
4293 }
4294 
4295 /**
4296  * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
4297  * @hw: pointer to hardware structure
4298  * @cfg: PHY configuration data to set FC mode
4299  * @req_mode: FC mode to configure
4300  *
4301  * Configures PHY Flow Control according to the provided configuration.
4302  *
4303  * Return: the exit code of the operation.
4304  */
ixgbe_cfg_phy_fc(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg,enum ixgbe_fc_mode req_mode)4305 s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
4306 		     struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
4307 		     enum ixgbe_fc_mode req_mode)
4308 {
4309 	struct ixgbe_aci_cmd_get_phy_caps_data* pcaps = NULL;
4310 	s32 status = IXGBE_SUCCESS;
4311 	u8 pause_mask = 0x0;
4312 
4313 	if (!cfg)
4314 		return IXGBE_ERR_PARAM;
4315 
4316 	switch (req_mode) {
4317 	case ixgbe_fc_auto:
4318 	{
4319 		pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
4320 			ixgbe_malloc(hw, sizeof(*pcaps));
4321 		if (!pcaps) {
4322 			status = IXGBE_ERR_OUT_OF_MEM;
4323 			goto out;
4324 		}
4325 
4326 		/* Query the value of FC that both the NIC and the attached
4327 		 * media can do. */
4328 		status = ixgbe_aci_get_phy_caps(hw, false,
4329 			IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
4330 		if (status)
4331 			goto out;
4332 
4333 		pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4334 		pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4335 
4336 		break;
4337 	}
4338 	case ixgbe_fc_full:
4339 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4340 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4341 		break;
4342 	case ixgbe_fc_rx_pause:
4343 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4344 		break;
4345 	case ixgbe_fc_tx_pause:
4346 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4347 		break;
4348 	default:
4349 		break;
4350 	}
4351 
4352 	/* clear the old pause settings */
4353 	cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
4354 		IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
4355 
4356 	/* set the new capabilities */
4357 	cfg->caps |= pause_mask;
4358 
4359 out:
4360 	if (pcaps)
4361 		ixgbe_free(hw, pcaps);
4362 	return status;
4363 }
4364 
4365 /**
4366  * ixgbe_setup_fc_E610 - Set up flow control
4367  * @hw: pointer to hardware structure
4368  *
4369  * Set up flow control. This has to be done during init time.
4370  *
4371  * Return: the exit code of the operation.
4372  */
ixgbe_setup_fc_E610(struct ixgbe_hw * hw)4373 s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
4374 {
4375 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
4376 	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
4377 	s32 status;
4378 
4379 	/* Get the current PHY config */
4380 	status = ixgbe_aci_get_phy_caps(hw, false,
4381 		IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
4382 	if (status)
4383 		return status;
4384 
4385 	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
4386 
4387 	/* Configure the set PHY data */
4388 	status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
4389 	if (status)
4390 		return status;
4391 
4392 	/* If the capabilities have changed, then set the new config */
4393 	if (cfg.caps != pcaps.caps) {
4394 		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4395 
4396 		status = ixgbe_aci_set_phy_cfg(hw, &cfg);
4397 		if (status)
4398 			return status;
4399 	}
4400 
4401 	return status;
4402 }
4403 
4404 /**
4405  * ixgbe_fc_autoneg_E610 - Configure flow control
4406  * @hw: pointer to hardware structure
4407  *
4408  * Configure Flow Control.
4409  */
ixgbe_fc_autoneg_E610(struct ixgbe_hw * hw)4410 void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
4411 {
4412 	s32 status;
4413 
4414 	/* Get current link status.
4415 	 * Current FC mode will be stored in the hw context. */
4416 	status = ixgbe_aci_get_link_info(hw, false, NULL);
4417 	if (status) {
4418 		goto out;
4419 	}
4420 
4421 	/* Check if the link is up */
4422 	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
4423 		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4424 		goto out;
4425 	}
4426 
4427 	/* Check if auto-negotiation has completed */
4428 	if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
4429 		status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4430 		goto out;
4431 	}
4432 
4433 out:
4434 	if (status == IXGBE_SUCCESS) {
4435 		hw->fc.fc_was_autonegged = true;
4436 	} else {
4437 		hw->fc.fc_was_autonegged = false;
4438 		hw->fc.current_mode = hw->fc.requested_mode;
4439 	}
4440 }
4441 
4442 /**
4443  * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
4444  * @hw: pointer to the HW structure
4445  * @maj: driver version major number
4446  * @minor: driver version minor number
4447  * @build: driver version build number
4448  * @sub: driver version sub build number
4449  * @len: length of driver_ver string
4450  * @driver_ver: driver string
4451  *
4452  * Send driver version number to Firmware using ACI command (0x0002).
4453  *
4454  * Return: the exit code of the operation.
4455  * IXGBE_SUCCESS - OK
4456  * IXGBE_ERR_PARAM - incorrect parameters were given
4457  * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
4458  * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
4459  * IXGBE_ERR_OUT_OF_MEM - ran out of memory
4460  */
ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw * hw,u8 maj,u8 minor,u8 build,u8 sub,u16 len,const char * driver_ver)4461 s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
4462 			      u8 sub, u16 len, const char *driver_ver)
4463 {
4464 	size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
4465 	struct ixgbe_driver_ver dv;
4466 
4467 	DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
4468 
4469 	if (!len || !driver_ver)
4470 		return IXGBE_ERR_PARAM;
4471 
4472 	dv.major_ver = maj;
4473 	dv.minor_ver = minor;
4474 	dv.build_ver = build;
4475 	dv.subbuild_ver = sub;
4476 
4477 	memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
4478 	memcpy(dv.driver_string, driver_ver, limited_len);
4479 
4480 	return ixgbe_aci_send_driver_ver(hw, &dv);
4481 }
4482 
/**
 * ixgbe_disable_rx_E610 - Disable RX unit
 * @hw: pointer to hardware structure
 *
 * Disable RX DMA unit on E610 with use of ACI command (0x000C). If the ACI
 * command fails, fall back to clearing RXCTRL.RXEN directly via a register
 * write. Before disabling Rx, the VT loopback enable bit (VT_LBEN) is
 * cleared and its previous state recorded in hw->mac.set_lben so a later
 * enable path can restore it.
 */
void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
{
	u32 rxctrl;

	DEBUGFUNC("ixgbe_disable_rx_E610");

	/* Nothing to do unless the Rx unit is currently enabled */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		u32 pfdtxgswc;
		s32 status;

		/* Clear VT_LBEN while Rx is down; remember prior state in
		 * hw->mac.set_lben so it can be restored on re-enable.
		 */
		pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
		if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
			pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = true;
		} else {
			hw->mac.set_lben = false;
		}

		/* Preferred path: ask firmware to disable the Rx unit */
		status = ixgbe_aci_disable_rxen(hw);

		/* If we fail - disable RX using register write */
		if (status) {
			rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
			if (rxctrl & IXGBE_RXCTRL_RXEN) {
				rxctrl &= ~IXGBE_RXCTRL_RXEN;
				IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
			}
		}
	}
}
4523 
4524 /**
4525  * ixgbe_setup_eee_E610 - Enable/disable EEE support
4526  * @hw: pointer to the HW structure
4527  * @enable_eee: boolean flag to enable EEE
4528  *
4529  * Enables/disable EEE based on enable_eee flag.
4530  *
4531  * Return: the exit code of the operation.
4532  */
ixgbe_setup_eee_E610(struct ixgbe_hw * hw,bool enable_eee)4533 s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
4534 {
4535 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
4536 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
4537 	u16 eee_cap = 0;
4538 	s32 status;
4539 
4540 	status = ixgbe_aci_get_phy_caps(hw, false,
4541 		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
4542 	if (status != IXGBE_SUCCESS)
4543 		return status;
4544 
4545 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
4546 
4547 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
4548 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4549 
4550 	if (enable_eee) {
4551 		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
4552 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
4553 		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
4554 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
4555 		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
4556 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
4557 		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
4558 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
4559 		if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
4560 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
4561 		if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
4562 			eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
4563 	}
4564 
4565 	/* Set EEE capability for particular PHY types */
4566 	phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
4567 
4568 	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
4569 
4570 	return status;
4571 }
4572 
4573 /**
4574  * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
4575  * @hw: pointer to hardware structure
4576  *
4577  * Checks FW NVM recovery mode by
4578  * reading the value of the dedicated register.
4579  *
4580  * Return: true if FW is in recovery mode, otherwise false.
4581  */
ixgbe_fw_recovery_mode_E610(struct ixgbe_hw * hw)4582 bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
4583 {
4584 	u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
4585 
4586 	return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
4587 }
4588 
4589 /**
4590  * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
4591  * @hw: pointer to hardware structure
4592  *
4593  * Checks FW NVM Rollback mode by reading the
4594  * value of the dedicated register.
4595  *
4596  * Return: true if FW is in Rollback mode, otherwise false.
4597  */
ixgbe_fw_rollback_mode_E610(struct ixgbe_hw * hw)4598 bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
4599 {
4600 	u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
4601 
4602 	return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
4603 }
4604 
4605 /**
4606  * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
4607  * @hw: pointer to hardware structure
4608  *
4609  * Checks Thermal Sensor Autonomous Mode by reading the
4610  * value of the dedicated register.
4611  *
4612  * Return: true if FW is in TSAM, otherwise false.
4613  */
ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw * hw)4614 bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
4615 {
4616 	u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
4617 
4618 	return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
4619 }
4620 
4621 /**
4622  * ixgbe_init_phy_ops_E610 - PHY specific init
4623  * @hw: pointer to hardware structure
4624  *
4625  * Initialize any function pointers that were not able to be
4626  * set during init_shared_code because the PHY type was not known.
4627  *
4628  * Return: the exit code of the operation.
4629  */
ixgbe_init_phy_ops_E610(struct ixgbe_hw * hw)4630 s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
4631 {
4632 	struct ixgbe_mac_info *mac = &hw->mac;
4633 	struct ixgbe_phy_info *phy = &hw->phy;
4634 	s32 ret_val;
4635 
4636 	phy->ops.identify_sfp = ixgbe_identify_module_E610;
4637 	phy->ops.read_reg = NULL; /* PHY reg access is not required */
4638 	phy->ops.write_reg = NULL;
4639 	phy->ops.read_reg_mdi = NULL;
4640 	phy->ops.write_reg_mdi = NULL;
4641 	phy->ops.setup_link = ixgbe_setup_phy_link_E610;
4642 	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
4643 	phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
4644 	phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
4645 	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
4646 	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
4647 	phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
4648 	phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation  */
4649 	phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
4650 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
4651 		phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
4652 	else
4653 		phy->ops.set_phy_power = NULL;
4654 	phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
4655 	phy->ops.handle_lasi = NULL; /* no implementation for E610 */
4656 	phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
4657 	phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
4658 
4659 	/* TODO: Set functions pointers based on device ID */
4660 
4661 	/* Identify the PHY */
4662 	ret_val = phy->ops.identify(hw);
4663 	if (ret_val != IXGBE_SUCCESS)
4664 		return ret_val;
4665 
4666 	/* TODO: Set functions pointers based on PHY type */
4667 
4668 	return ret_val;
4669 }
4670 
4671 /**
4672  * ixgbe_identify_phy_E610 - Identify PHY
4673  * @hw: pointer to hardware structure
4674  *
4675  * Determine PHY type, supported speeds and PHY ID.
4676  *
4677  * Return: the exit code of the operation.
4678  */
ixgbe_identify_phy_E610(struct ixgbe_hw * hw)4679 s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
4680 {
4681 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4682 	s32 rc;
4683 
4684 	/* Set PHY type */
4685 	hw->phy.type = ixgbe_phy_fw;
4686 
4687 	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4688 				    &pcaps);
4689 	if (rc)
4690 		return rc;
4691 
4692 	if (!(pcaps.module_compliance_enforcement &
4693 	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
4694 		/* Handle lenient mode */
4695 		rc = ixgbe_aci_get_phy_caps(hw, false,
4696 					    IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
4697 					    &pcaps);
4698 		if (rc)
4699 			return rc;
4700 	}
4701 
4702 	/* Determine supported speeds */
4703 	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
4704 
4705 	if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
4706 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
4707 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
4708 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
4709 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
4710 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
4711 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
4712 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_T  ||
4713 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
4714 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
4715 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
4716 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_1G_SGMII    ||
4717 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
4718 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
4719 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_T       ||
4720 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_DA      ||
4721 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_SR      ||
4722 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_LR      ||
4723 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1  ||
4724 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
4725 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C     ||
4726 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
4727 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
4728 
4729 	/* 2.5 and 5 Gbps link speeds must be excluded from the
4730 	 * auto-negotiation set used during driver initialization due to
4731 	 * compatibility issues with certain switches. Those issues do not
4732 	 * exist in case of E610 2.5G SKU device (0x57b1).
4733 	 */
4734 	if (!hw->phy.autoneg_advertised &&
4735 	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
4736 		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
4737 
4738 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_T   ||
4739 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_X   ||
4740 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_KX  ||
4741 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
4742 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
4743 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
4744 
4745 	if (!hw->phy.autoneg_advertised &&
4746 	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
4747 		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
4748 
4749 	if (pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_T  ||
4750 	    pcaps.phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
4751 	    pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
4752 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
4753 
4754 	/* Set PHY ID */
4755 	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
4756 
4757 	return IXGBE_SUCCESS;
4758 }
4759 
4760 /**
4761  * ixgbe_identify_module_E610 - Identify SFP module type
4762  * @hw: pointer to hardware structure
4763  *
4764  * Identify the SFP module type.
4765  *
4766  * Return: the exit code of the operation.
4767  */
ixgbe_identify_module_E610(struct ixgbe_hw * hw)4768 s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
4769 {
4770 	bool media_available;
4771 	u8 module_type;
4772 	s32 rc;
4773 
4774 	rc = ixgbe_update_link_info(hw);
4775 	if (rc)
4776 		goto err;
4777 
4778 	media_available =
4779 		(hw->link.link_info.link_info &
4780 		 IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
4781 
4782 	if (media_available) {
4783 		hw->phy.sfp_type = ixgbe_sfp_type_unknown;
4784 
4785 		/* Get module type from hw context updated by ixgbe_update_link_info() */
4786 		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
4787 
4788 		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
4789 		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
4790 			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
4791 		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
4792 			hw->phy.sfp_type = ixgbe_sfp_type_sr;
4793 		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
4794 			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
4795 			hw->phy.sfp_type = ixgbe_sfp_type_lr;
4796 		}
4797 		rc = IXGBE_SUCCESS;
4798 	} else {
4799 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
4800 		rc = IXGBE_ERR_SFP_NOT_PRESENT;
4801 	}
4802 err:
4803 	return rc;
4804 }
4805 
/**
 * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
 * @hw: pointer to hardware structure
 *
 * Set the parameters for the firmware-controlled PHYs: read the supported
 * PHY capabilities, rebuild the requested PHY type masks from
 * hw->phy.autoneg_advertised, intersect them with what the hardware
 * supports, and apply the configuration only if it differs from the
 * currently active one.
 *
 * Return: the exit code of the operation.
 */
s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
	u64 sup_phy_type_low, sup_phy_type_high;
	s32 rc;

	rc = ixgbe_aci_get_link_info(hw, false, NULL);
	if (rc) {
		goto err;
	}

	/* If media is not available get default config */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
		rmode = IXGBE_ACI_REPORT_DFLT_CFG;

	/* First query: what the hardware/media supports */
	rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
	if (rc) {
		goto err;
	}

	sup_phy_type_low = pcaps.phy_type_low;
	sup_phy_type_high = pcaps.phy_type_high;

	/* Get Active configuration to avoid unintended changes */
	rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
				    &pcaps);
	if (rc) {
		goto err;
	}
	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);

	/* Set default PHY types for a given speed */
	pcfg.phy_type_low = 0;
	pcfg.phy_type_high = 0;

	/* Translate each advertised speed into all PHY type bits that can
	 * carry it; unsupported ones are masked off afterwards.
	 */
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
		pcfg.phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
		pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
	}

	/* Mask the set values to avoid requesting unsupported link types */
	pcfg.phy_type_low &= sup_phy_type_low;
	pcfg.phy_type_high &= sup_phy_type_high;

	/* Only issue the set command when the request differs from the
	 * active configuration (pcaps holds the ACTIVE_CFG snapshot here).
	 */
	if (pcfg.phy_type_high != pcaps.phy_type_high ||
	    pcfg.phy_type_low != pcaps.phy_type_low ||
	    pcfg.caps != pcaps.caps) {
		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;

		rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
	}

err:
	return rc;
}
4907 
4908 /**
4909  * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
4910  * @hw: pointer to hardware structure
4911  * @firmware_version: pointer to the PHY Firmware Version
4912  *
4913  * Determines PHY FW version based on response to Get PHY Capabilities
4914  * admin command (0x0600).
4915  *
4916  * Return: the exit code of the operation.
4917  */
ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw * hw,u16 * firmware_version)4918 s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
4919 					u16 *firmware_version)
4920 {
4921 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4922 	s32 status;
4923 
4924 	if (!firmware_version)
4925 		return IXGBE_ERR_PARAM;
4926 
4927 	status = ixgbe_aci_get_phy_caps(hw, false,
4928 					IXGBE_ACI_REPORT_ACTIVE_CFG,
4929 					&pcaps);
4930 	if (status)
4931 		return status;
4932 
4933 	/* TODO: determine which bytes of the 8-byte phy_fw_ver
4934 	 * field should be written to the 2-byte firmware_version
4935 	 * output argument. */
4936 	memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
4937 
4938 	return IXGBE_SUCCESS;
4939 }
4940 
4941 /**
4942  * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
4943  * @hw: pointer to hardware structure
4944  * @byte_offset: byte offset at address 0xA2
4945  * @sff8472_data: value read
4946  *
4947  * Performs byte read operation from SFP module's SFF-8472 data over I2C.
4948  *
4949  * Return: the exit code of the operation.
4950  **/
ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 * sff8472_data)4951 s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
4952 				u8 *sff8472_data)
4953 {
4954 	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
4955 				    byte_offset, 0,
4956 				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
4957 				    sff8472_data, 1, false);
4958 }
4959 
4960 /**
4961  * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
4962  * @hw: pointer to hardware structure
4963  * @byte_offset: EEPROM byte offset to read
4964  * @eeprom_data: value read
4965  *
4966  * Performs byte read operation from SFP module's EEPROM over I2C interface.
4967  *
4968  * Return: the exit code of the operation.
4969  **/
ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 * eeprom_data)4970 s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
4971 			       u8 *eeprom_data)
4972 {
4973 	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
4974 				    byte_offset, 0,
4975 				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
4976 				    eeprom_data, 1, false);
4977 }
4978 
4979 /**
4980  * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
4981  * @hw: pointer to hardware structure
4982  * @byte_offset: EEPROM byte offset to write
4983  * @eeprom_data: value to write
4984  *
4985  * Performs byte write operation to SFP module's EEPROM over I2C interface.
4986  *
4987  * Return: the exit code of the operation.
4988  **/
ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 eeprom_data)4989 s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
4990 				u8 eeprom_data)
4991 {
4992 	return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
4993 				    byte_offset, 0,
4994 				    IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
4995 				    &eeprom_data, 1, true);
4996 }
4997 
4998 /**
4999  * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
5000  * @hw: pointer to hardware structure
5001  *
5002  * Get the link status and check if the PHY temperature alarm detected.
5003  *
5004  * Return: the exit code of the operation.
5005  */
ixgbe_check_overtemp_E610(struct ixgbe_hw * hw)5006 s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
5007 {
5008 	struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
5009 	struct ixgbe_aci_cmd_get_link_status *resp;
5010 	struct ixgbe_aci_desc desc;
5011 	s32 status = IXGBE_SUCCESS;
5012 
5013 	if (!hw)
5014 		return IXGBE_ERR_PARAM;
5015 
5016 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
5017 	resp = &desc.params.get_link_status;
5018 	resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
5019 
5020 	status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
5021 	if (status != IXGBE_SUCCESS)
5022 		return status;
5023 
5024 	if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
5025 		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
5026 			      "PHY Temperature Alarm detected");
5027 		status = IXGBE_ERR_OVERTEMP;
5028 	}
5029 
5030 	return status;
5031 }
5032 
5033 /**
5034  * ixgbe_set_phy_power_E610 - Control power for copper PHY
5035  * @hw: pointer to hardware structure
5036  * @on: true for on, false for off
5037  *
5038  * Set the power on/off of the PHY
5039  * by getting its capabilities and setting the appropriate
5040  * configuration parameters.
5041  *
5042  * Return: the exit code of the operation.
5043  */
ixgbe_set_phy_power_E610(struct ixgbe_hw * hw,bool on)5044 s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
5045 {
5046 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5047 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5048 	s32 status;
5049 
5050 	status = ixgbe_aci_get_phy_caps(hw, false,
5051 		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5052 	if (status != IXGBE_SUCCESS)
5053 		return status;
5054 
5055 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5056 
5057 	if (on) {
5058 		phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
5059 	} else {
5060 		phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
5061 	}
5062 
5063 	/* PHY is already in requested power mode */
5064 	if (phy_caps.caps == phy_cfg.caps)
5065 		return IXGBE_SUCCESS;
5066 
5067 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
5068 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
5069 
5070 	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5071 
5072 	return status;
5073 }
5074 
5075 /**
5076  * ixgbe_enter_lplu_E610 - Transition to low power states
5077  * @hw: pointer to hardware structure
5078  *
5079  * Configures Low Power Link Up on transition to low power states
5080  * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
5081  * X557 PHY immediately prior to entering LPLU.
5082  *
5083  * Return: the exit code of the operation.
5084  */
ixgbe_enter_lplu_E610(struct ixgbe_hw * hw)5085 s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
5086 {
5087 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5088 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5089 	s32 status;
5090 
5091 	status = ixgbe_aci_get_phy_caps(hw, false,
5092 		IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5093 	if (status != IXGBE_SUCCESS)
5094 		return status;
5095 
5096 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5097 
5098 	phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
5099 
5100 	status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5101 
5102 	return status;
5103 }
5104 
5105 /**
5106  * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
5107  * @hw: pointer to hardware structure
5108  *
5109  * Initializes the EEPROM parameters ixgbe_eeprom_info within the
5110  * ixgbe_hw struct in order to set up EEPROM access.
5111  *
5112  * Return: the exit code of the operation.
5113  */
ixgbe_init_eeprom_params_E610(struct ixgbe_hw * hw)5114 s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
5115 {
5116 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5117 	u32 gens_stat;
5118 	u8 sr_size;
5119 
5120 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
5121 		eeprom->type = ixgbe_flash;
5122 
5123 		gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
5124 		sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
5125 			  GLNVM_GENS_SR_SIZE_S;
5126 
5127 		/* Switching to words (sr_size contains power of 2) */
5128 		eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
5129 
5130 		DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
5131 			  eeprom->type, eeprom->word_size);
5132 	}
5133 
5134 	return IXGBE_SUCCESS;
5135 }
5136 
5137 /**
5138  * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
5139  * @hw: pointer to hardware structure
5140  * @offset: offset of  word in the EEPROM to read
5141  * @data: word read from the EEPROM
5142  *
5143  * Reads a 16 bit word from the EEPROM using the ACI.
5144  * If the EEPROM params are not initialized, the function
5145  * initialize them before proceeding with reading.
5146  * The function acquires and then releases the NVM ownership.
5147  *
5148  * Return: the exit code of the operation.
5149  */
ixgbe_read_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 * data)5150 s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
5151 {
5152 	s32 status;
5153 
5154 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5155 		status = ixgbe_init_eeprom_params(hw);
5156 		if (status)
5157 			return status;
5158 	}
5159 
5160 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5161 	if (status)
5162 		return status;
5163 
5164 	status = ixgbe_read_sr_word_aci(hw, offset, data);
5165 	ixgbe_release_nvm(hw);
5166 
5167 	return status;
5168 }
5169 
5170 /**
5171  * ixgbe_read_ee_aci_buffer_E610- Read EEPROM word(s) using admin commands.
5172  * @hw: pointer to hardware structure
5173  * @offset: offset of  word in the EEPROM to read
5174  * @words: number of words
5175  * @data: word(s) read from the EEPROM
5176  *
5177  * Reads a 16 bit word(s) from the EEPROM using the ACI.
5178  * If the EEPROM params are not initialized, the function
5179  * initialize them before proceeding with reading.
5180  * The function acquires and then releases the NVM ownership.
5181  *
5182  * Return: the exit code of the operation.
5183  */
ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5184 s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5185 				  u16 words, u16 *data)
5186 {
5187 	s32 status;
5188 
5189 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5190 		status = ixgbe_init_eeprom_params(hw);
5191 		if (status)
5192 			return status;
5193 	}
5194 
5195 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5196 	if (status)
5197 		return status;
5198 
5199 	status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
5200 	ixgbe_release_nvm(hw);
5201 
5202 	return status;
5203 }
5204 
5205 /**
5206  * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
5207  * @hw: pointer to hardware structure
5208  * @offset: offset of  word in the EEPROM to write
5209  * @data: word write to the EEPROM
5210  *
5211  * Write a 16 bit word to the EEPROM using the ACI.
5212  * If the EEPROM params are not initialized, the function
5213  * initialize them before proceeding with writing.
5214  * The function acquires and then releases the NVM ownership.
5215  *
5216  * Return: the exit code of the operation.
5217  */
ixgbe_write_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 data)5218 s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
5219 {
5220 	s32 status;
5221 
5222 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5223 		status = ixgbe_init_eeprom_params(hw);
5224 		if (status)
5225 			return status;
5226 	}
5227 
5228 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5229 	if (status)
5230 		return status;
5231 
5232 	status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
5233 	ixgbe_release_nvm(hw);
5234 
5235 	return status;
5236 }
5237 
5238 /**
5239  * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
5240  * @hw: pointer to hardware structure
5241  * @offset: offset of  word in the EEPROM to write
5242  * @words: number of words
5243  * @data: word(s) write to the EEPROM
5244  *
5245  * Write a 16 bit word(s) to the EEPROM using the ACI.
5246  * If the EEPROM params are not initialized, the function
5247  * initialize them before proceeding with writing.
5248  * The function acquires and then releases the NVM ownership.
5249  *
5250  * Return: the exit code of the operation.
5251  */
ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5252 s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5253 				   u16 words, u16 *data)
5254 {
5255 	s32 status;
5256 
5257 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5258 		status = ixgbe_init_eeprom_params(hw);
5259 		if (status)
5260 			return status;
5261 	}
5262 
5263 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5264 	if (status)
5265 		return status;
5266 
5267 	status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
5268 	ixgbe_release_nvm(hw);
5269 
5270 	return status;
5271 }
5272 
5273 /**
5274  * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
5275  * @hw: pointer to hardware structure
5276  *
5277  * Calculate SW Checksum that covers the whole 64kB shadow RAM
5278  * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
5279  * is customer specific and unknown. Therefore, this function skips all maximum
5280  * possible size of VPD (1kB).
5281  * If the EEPROM params are not initialized, the function
5282  * initializes them before proceeding.
5283  * The function acquires and then releases the NVM ownership.
5284  *
5285  * Return: the negative error code on error, or the 16-bit checksum
5286  */
ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw * hw)5287 s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
5288 {
5289 	bool nvm_acquired = false;
5290 	u16 pcie_alt_module = 0;
5291 	u16 checksum_local = 0;
5292 	u16 checksum = 0;
5293 	u16 vpd_module;
5294 	void *vmem;
5295 	s32 status;
5296 	u16 *data;
5297 	u16 i;
5298 
5299 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5300 		status = ixgbe_init_eeprom_params(hw);
5301 		if (status)
5302 			return status;
5303 	}
5304 
5305 	vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
5306 	if (!vmem)
5307 		return IXGBE_ERR_OUT_OF_MEM;
5308 	data = (u16 *)vmem;
5309 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5310 	if (status)
5311 		goto ixgbe_calc_sr_checksum_exit;
5312 	nvm_acquired = true;
5313 
5314 	/* read pointer to VPD area */
5315 	status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
5316 	if (status)
5317 		goto ixgbe_calc_sr_checksum_exit;
5318 
5319 	/* read pointer to PCIe Alt Auto-load module */
5320 	status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
5321 					&pcie_alt_module);
5322 	if (status)
5323 		goto ixgbe_calc_sr_checksum_exit;
5324 
5325 	/* Calculate SW checksum that covers the whole 64kB shadow RAM
5326 	 * except the VPD and PCIe ALT Auto-load modules
5327 	 */
5328 	for (i = 0; i < hw->eeprom.word_size; i++) {
5329 		/* Read SR page */
5330 		if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
5331 			u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
5332 
5333 			status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
5334 			if (status != IXGBE_SUCCESS)
5335 				goto ixgbe_calc_sr_checksum_exit;
5336 		}
5337 
5338 		/* Skip Checksum word */
5339 		if (i == E610_SR_SW_CHECKSUM_WORD)
5340 			continue;
5341 		/* Skip VPD module (convert byte size to word count) */
5342 		if (i >= (u32)vpd_module &&
5343 		    i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
5344 			continue;
5345 		/* Skip PCIe ALT module (convert byte size to word count) */
5346 		if (i >= (u32)pcie_alt_module &&
5347 		    i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
5348 			continue;
5349 
5350 		checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
5351 	}
5352 
5353 	checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
5354 
5355 ixgbe_calc_sr_checksum_exit:
5356 	if(nvm_acquired)
5357 		ixgbe_release_nvm(hw);
5358 	ixgbe_free(hw, vmem);
5359 
5360 	if(!status)
5361 		return (s32)checksum;
5362 	else
5363 		return status;
5364 }
5365 
5366 /**
5367  * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
5368  * @hw: pointer to hardware structure
5369  *
5370  * After writing EEPROM to Shadow RAM, software sends the admin command
5371  * to recalculate and update EEPROM checksum and instructs the hardware
5372  * to update the flash.
5373  * If the EEPROM params are not initialized, the function
5374  * initialize them before proceeding.
5375  * The function acquires and then releases the NVM ownership.
5376  *
5377  * Return: the exit code of the operation.
5378  */
ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw * hw)5379 s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
5380 {
5381 	s32 status;
5382 
5383 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5384 		status = ixgbe_init_eeprom_params(hw);
5385 		if (status)
5386 			return status;
5387 	}
5388 
5389 	status = ixgbe_nvm_recalculate_checksum(hw);
5390 	if (status)
5391 		return status;
5392 	status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5393 	if (status)
5394 		return status;
5395 
5396 	status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
5397 					  NULL);
5398 	ixgbe_release_nvm(hw);
5399 
5400 	return status;
5401 }
5402 
5403 /**
5404  * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
5405  * @hw: pointer to hardware structure
5406  * @checksum_val: calculated checksum
5407  *
5408  * Performs checksum calculation and validates the EEPROM checksum. If the
5409  * caller does not need checksum_val, the value can be NULL.
5410  * If the EEPROM params are not initialized, the function
5411  * initialize them before proceeding.
5412  * The function acquires and then releases the NVM ownership.
5413  *
5414  * Return: the exit code of the operation.
5415  */
ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw * hw,u16 * checksum_val)5416 s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
5417 {
5418 	u32 status;
5419 
5420 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5421 		status = ixgbe_init_eeprom_params(hw);
5422 		if (status)
5423 			return status;
5424 	}
5425 
5426 	status = ixgbe_nvm_validate_checksum(hw);
5427 
5428 	if (status)
5429 		return status;
5430 
5431 	if (checksum_val) {
5432 		u16 tmp_checksum;
5433 		status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5434 		if (status)
5435 			return status;
5436 
5437 		status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
5438 						&tmp_checksum);
5439 		ixgbe_release_nvm(hw);
5440 
5441 		if (!status)
5442 			*checksum_val = tmp_checksum;
5443 	}
5444 
5445 	return status;
5446 }
5447 
5448 /**
5449  * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
5450  * @hw: pointer to hardware structure
5451  * @module_tlv: pointer to module TLV to return
5452  * @module_tlv_len: pointer to module TLV length to return
5453  * @module_type: module type requested
5454  *
5455  * Finds the requested sub module TLV type from the Preserved Field
5456  * Area (PFA) and returns the TLV pointer and length. The caller can
5457  * use these to read the variable length TLV value.
5458  *
5459  * Return: the exit code of the operation.
5460  */
ixgbe_get_pfa_module_tlv(struct ixgbe_hw * hw,u16 * module_tlv,u16 * module_tlv_len,u16 module_type)5461 static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
5462 				    u16 *module_tlv_len, u16 module_type)
5463 {
5464 	u16 pfa_len, pfa_ptr, pfa_end_ptr;
5465 	u16 next_tlv;
5466 	s32 status;
5467 
5468 	status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
5469 	if (status != IXGBE_SUCCESS) {
5470 		return status;
5471 	}
5472 	status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
5473 	if (status != IXGBE_SUCCESS) {
5474 		return status;
5475 	}
5476 	/* Starting with first TLV after PFA length, iterate through the list
5477 	 * of TLVs to find the requested one.
5478 	 */
5479 	next_tlv = pfa_ptr + 1;
5480 	pfa_end_ptr = pfa_ptr + pfa_len;
5481 	while (next_tlv < pfa_end_ptr) {
5482 		u16 tlv_sub_module_type, tlv_len;
5483 
5484 		/* Read TLV type */
5485 		status = ixgbe_read_ee_aci_E610(hw, next_tlv,
5486 						&tlv_sub_module_type);
5487 		if (status != IXGBE_SUCCESS) {
5488 			break;
5489 		}
5490 		/* Read TLV length */
5491 		status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
5492 		if (status != IXGBE_SUCCESS) {
5493 			break;
5494 		}
5495 		if (tlv_sub_module_type == module_type) {
5496 			if (tlv_len) {
5497 				*module_tlv = next_tlv;
5498 				*module_tlv_len = tlv_len;
5499 				return IXGBE_SUCCESS;
5500 			}
5501 			return IXGBE_ERR_INVAL_SIZE;
5502 		}
5503 		/* Check next TLV, i.e. current TLV pointer + length + 2 words
5504 		 * (for current TLV's type and length)
5505 		 */
5506 		next_tlv = next_tlv + tlv_len + 2;
5507 	}
5508 	/* Module does not exist */
5509 	return IXGBE_ERR_DOES_NOT_EXIST;
5510 }
5511 
5512 /**
5513  * ixgbe_read_pba_string_E610 - Reads part number string from NVM
5514  * @hw: pointer to hardware structure
5515  * @pba_num: stores the part number string from the NVM
5516  * @pba_num_size: part number string buffer length
5517  *
5518  * Reads the part number string from the NVM.
5519  *
5520  * Return: the exit code of the operation.
5521  */
ixgbe_read_pba_string_E610(struct ixgbe_hw * hw,u8 * pba_num,u32 pba_num_size)5522 s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
5523 			       u32 pba_num_size)
5524 {
5525 	u16 pba_tlv, pba_tlv_len;
5526 	u16 pba_word, pba_size;
5527 	s32 status;
5528 	u16 i;
5529 
5530 	status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
5531 					E610_SR_PBA_BLOCK_PTR);
5532 	if (status != IXGBE_SUCCESS) {
5533 		return status;
5534 	}
5535 
5536 	/* pba_size is the next word */
5537 	status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
5538 	if (status != IXGBE_SUCCESS) {
5539 		return status;
5540 	}
5541 
5542 	if (pba_tlv_len < pba_size) {
5543 		return IXGBE_ERR_INVAL_SIZE;
5544 	}
5545 
5546 	/* Subtract one to get PBA word count (PBA Size word is included in
5547 	 * total size)
5548 	 */
5549 	pba_size--;
5550 	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
5551 		return IXGBE_ERR_PARAM;
5552 	}
5553 
5554 	for (i = 0; i < pba_size; i++) {
5555 		status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
5556 						&pba_word);
5557 		if (status != IXGBE_SUCCESS) {
5558 			return status;
5559 		}
5560 
5561 		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
5562 		pba_num[(i * 2) + 1] = pba_word & 0xFF;
5563 	}
5564 	pba_num[(pba_size * 2)] = '\0';
5565 
5566 	return status;
5567 }
5568