xref: /linux/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c (revision 8210ff738077ed3581e022e5cc8721aa041d42cb)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2024 Intel Corporation. */
3 
4 #include "ixgbe_common.h"
5 #include "ixgbe_e610.h"
6 #include "ixgbe_x550.h"
7 #include "ixgbe_type.h"
8 #include "ixgbe_x540.h"
9 #include "ixgbe_mbx.h"
10 #include "ixgbe_phy.h"
11 
12 /**
13  * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
14  * be resent
15  * @opcode: ACI opcode
16  *
17  * Check if ACI command should be sent again depending on the provided opcode.
18  * It may happen when CSR is busy during link state changes.
19  *
20  * Return: true if the sending command routine should be repeated,
21  * otherwise false.
22  */
23 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
24 {
25 	switch (opcode) {
26 	case ixgbe_aci_opc_disable_rxen:
27 	case ixgbe_aci_opc_get_phy_caps:
28 	case ixgbe_aci_opc_get_link_status:
29 	case ixgbe_aci_opc_get_link_topo:
30 		return true;
31 	}
32 
33 	return false;
34 }
35 
36 /**
37  * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
38  * Command Interface
39  * @hw: pointer to the HW struct
40  * @desc: descriptor describing the command
41  * @buf: buffer to use for indirect commands (NULL for direct commands)
42  * @buf_size: size of buffer for indirect commands (0 for direct commands)
43  *
44  * Admin Command is sent using CSR by setting descriptor and buffer in specific
45  * registers.
46  *
47  * Return: the exit code of the operation.
48  * * - 0 - success.
49  * * - -EIO - CSR mechanism is not enabled.
50  * * - -EBUSY - CSR mechanism is busy.
51  * * - -EINVAL - buf_size is too big or
52  * invalid argument buf or buf_size.
53  * * - -ETIME - Admin Command X command timeout.
54  * * - -EIO - Admin Command X invalid state of HICR register or
55  * Admin Command failed because of bad opcode was returned or
56  * Admin Command failed with error Y.
57  */
58 static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
59 				      struct ixgbe_aci_desc *desc,
60 				      void *buf, u16 buf_size)
61 {
62 	u16 opcode, buf_tail_size = buf_size % 4;
63 	u32 *raw_desc = (u32 *)desc;
64 	u32 hicr, i, buf_tail = 0;
65 	bool valid_buf = false;
66 
67 	hw->aci.last_status = IXGBE_ACI_RC_OK;
68 
69 	/* It's necessary to check if mechanism is enabled */
70 	hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
71 
72 	if (!(hicr & IXGBE_PF_HICR_EN))
73 		return -EIO;
74 
75 	if (hicr & IXGBE_PF_HICR_C) {
76 		hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
77 		return -EBUSY;
78 	}
79 
80 	opcode = le16_to_cpu(desc->opcode);
81 
82 	if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
83 		return -EINVAL;
84 
85 	if (buf)
86 		desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
87 
88 	if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
89 		if ((buf && !buf_size) ||
90 		    (!buf && buf_size))
91 			return -EINVAL;
92 		if (buf && buf_size)
93 			valid_buf = true;
94 	}
95 
96 	if (valid_buf) {
97 		if (buf_tail_size)
98 			memcpy(&buf_tail, buf + buf_size - buf_tail_size,
99 			       buf_tail_size);
100 
101 		if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
102 			desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
103 
104 		desc->datalen = cpu_to_le16(buf_size);
105 
106 		if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
107 			for (i = 0; i < buf_size / 4; i++)
108 				IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
109 			if (buf_tail_size)
110 				IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
111 		}
112 	}
113 
114 	/* Descriptor is written to specific registers */
115 	for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
116 		IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
117 
118 	/* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
119 	 * PF_HICR_EV
120 	 */
121 	hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
122 	       ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
123 	IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
124 
125 #define MAX_SLEEP_RESP_US 1000
126 #define MAX_TMOUT_RESP_SYNC_US 100000000
127 
128 	/* Wait for sync Admin Command response */
129 	read_poll_timeout(IXGBE_READ_REG, hicr,
130 			  (hicr & IXGBE_PF_HICR_SV) ||
131 			  !(hicr & IXGBE_PF_HICR_C),
132 			  MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
133 			  IXGBE_PF_HICR);
134 
135 #define MAX_TMOUT_RESP_ASYNC_US 150000000
136 
137 	/* Wait for async Admin Command response */
138 	read_poll_timeout(IXGBE_READ_REG, hicr,
139 			  (hicr & IXGBE_PF_HICR_EV) ||
140 			  !(hicr & IXGBE_PF_HICR_C),
141 			  MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
142 			  IXGBE_PF_HICR);
143 
144 	/* Read sync Admin Command response */
145 	if ((hicr & IXGBE_PF_HICR_SV)) {
146 		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
147 			raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
148 			raw_desc[i] = raw_desc[i];
149 		}
150 	}
151 
152 	/* Read async Admin Command response */
153 	if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
154 		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
155 			raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
156 			raw_desc[i] = raw_desc[i];
157 		}
158 	}
159 
160 	/* Handle timeout and invalid state of HICR register */
161 	if (hicr & IXGBE_PF_HICR_C)
162 		return -ETIME;
163 
164 	if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
165 		return -EIO;
166 
167 	/* For every command other than 0x0014 treat opcode mismatch
168 	 * as an error. Response to 0x0014 command read from HIDA_2
169 	 * is a descriptor of an event which is expected to contain
170 	 * different opcode than the command.
171 	 */
172 	if (desc->opcode != cpu_to_le16(opcode) &&
173 	    opcode != ixgbe_aci_opc_get_fw_event)
174 		return -EIO;
175 
176 	if (desc->retval) {
177 		hw->aci.last_status = (enum ixgbe_aci_err)
178 			le16_to_cpu(desc->retval);
179 		return -EIO;
180 	}
181 
182 	/* Write a response values to a buf */
183 	if (valid_buf) {
184 		for (i = 0; i < buf_size / 4; i++)
185 			((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
186 		if (buf_tail_size) {
187 			buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
188 			memcpy(buf + buf_size - buf_tail_size, &buf_tail,
189 			       buf_tail_size);
190 		}
191 	}
192 
193 	return 0;
194 }
195 
196 /**
197  * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
198  * @hw: pointer to the HW struct
199  * @desc: descriptor describing the command
200  * @buf: buffer to use for indirect commands (NULL for direct commands)
201  * @buf_size: size of buffer for indirect commands (0 for direct commands)
202  *
203  * Helper function to send FW Admin Commands to the FW Admin Command Interface.
204  *
205  * Retry sending the FW Admin Command multiple times to the FW ACI
206  * if the EBUSY Admin Command error is returned.
207  *
208  * Return: the exit code of the operation.
209  */
210 int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
211 		       void *buf, u16 buf_size)
212 {
213 	u16 opcode = le16_to_cpu(desc->opcode);
214 	struct ixgbe_aci_desc desc_cpy;
215 	enum ixgbe_aci_err last_status;
216 	u8 idx = 0, *buf_cpy = NULL;
217 	bool is_cmd_for_retry;
218 	unsigned long timeout;
219 	int err;
220 
221 	is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
222 	if (is_cmd_for_retry) {
223 		if (buf) {
224 			buf_cpy = kmalloc(buf_size, GFP_KERNEL);
225 			if (!buf_cpy)
226 				return -ENOMEM;
227 			*buf_cpy = *(u8 *)buf;
228 		}
229 		desc_cpy = *desc;
230 	}
231 
232 	timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
233 	do {
234 		mutex_lock(&hw->aci.lock);
235 		err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
236 		last_status = hw->aci.last_status;
237 		mutex_unlock(&hw->aci.lock);
238 
239 		if (!is_cmd_for_retry || !err ||
240 		    last_status != IXGBE_ACI_RC_EBUSY)
241 			break;
242 
243 		if (buf)
244 			memcpy(buf, buf_cpy, buf_size);
245 		*desc = desc_cpy;
246 
247 		msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
248 	} while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
249 		 time_before(jiffies, timeout));
250 
251 	kfree(buf_cpy);
252 
253 	return err;
254 }
255 
256 /**
257  * ixgbe_aci_check_event_pending - check if there are any pending events
258  * @hw: pointer to the HW struct
259  *
260  * Determine if there are any pending events.
261  *
262  * Return: true if there are any currently pending events
263  * otherwise false.
264  */
265 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
266 {
267 	u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
268 	u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
269 
270 	return (fwsts & ep_bit_mask) ? true : false;
271 }
272 
/**
 * ixgbe_aci_get_event - get an event from ACI
 * @hw: pointer to the HW struct
 * @e: event information structure
 * @pending: optional flag signaling that there are more pending events
 *
 * Obtain an event from ACI and return its content
 * through 'e' using ACI command (0x0014).
 * Provide information if there are more events
 * to retrieve through 'pending'.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
			bool *pending)
{
	struct ixgbe_aci_desc desc;
	int err;

	/* A non-zero buffer length without a buffer is invalid */
	if (!e || (!e->msg_buf && e->buf_len))
		return -EINVAL;

	/* Hold the ACI lock across the pending check and the fetch so a
	 * concurrent caller cannot consume the event in between.
	 */
	mutex_lock(&hw->aci.lock);

	/* Check if there are any events pending */
	if (!ixgbe_aci_check_event_pending(hw)) {
		err = -ENOENT;
		goto aci_get_event_exit;
	}

	/* Obtain pending event */
	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
	err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
	if (err)
		goto aci_get_event_exit;

	/* Returned 0x0014 opcode indicates that no event was obtained */
	if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
		err = -ENOENT;
		goto aci_get_event_exit;
	}

	/* Determine size of event data, capped at the caller's buffer size */
	e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
	/* Write event descriptor to event info structure */
	memcpy(&e->desc, &desc, sizeof(e->desc));

	/* Check if there are any further events pending */
	if (pending)
		*pending = ixgbe_aci_check_event_pending(hw);

aci_get_event_exit:
	mutex_unlock(&hw->aci.lock);

	return err;
}
329 
330 /**
331  * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
332  * @desc: pointer to the temp descriptor (non DMA mem)
333  * @opcode: the opcode can be used to decide which flags to turn off or on
334  *
335  * Helper function to fill the descriptor desc with default values
336  * and the provided opcode.
337  */
338 void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
339 {
340 	/* Zero out the desc. */
341 	memset(desc, 0, sizeof(*desc));
342 	desc->opcode = cpu_to_le16(opcode);
343 	desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
344 }
345 
346 /**
347  * ixgbe_aci_req_res - request a common resource
348  * @hw: pointer to the HW struct
349  * @res: resource ID
350  * @access: access type
351  * @sdp_number: resource number
352  * @timeout: the maximum time in ms that the driver may hold the resource
353  *
354  * Requests a common resource using the ACI command (0x0008).
355  * Specifies the maximum time the driver may hold the resource.
356  * If the requested resource is currently occupied by some other driver,
357  * a busy return value is returned and the timeout field value indicates the
358  * maximum time the current owner has to free it.
359  *
360  * Return: the exit code of the operation.
361  */
362 static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
363 			     enum ixgbe_aci_res_access_type access,
364 			     u8 sdp_number, u32 *timeout)
365 {
366 	struct ixgbe_aci_cmd_req_res *cmd_resp;
367 	struct ixgbe_aci_desc desc;
368 	int err;
369 
370 	cmd_resp = &desc.params.res_owner;
371 
372 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
373 
374 	cmd_resp->res_id = cpu_to_le16(res);
375 	cmd_resp->access_type = cpu_to_le16(access);
376 	cmd_resp->res_number = cpu_to_le32(sdp_number);
377 	cmd_resp->timeout = cpu_to_le32(*timeout);
378 	*timeout = 0;
379 
380 	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
381 
382 	/* If the resource is held by some other driver, the command completes
383 	 * with a busy return value and the timeout field indicates the maximum
384 	 * time the current owner of the resource has to free it.
385 	 */
386 	if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
387 		*timeout = le32_to_cpu(cmd_resp->timeout);
388 
389 	return err;
390 }
391 
392 /**
393  * ixgbe_aci_release_res - release a common resource using ACI
394  * @hw: pointer to the HW struct
395  * @res: resource ID
396  * @sdp_number: resource number
397  *
398  * Release a common resource using ACI command (0x0009).
399  *
400  * Return: the exit code of the operation.
401  */
402 static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
403 				 enum ixgbe_aci_res_ids res, u8 sdp_number)
404 {
405 	struct ixgbe_aci_cmd_req_res *cmd;
406 	struct ixgbe_aci_desc desc;
407 
408 	cmd = &desc.params.res_owner;
409 
410 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
411 
412 	cmd->res_id = cpu_to_le16(res);
413 	cmd->res_number = cpu_to_le32(sdp_number);
414 
415 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
416 }
417 
418 /**
419  * ixgbe_acquire_res - acquire the ownership of a resource
420  * @hw: pointer to the HW structure
421  * @res: resource ID
422  * @access: access type (read or write)
423  * @timeout: timeout in milliseconds
424  *
425  * Make an attempt to acquire the ownership of a resource using
426  * the ixgbe_aci_req_res to utilize ACI.
427  * In case if some other driver has previously acquired the resource and
428  * performed any necessary updates, the -EALREADY is returned,
429  * and the caller does not obtain the resource and has no further work to do.
430  * If needed, the function will poll until the current lock owner timeouts.
431  *
432  * Return: the exit code of the operation.
433  */
434 int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
435 		      enum ixgbe_aci_res_access_type access, u32 timeout)
436 {
437 #define IXGBE_RES_POLLING_DELAY_MS	10
438 	u32 delay = IXGBE_RES_POLLING_DELAY_MS;
439 	u32 res_timeout = timeout;
440 	u32 retry_timeout;
441 	int err;
442 
443 	err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
444 
445 	/* A return code of -EALREADY means that another driver has
446 	 * previously acquired the resource and performed any necessary updates;
447 	 * in this case the caller does not obtain the resource and has no
448 	 * further work to do.
449 	 */
450 	if (err == -EALREADY)
451 		return err;
452 
453 	/* If necessary, poll until the current lock owner timeouts.
454 	 * Set retry_timeout to the timeout value reported by the FW in the
455 	 * response to the "Request Resource Ownership" (0x0008) Admin Command
456 	 * as it indicates the maximum time the current owner of the resource
457 	 * is allowed to hold it.
458 	 */
459 	retry_timeout = res_timeout;
460 	while (err && retry_timeout && res_timeout) {
461 		msleep(delay);
462 		retry_timeout = (retry_timeout > delay) ?
463 			retry_timeout - delay : 0;
464 		err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
465 
466 		/* Success - lock acquired.
467 		 * -EALREADY - lock free, no work to do.
468 		 */
469 		if (!err || err == -EALREADY)
470 			break;
471 	}
472 
473 	return err;
474 }
475 
476 /**
477  * ixgbe_release_res - release a common resource
478  * @hw: pointer to the HW structure
479  * @res: resource ID
480  *
481  * Release a common resource using ixgbe_aci_release_res.
482  */
483 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
484 {
485 	u32 total_delay = 0;
486 	int err;
487 
488 	err = ixgbe_aci_release_res(hw, res, 0);
489 
490 	/* There are some rare cases when trying to release the resource
491 	 * results in an admin command timeout, so handle them correctly.
492 	 */
493 	while (err == -ETIME &&
494 	       total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
495 		usleep_range(1000, 1500);
496 		err = ixgbe_aci_release_res(hw, res, 0);
497 		total_delay++;
498 	}
499 }
500 
/**
 * ixgbe_parse_e610_caps - Parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * NOTE(review): @hw and @prefix are not referenced in this body —
 * presumably reserved for capability tracing; confirm before removing.
 *
 * Return: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
				  struct ixgbe_hw_caps *caps,
				  struct ixgbe_aci_cmd_list_caps_elem *elem,
				  const char *prefix)
{
	/* All element fields arrive little-endian from firmware */
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);

	switch (cap) {
	case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		break;
	case IXGBE_ACI_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		break;
	case IXGBE_ACI_CAPS_VMDQ:
		caps->vmdq = (number == 1);
		break;
	case IXGBE_ACI_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		break;
	case IXGBE_ACI_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		break;
	case IXGBE_ACI_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_NVM_VER:
		/* Recognized but carries no data to store */
		break;
	case IXGBE_ACI_CAPS_MAX_MTU:
		caps->max_mtu = number;
		break;
	case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		break;
	case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		break;
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
	{
		/* The four IMG capability ids are consecutive, so the
		 * offset from IMG0 is the image index.
		 */
		u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;

		caps->ext_topo_dev_img_ver_high[index] = number;
		caps->ext_topo_dev_img_ver_low[index] = logical_id;
		caps->ext_topo_dev_img_part_num[index] =
			FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
		caps->ext_topo_dev_img_load_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
		caps->ext_topo_dev_img_prog_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
		break;
	}
	default:
		/* Not one of the recognized common capabilities */
		return false;
	}

	return true;
}
590 
591 /**
592  * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
593  * @hw: pointer to the HW struct
594  * @dev_p: pointer to device capabilities structure
595  * @cap: capability element to parse
596  *
597  * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
598  */
599 static void
600 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
601 				struct ixgbe_hw_dev_caps *dev_p,
602 				struct ixgbe_aci_cmd_list_caps_elem *cap)
603 {
604 	dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
605 }
606 
607 /**
608  * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
609  * @hw: pointer to the HW struct
610  * @dev_p: pointer to device capabilities structure
611  * @cap: capability element to parse
612  *
613  * Parse IXGBE_ACI_CAPS_VF for device capabilities.
614  */
615 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
616 				    struct ixgbe_hw_dev_caps *dev_p,
617 				    struct ixgbe_aci_cmd_list_caps_elem *cap)
618 {
619 	dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
620 }
621 
622 /**
623  * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
624  * @hw: pointer to the HW struct
625  * @dev_p: pointer to device capabilities structure
626  * @cap: capability element to parse
627  *
628  * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
629  */
630 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
631 				     struct ixgbe_hw_dev_caps *dev_p,
632 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
633 {
634 	dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
635 }
636 
637 /**
638  * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
639  * @hw: pointer to the HW struct
640  * @dev_p: pointer to device capabilities structure
641  * @cap: capability element to parse
642  *
643  * Parse IXGBE_ACI_CAPS_FD for device capabilities.
644  */
645 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
646 				      struct ixgbe_hw_dev_caps *dev_p,
647 				      struct ixgbe_aci_cmd_list_caps_elem *cap)
648 {
649 	dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
650 }
651 
652 /**
653  * ixgbe_parse_dev_caps - Parse device capabilities
654  * @hw: pointer to the HW struct
655  * @dev_p: pointer to device capabilities structure
656  * @buf: buffer containing the device capability records
657  * @cap_count: the number of capabilities
658  *
659  * Helper device to parse device (0x000B) capabilities list. For
660  * capabilities shared between device and function, this relies on
661  * ixgbe_parse_e610_caps.
662  *
663  * Loop through the list of provided capabilities and extract the relevant
664  * data into the device capabilities structured.
665  */
666 static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
667 				 struct ixgbe_hw_dev_caps *dev_p,
668 				 void *buf, u32 cap_count)
669 {
670 	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
671 	u32 i;
672 
673 	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
674 
675 	memset(dev_p, 0, sizeof(*dev_p));
676 
677 	for (i = 0; i < cap_count; i++) {
678 		u16 cap = le16_to_cpu(cap_resp[i].cap);
679 
680 		ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
681 				      "dev caps");
682 
683 		switch (cap) {
684 		case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
685 			ixgbe_parse_valid_functions_cap(hw, dev_p,
686 							&cap_resp[i]);
687 			break;
688 		case IXGBE_ACI_CAPS_VF:
689 			ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
690 			break;
691 		case IXGBE_ACI_CAPS_VSI:
692 			ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
693 			break;
694 		case  IXGBE_ACI_CAPS_FD:
695 			ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
696 			break;
697 		default:
698 			/* Don't list common capabilities as unknown */
699 			break;
700 		}
701 	}
702 }
703 
704 /**
705  * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
706  * @hw: pointer to the HW struct
707  * @func_p: pointer to function capabilities structure
708  * @cap: pointer to the capability element to parse
709  *
710  * Extract function capabilities for IXGBE_ACI_CAPS_VF.
711  */
712 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
713 				     struct ixgbe_hw_func_caps *func_p,
714 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
715 {
716 	func_p->num_allocd_vfs = le32_to_cpu(cap->number);
717 	func_p->vf_base_id = le32_to_cpu(cap->logical_id);
718 }
719 
720 /**
721  * ixgbe_get_num_per_func - determine number of resources per PF
722  * @hw: pointer to the HW structure
723  * @max: value to be evenly split between each PF
724  *
725  * Determine the number of valid functions by going through the bitmap returned
726  * from parsing capabilities and use this to calculate the number of resources
727  * per PF based on the max value passed in.
728  *
729  * Return: the number of resources per PF or 0, if no PH are available.
730  */
731 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
732 {
733 #define IXGBE_CAPS_VALID_FUNCS_M	GENMASK(7, 0)
734 	u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
735 			    IXGBE_CAPS_VALID_FUNCS_M);
736 
737 	return funcs ? (max / funcs) : 0;
738 }
739 
740 /**
741  * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
742  * @hw: pointer to the HW struct
743  * @func_p: pointer to function capabilities structure
744  * @cap: pointer to the capability element to parse
745  *
746  * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
747  */
748 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
749 				      struct ixgbe_hw_func_caps *func_p,
750 				      struct ixgbe_aci_cmd_list_caps_elem *cap)
751 {
752 	func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
753 }
754 
755 /**
756  * ixgbe_parse_func_caps - Parse function capabilities
757  * @hw: pointer to the HW struct
758  * @func_p: pointer to function capabilities structure
759  * @buf: buffer containing the function capability records
760  * @cap_count: the number of capabilities
761  *
762  * Helper function to parse function (0x000A) capabilities list. For
763  * capabilities shared between device and function, this relies on
764  * ixgbe_parse_e610_caps.
765  *
766  * Loop through the list of provided capabilities and extract the relevant
767  * data into the function capabilities structured.
768  */
769 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
770 				  struct ixgbe_hw_func_caps *func_p,
771 				  void *buf, u32 cap_count)
772 {
773 	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
774 	u32 i;
775 
776 	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
777 
778 	memset(func_p, 0, sizeof(*func_p));
779 
780 	for (i = 0; i < cap_count; i++) {
781 		u16 cap = le16_to_cpu(cap_resp[i].cap);
782 
783 		ixgbe_parse_e610_caps(hw, &func_p->common_cap,
784 				      &cap_resp[i], "func caps");
785 
786 		switch (cap) {
787 		case IXGBE_ACI_CAPS_VF:
788 			ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
789 			break;
790 		case IXGBE_ACI_CAPS_VSI:
791 			ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
792 			break;
793 		default:
794 			/* Don't list common capabilities as unknown */
795 			break;
796 		}
797 	}
798 }
799 
800 /**
801  * ixgbe_aci_list_caps - query function/device capabilities
802  * @hw: pointer to the HW struct
803  * @buf: a buffer to hold the capabilities
804  * @buf_size: size of the buffer
805  * @cap_count: if not NULL, set to the number of capabilities reported
806  * @opc: capabilities type to discover, device or function
807  *
808  * Get the function (0x000A) or device (0x000B) capabilities description from
809  * firmware and store it in the buffer.
810  *
811  * If the cap_count pointer is not NULL, then it is set to the number of
812  * capabilities firmware will report. Note that if the buffer size is too
813  * small, it is possible the command will return -ENOMEM. The
814  * cap_count will still be updated in this case. It is recommended that the
815  * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
816  * buffer that firmware could return) to avoid this.
817  *
818  * Return: the exit code of the operation.
819  * Exit code of -ENOMEM means the buffer size is too small.
820  */
821 int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
822 			u32 *cap_count, enum ixgbe_aci_opc opc)
823 {
824 	struct ixgbe_aci_cmd_list_caps *cmd;
825 	struct ixgbe_aci_desc desc;
826 	int err;
827 
828 	cmd = &desc.params.get_cap;
829 
830 	if (opc != ixgbe_aci_opc_list_func_caps &&
831 	    opc != ixgbe_aci_opc_list_dev_caps)
832 		return -EINVAL;
833 
834 	ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
835 	err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
836 
837 	if (cap_count)
838 		*cap_count = le32_to_cpu(cmd->count);
839 
840 	return err;
841 }
842 
843 /**
844  * ixgbe_discover_dev_caps - Read and extract device capabilities
845  * @hw: pointer to the hardware structure
846  * @dev_caps: pointer to device capabilities structure
847  *
848  * Read the device capabilities and extract them into the dev_caps structure
849  * for later use.
850  *
851  * Return: the exit code of the operation.
852  */
853 int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
854 			    struct ixgbe_hw_dev_caps *dev_caps)
855 {
856 	u32 cap_count;
857 	u8 *cbuf;
858 	int err;
859 
860 	cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
861 	if (!cbuf)
862 		return -ENOMEM;
863 
864 	/* Although the driver doesn't know the number of capabilities the
865 	 * device will return, we can simply send a 4KB buffer, the maximum
866 	 * possible size that firmware can return.
867 	 */
868 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
869 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
870 
871 	err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
872 				  &cap_count,
873 				  ixgbe_aci_opc_list_dev_caps);
874 	if (!err)
875 		ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
876 
877 	kfree(cbuf);
878 
879 	return 0;
880 }
881 
882 /**
883  * ixgbe_discover_func_caps - Read and extract function capabilities
884  * @hw: pointer to the hardware structure
885  * @func_caps: pointer to function capabilities structure
886  *
887  * Read the function capabilities and extract them into the func_caps structure
888  * for later use.
889  *
890  * Return: the exit code of the operation.
891  */
892 int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
893 			     struct ixgbe_hw_func_caps *func_caps)
894 {
895 	u32 cap_count;
896 	u8 *cbuf;
897 	int err;
898 
899 	cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
900 	if (!cbuf)
901 		return -ENOMEM;
902 
903 	/* Although the driver doesn't know the number of capabilities the
904 	 * device will return, we can simply send a 4KB buffer, the maximum
905 	 * possible size that firmware can return.
906 	 */
907 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
908 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
909 
910 	err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
911 				  &cap_count,
912 				  ixgbe_aci_opc_list_func_caps);
913 	if (!err)
914 		ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
915 
916 	kfree(cbuf);
917 
918 	return 0;
919 }
920 
921 /**
922  * ixgbe_get_caps - get info about the HW
923  * @hw: pointer to the hardware structure
924  *
925  * Retrieve both device and function capabilities.
926  *
927  * Return: the exit code of the operation.
928  */
929 int ixgbe_get_caps(struct ixgbe_hw *hw)
930 {
931 	int err;
932 
933 	err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
934 	if (err)
935 		return err;
936 
937 	return ixgbe_discover_func_caps(hw, &hw->func_caps);
938 }
939 
940 /**
941  * ixgbe_aci_disable_rxen - disable RX
942  * @hw: pointer to the HW struct
943  *
944  * Request a safe disable of Receive Enable using ACI command (0x000C).
945  *
946  * Return: the exit code of the operation.
947  */
948 int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
949 {
950 	struct ixgbe_aci_cmd_disable_rxen *cmd;
951 	struct ixgbe_aci_desc desc;
952 
953 	cmd = &desc.params.disable_rxen;
954 
955 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
956 
957 	cmd->lport_num = hw->bus.func;
958 
959 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
960 }
961 
962 /**
963  * ixgbe_aci_get_phy_caps - returns PHY capabilities
964  * @hw: pointer to the HW struct
965  * @qual_mods: report qualified modules
966  * @report_mode: report mode capabilities
967  * @pcaps: structure for PHY capabilities to be filled
968  *
969  * Returns the various PHY capabilities supported on the Port
970  * using ACI command (0x0600).
971  *
972  * Return: the exit code of the operation.
973  */
974 int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
975 			   struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
976 {
977 	struct ixgbe_aci_cmd_get_phy_caps *cmd;
978 	u16 pcaps_size = sizeof(*pcaps);
979 	struct ixgbe_aci_desc desc;
980 	int err;
981 
982 	cmd = &desc.params.get_phy;
983 
984 	if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
985 		return -EINVAL;
986 
987 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
988 
989 	if (qual_mods)
990 		cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
991 
992 	cmd->param0 |= cpu_to_le16(report_mode);
993 	err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
994 	if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
995 		hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
996 		hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
997 		memcpy(hw->link.link_info.module_type, &pcaps->module_type,
998 		       sizeof(hw->link.link_info.module_type));
999 	}
1000 
1001 	return err;
1002 }
1003 
1004 /**
1005  * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
1006  * @caps: PHY ability structure to copy data from
1007  * @cfg: PHY configuration structure to copy data to
1008  *
1009  * Helper function to copy data from PHY capabilities data structure
1010  * to PHY configuration data structure
1011  */
1012 void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
1013 				struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1014 {
1015 	if (!caps || !cfg)
1016 		return;
1017 
1018 	memset(cfg, 0, sizeof(*cfg));
1019 	cfg->phy_type_low = caps->phy_type_low;
1020 	cfg->phy_type_high = caps->phy_type_high;
1021 	cfg->caps = caps->caps;
1022 	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
1023 	cfg->eee_cap = caps->eee_cap;
1024 	cfg->eeer_value = caps->eeer_value;
1025 	cfg->link_fec_opt = caps->link_fec_options;
1026 	cfg->module_compliance_enforcement =
1027 		caps->module_compliance_enforcement;
1028 }
1029 
1030 /**
1031  * ixgbe_aci_set_phy_cfg - set PHY configuration
1032  * @hw: pointer to the HW struct
1033  * @cfg: structure with PHY configuration data to be set
1034  *
1035  * Set the various PHY configuration parameters supported on the Port
1036  * using ACI command (0x0601).
1037  * One or more of the Set PHY config parameters may be ignored in an MFP
1038  * mode as the PF may not have the privilege to set some of the PHY Config
1039  * parameters.
1040  *
1041  * Return: the exit code of the operation.
1042  */
1043 int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
1044 			  struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1045 {
1046 	struct ixgbe_aci_desc desc;
1047 	int err;
1048 
1049 	if (!cfg)
1050 		return -EINVAL;
1051 
1052 	/* Ensure that only valid bits of cfg->caps can be turned on. */
1053 	cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
1054 
1055 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
1056 	desc.params.set_phy.lport_num = hw->bus.func;
1057 	desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
1058 
1059 	err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
1060 	if (!err)
1061 		hw->phy.curr_user_phy_cfg = *cfg;
1062 
1063 	return err;
1064 }
1065 
1066 /**
1067  * ixgbe_aci_set_link_restart_an - set up link and restart AN
1068  * @hw: pointer to the HW struct
1069  * @ena_link: if true: enable link, if false: disable link
1070  *
1071  * Function sets up the link and restarts the Auto-Negotiation over the link.
1072  *
1073  * Return: the exit code of the operation.
1074  */
1075 int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
1076 {
1077 	struct ixgbe_aci_cmd_restart_an *cmd;
1078 	struct ixgbe_aci_desc desc;
1079 
1080 	cmd = &desc.params.restart_an;
1081 
1082 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
1083 
1084 	cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
1085 	cmd->lport_num = hw->bus.func;
1086 	if (ena_link)
1087 		cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1088 	else
1089 		cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1090 
1091 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1092 }
1093 
1094 /**
1095  * ixgbe_is_media_cage_present - check if media cage is present
1096  * @hw: pointer to the HW struct
1097  *
1098  * Identify presence of media cage using the ACI command (0x06E0).
1099  *
1100  * Return: true if media cage is present, else false. If no cage, then
1101  * media type is backplane or BASE-T.
1102  */
1103 static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
1104 {
1105 	struct ixgbe_aci_cmd_get_link_topo *cmd;
1106 	struct ixgbe_aci_desc desc;
1107 
1108 	cmd = &desc.params.get_link_topo;
1109 
1110 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1111 
1112 	cmd->addr.topo_params.node_type_ctx =
1113 		FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
1114 			   IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
1115 
1116 	/* Set node type. */
1117 	cmd->addr.topo_params.node_type_ctx |=
1118 		FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
1119 			   IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
1120 
1121 	/* Node type cage can be used to determine if cage is present. If AQC
1122 	 * returns error (ENOENT), then no cage present. If no cage present then
1123 	 * connection type is backplane or BASE-T.
1124 	 */
1125 	return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
1126 }
1127 
1128 /**
1129  * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
1130  * @hw: pointer to the HW struct
1131  *
1132  * Try to identify the media type based on the phy type.
1133  * If more than one media type, the ixgbe_media_type_unknown is returned.
1134  * First, phy_type_low is checked, then phy_type_high.
1135  * If none are identified, the ixgbe_media_type_unknown is returned
1136  *
1137  * Return: type of a media based on phy type in form of enum.
1138  */
1139 static enum ixgbe_media_type
1140 ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
1141 {
1142 	struct ixgbe_link_status *hw_link_info;
1143 
1144 	if (!hw)
1145 		return ixgbe_media_type_unknown;
1146 
1147 	hw_link_info = &hw->link.link_info;
1148 	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
1149 		/* If more than one media type is selected, report unknown */
1150 		return ixgbe_media_type_unknown;
1151 
1152 	if (hw_link_info->phy_type_low) {
1153 		/* 1G SGMII is a special case where some DA cable PHYs
1154 		 * may show this as an option when it really shouldn't
1155 		 * be since SGMII is meant to be between a MAC and a PHY
1156 		 * in a backplane. Try to detect this case and handle it
1157 		 */
1158 		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
1159 		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
1160 		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
1161 		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
1162 		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
1163 			return ixgbe_media_type_da;
1164 
1165 		switch (hw_link_info->phy_type_low) {
1166 		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
1167 		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
1168 		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
1169 		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
1170 		case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
1171 		case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
1172 			return ixgbe_media_type_fiber;
1173 		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1174 		case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
1175 			return ixgbe_media_type_fiber;
1176 		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
1177 		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
1178 		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
1179 		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
1180 		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
1181 		case IXGBE_PHY_TYPE_LOW_25GBASE_T:
1182 			return ixgbe_media_type_copper;
1183 		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
1184 		case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
1185 		case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
1186 		case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
1187 			return ixgbe_media_type_da;
1188 		case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
1189 			if (ixgbe_is_media_cage_present(hw))
1190 				return ixgbe_media_type_aui;
1191 			fallthrough;
1192 		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
1193 		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
1194 		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
1195 		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
1196 		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1197 		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
1198 		case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
1199 		case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
1200 		case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
1201 			return ixgbe_media_type_backplane;
1202 		}
1203 	} else {
1204 		switch (hw_link_info->phy_type_high) {
1205 		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
1206 			return ixgbe_media_type_copper;
1207 		}
1208 	}
1209 	return ixgbe_media_type_unknown;
1210 }
1211 
1212 /**
1213  * ixgbe_update_link_info - update status of the HW network link
1214  * @hw: pointer to the HW struct
1215  *
1216  * Update the status of the HW network link.
1217  *
1218  * Return: the exit code of the operation.
1219  */
1220 int ixgbe_update_link_info(struct ixgbe_hw *hw)
1221 {
1222 	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
1223 	struct ixgbe_link_status *li;
1224 	int err;
1225 
1226 	if (!hw)
1227 		return -EINVAL;
1228 
1229 	li = &hw->link.link_info;
1230 
1231 	err = ixgbe_aci_get_link_info(hw, true, NULL);
1232 	if (err)
1233 		return err;
1234 
1235 	if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
1236 		return 0;
1237 
1238 	pcaps =	kzalloc(sizeof(*pcaps), GFP_KERNEL);
1239 	if (!pcaps)
1240 		return -ENOMEM;
1241 
1242 	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1243 				     pcaps);
1244 
1245 	if (!err)
1246 		memcpy(li->module_type, &pcaps->module_type,
1247 		       sizeof(li->module_type));
1248 
1249 	kfree(pcaps);
1250 
1251 	return err;
1252 }
1253 
1254 /**
1255  * ixgbe_get_link_status - get status of the HW network link
1256  * @hw: pointer to the HW struct
1257  * @link_up: pointer to bool (true/false = linkup/linkdown)
1258  *
1259  * Variable link_up is true if link is up, false if link is down.
1260  * The variable link_up is invalid if status is non zero. As a
1261  * result of this call, link status reporting becomes enabled
1262  *
1263  * Return: the exit code of the operation.
1264  */
1265 int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
1266 {
1267 	if (!hw || !link_up)
1268 		return -EINVAL;
1269 
1270 	if (hw->link.get_link_info) {
1271 		int err = ixgbe_update_link_info(hw);
1272 
1273 		if (err)
1274 			return err;
1275 	}
1276 
1277 	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
1278 
1279 	return 0;
1280 }
1281 
1282 /**
1283  * ixgbe_aci_get_link_info - get the link status
1284  * @hw: pointer to the HW struct
1285  * @ena_lse: enable/disable LinkStatusEvent reporting
1286  * @link: pointer to link status structure - optional
1287  *
1288  * Get the current Link Status using ACI command (0x607).
1289  * The current link can be optionally provided to update
1290  * the status.
1291  *
1292  * Return: the link status of the adapter.
1293  */
1294 int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
1295 			    struct ixgbe_link_status *link)
1296 {
1297 	struct ixgbe_aci_cmd_get_link_status_data link_data = {};
1298 	struct ixgbe_aci_cmd_get_link_status *resp;
1299 	struct ixgbe_link_status *li_old, *li;
1300 	struct ixgbe_fc_info *hw_fc_info;
1301 	struct ixgbe_aci_desc desc;
1302 	bool tx_pause, rx_pause;
1303 	u8 cmd_flags;
1304 	int err;
1305 
1306 	if (!hw)
1307 		return -EINVAL;
1308 
1309 	li_old = &hw->link.link_info_old;
1310 	li = &hw->link.link_info;
1311 	hw_fc_info = &hw->fc;
1312 
1313 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
1314 	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
1315 	resp = &desc.params.get_link_status;
1316 	resp->cmd_flags = cpu_to_le16(cmd_flags);
1317 	resp->lport_num = hw->bus.func;
1318 
1319 	err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
1320 	if (err)
1321 		return err;
1322 
1323 	/* Save off old link status information. */
1324 	*li_old = *li;
1325 
1326 	/* Update current link status information. */
1327 	li->link_speed = le16_to_cpu(link_data.link_speed);
1328 	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
1329 	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
1330 	li->link_info = link_data.link_info;
1331 	li->link_cfg_err = link_data.link_cfg_err;
1332 	li->an_info = link_data.an_info;
1333 	li->ext_info = link_data.ext_info;
1334 	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
1335 	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
1336 	li->topo_media_conflict = link_data.topo_media_conflict;
1337 	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
1338 				      IXGBE_ACI_CFG_PACING_TYPE_M);
1339 
1340 	/* Update fc info. */
1341 	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
1342 	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
1343 	if (tx_pause && rx_pause)
1344 		hw_fc_info->current_mode = ixgbe_fc_full;
1345 	else if (tx_pause)
1346 		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
1347 	else if (rx_pause)
1348 		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
1349 	else
1350 		hw_fc_info->current_mode = ixgbe_fc_none;
1351 
1352 	li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
1353 			 IXGBE_ACI_LSE_IS_ENABLED);
1354 
1355 	/* Save link status information. */
1356 	if (link)
1357 		*link = *li;
1358 
1359 	/* Flag cleared so calling functions don't call AQ again. */
1360 	hw->link.get_link_info = false;
1361 
1362 	return 0;
1363 }
1364 
1365 /**
1366  * ixgbe_aci_set_event_mask - set event mask
1367  * @hw: pointer to the HW struct
1368  * @port_num: port number of the physical function
1369  * @mask: event mask to be set
1370  *
1371  * Set the event mask using ACI command (0x0613).
1372  *
1373  * Return: the exit code of the operation.
1374  */
1375 int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
1376 {
1377 	struct ixgbe_aci_cmd_set_event_mask *cmd;
1378 	struct ixgbe_aci_desc desc;
1379 
1380 	cmd = &desc.params.set_event_mask;
1381 
1382 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
1383 
1384 	cmd->lport_num = port_num;
1385 
1386 	cmd->event_mask = cpu_to_le16(mask);
1387 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1388 }
1389 
1390 /**
1391  * ixgbe_configure_lse - enable/disable link status events
1392  * @hw: pointer to the HW struct
1393  * @activate: true for enable lse, false otherwise
1394  * @mask: event mask to be set; a set bit means deactivation of the
1395  * corresponding event
1396  *
1397  * Set the event mask and then enable or disable link status events
1398  *
1399  * Return: the exit code of the operation.
1400  */
1401 int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
1402 {
1403 	int err;
1404 
1405 	err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
1406 	if (err)
1407 		return err;
1408 
1409 	/* Enabling link status events generation by fw. */
1410 	return ixgbe_aci_get_link_info(hw, activate, NULL);
1411 }
1412 
1413 /**
1414  * ixgbe_get_media_type_e610 - Gets media type
1415  * @hw: pointer to the HW struct
1416  *
1417  * In order to get the media type, the function gets PHY
1418  * capabilities and later on use them to identify the PHY type
1419  * checking phy_type_high and phy_type_low.
1420  *
1421  * Return: the type of media in form of ixgbe_media_type enum
1422  * or ixgbe_media_type_unknown in case of an error.
1423  */
1424 enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
1425 {
1426 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
1427 	int rc;
1428 
1429 	rc = ixgbe_update_link_info(hw);
1430 	if (rc)
1431 		return ixgbe_media_type_unknown;
1432 
1433 	/* If there is no link but PHY (dongle) is available SW should use
1434 	 * Get PHY Caps admin command instead of Get Link Status, find most
1435 	 * significant bit that is set in PHY types reported by the command
1436 	 * and use it to discover media type.
1437 	 */
1438 	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
1439 	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
1440 		int highest_bit;
1441 
1442 		/* Get PHY Capabilities */
1443 		rc = ixgbe_aci_get_phy_caps(hw, false,
1444 					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1445 					    &pcaps);
1446 		if (rc)
1447 			return ixgbe_media_type_unknown;
1448 
1449 		highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
1450 		if (highest_bit) {
1451 			hw->link.link_info.phy_type_high =
1452 				BIT_ULL(highest_bit - 1);
1453 			hw->link.link_info.phy_type_low = 0;
1454 		} else {
1455 			highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
1456 			if (highest_bit) {
1457 				hw->link.link_info.phy_type_low =
1458 					BIT_ULL(highest_bit - 1);
1459 				hw->link.link_info.phy_type_high = 0;
1460 			}
1461 		}
1462 	}
1463 
1464 	/* Based on link status or search above try to discover media type. */
1465 	hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
1466 
1467 	return hw->phy.media_type;
1468 }
1469 
1470 /**
1471  * ixgbe_setup_link_e610 - Set up link
1472  * @hw: pointer to hardware structure
1473  * @speed: new link speed
1474  * @autoneg_wait: true when waiting for completion is needed
1475  *
1476  * Set up the link with the specified speed.
1477  *
1478  * Return: the exit code of the operation.
1479  */
1480 int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1481 			  bool autoneg_wait)
1482 {
1483 	/* Simply request FW to perform proper PHY setup */
1484 	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1485 }
1486 
1487 /**
1488  * ixgbe_check_link_e610 - Determine link and speed status
1489  * @hw: pointer to hardware structure
1490  * @speed: pointer to link speed
1491  * @link_up: true when link is up
1492  * @link_up_wait_to_complete: bool used to wait for link up or not
1493  *
1494  * Determine if the link is up and the current link speed
1495  * using ACI command (0x0607).
1496  *
1497  * Return: the exit code of the operation.
1498  */
1499 int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
1500 			  bool *link_up, bool link_up_wait_to_complete)
1501 {
1502 	int err;
1503 	u32 i;
1504 
1505 	if (!speed || !link_up)
1506 		return -EINVAL;
1507 
1508 	/* Set get_link_info flag to ensure that fresh
1509 	 * link information will be obtained from FW
1510 	 * by sending Get Link Status admin command.
1511 	 */
1512 	hw->link.get_link_info = true;
1513 
1514 	/* Update link information in adapter context. */
1515 	err = ixgbe_get_link_status(hw, link_up);
1516 	if (err)
1517 		return err;
1518 
1519 	/* Wait for link up if it was requested. */
1520 	if (link_up_wait_to_complete && !(*link_up)) {
1521 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
1522 			msleep(100);
1523 			hw->link.get_link_info = true;
1524 			err = ixgbe_get_link_status(hw, link_up);
1525 			if (err)
1526 				return err;
1527 			if (*link_up)
1528 				break;
1529 		}
1530 	}
1531 
1532 	/* Use link information in adapter context updated by the call
1533 	 * to ixgbe_get_link_status() to determine current link speed.
1534 	 * Link speed information is valid only when link up was
1535 	 * reported by FW.
1536 	 */
1537 	if (*link_up) {
1538 		switch (hw->link.link_info.link_speed) {
1539 		case IXGBE_ACI_LINK_SPEED_10MB:
1540 			*speed = IXGBE_LINK_SPEED_10_FULL;
1541 			break;
1542 		case IXGBE_ACI_LINK_SPEED_100MB:
1543 			*speed = IXGBE_LINK_SPEED_100_FULL;
1544 			break;
1545 		case IXGBE_ACI_LINK_SPEED_1000MB:
1546 			*speed = IXGBE_LINK_SPEED_1GB_FULL;
1547 			break;
1548 		case IXGBE_ACI_LINK_SPEED_2500MB:
1549 			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1550 			break;
1551 		case IXGBE_ACI_LINK_SPEED_5GB:
1552 			*speed = IXGBE_LINK_SPEED_5GB_FULL;
1553 			break;
1554 		case IXGBE_ACI_LINK_SPEED_10GB:
1555 			*speed = IXGBE_LINK_SPEED_10GB_FULL;
1556 			break;
1557 		default:
1558 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
1559 			break;
1560 		}
1561 	} else {
1562 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
1563 	}
1564 
1565 	return 0;
1566 }
1567 
1568 /**
1569  * ixgbe_get_link_capabilities_e610 - Determine link capabilities
1570  * @hw: pointer to hardware structure
1571  * @speed: pointer to link speed
1572  * @autoneg: true when autoneg or autotry is enabled
1573  *
1574  * Determine speed and AN parameters of a link.
1575  *
1576  * Return: the exit code of the operation.
1577  */
1578 int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
1579 				     ixgbe_link_speed *speed,
1580 				     bool *autoneg)
1581 {
1582 	if (!speed || !autoneg)
1583 		return -EINVAL;
1584 
1585 	*autoneg = true;
1586 	*speed = hw->phy.speeds_supported;
1587 
1588 	return 0;
1589 }
1590 
1591 /**
1592  * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
1593  * @hw: pointer to hardware structure
1594  * @cfg: PHY configuration data to set FC mode
1595  * @req_mode: FC mode to configure
1596  *
1597  * Configures PHY Flow Control according to the provided configuration.
1598  *
1599  * Return: the exit code of the operation.
1600  */
1601 int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
1602 		     struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
1603 		     enum ixgbe_fc_mode req_mode)
1604 {
1605 	u8 pause_mask = 0x0;
1606 
1607 	if (!cfg)
1608 		return -EINVAL;
1609 
1610 	switch (req_mode) {
1611 	case ixgbe_fc_full:
1612 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
1613 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
1614 		break;
1615 	case ixgbe_fc_rx_pause:
1616 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
1617 		break;
1618 	case ixgbe_fc_tx_pause:
1619 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
1620 		break;
1621 	default:
1622 		break;
1623 	}
1624 
1625 	/* Clear the old pause settings. */
1626 	cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
1627 		IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
1628 
1629 	/* Set the new capabilities. */
1630 	cfg->caps |= pause_mask;
1631 
1632 	return 0;
1633 }
1634 
1635 /**
1636  * ixgbe_setup_fc_e610 - Set up flow control
1637  * @hw: pointer to hardware structure
1638  *
1639  * Set up flow control. This has to be done during init time.
1640  *
1641  * Return: the exit code of the operation.
1642  */
1643 int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
1644 {
1645 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
1646 	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
1647 	int err;
1648 
1649 	/* Get the current PHY config */
1650 	err = ixgbe_aci_get_phy_caps(hw, false,
1651 				     IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
1652 	if (err)
1653 		return err;
1654 
1655 	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
1656 
1657 	/* Configure the set PHY data */
1658 	err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
1659 	if (err)
1660 		return err;
1661 
1662 	/* If the capabilities have changed, then set the new config */
1663 	if (cfg.caps != pcaps.caps) {
1664 		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
1665 
1666 		err = ixgbe_aci_set_phy_cfg(hw, &cfg);
1667 		if (err)
1668 			return err;
1669 	}
1670 
1671 	return err;
1672 }
1673 
1674 /**
1675  * ixgbe_fc_autoneg_e610 - Configure flow control
1676  * @hw: pointer to hardware structure
1677  *
1678  * Configure Flow Control.
1679  */
1680 void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
1681 {
1682 	int err;
1683 
1684 	/* Get current link err.
1685 	 * Current FC mode will be stored in the hw context.
1686 	 */
1687 	err = ixgbe_aci_get_link_info(hw, false, NULL);
1688 	if (err)
1689 		goto no_autoneg;
1690 
1691 	/* Check if the link is up */
1692 	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
1693 		goto no_autoneg;
1694 
1695 	/* Check if auto-negotiation has completed */
1696 	if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
1697 		goto no_autoneg;
1698 
1699 	hw->fc.fc_was_autonegged = true;
1700 	return;
1701 
1702 no_autoneg:
1703 	hw->fc.fc_was_autonegged = false;
1704 	hw->fc.current_mode = hw->fc.requested_mode;
1705 }
1706 
1707 /**
1708  * ixgbe_disable_rx_e610 - Disable RX unit
1709  * @hw: pointer to hardware structure
1710  *
1711  * Disable RX DMA unit on E610 with use of ACI command (0x000C).
1712  *
1713  * Return: the exit code of the operation.
1714  */
1715 void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
1716 {
1717 	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1718 	u32 pfdtxgswc;
1719 	int err;
1720 
1721 	if (!(rxctrl & IXGBE_RXCTRL_RXEN))
1722 		return;
1723 
1724 	pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
1725 	if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
1726 		pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
1727 		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
1728 		hw->mac.set_lben = true;
1729 	} else {
1730 		hw->mac.set_lben = false;
1731 	}
1732 
1733 	err = ixgbe_aci_disable_rxen(hw);
1734 
1735 	/* If we fail - disable RX using register write */
1736 	if (err) {
1737 		rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1738 		if (rxctrl & IXGBE_RXCTRL_RXEN) {
1739 			rxctrl &= ~IXGBE_RXCTRL_RXEN;
1740 			IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
1741 		}
1742 	}
1743 }
1744 
1745 /**
1746  * ixgbe_init_phy_ops_e610 - PHY specific init
1747  * @hw: pointer to hardware structure
1748  *
1749  * Initialize any function pointers that were not able to be
1750  * set during init_shared_code because the PHY type was not known.
1751  *
1752  * Return: the exit code of the operation.
1753  */
1754 int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
1755 {
1756 	struct ixgbe_mac_info *mac = &hw->mac;
1757 	struct ixgbe_phy_info *phy = &hw->phy;
1758 
1759 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
1760 		phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
1761 	else
1762 		phy->ops.set_phy_power = NULL;
1763 
1764 	/* Identify the PHY */
1765 	return phy->ops.identify(hw);
1766 }
1767 
1768 /**
1769  * ixgbe_identify_phy_e610 - Identify PHY
1770  * @hw: pointer to hardware structure
1771  *
1772  * Determine PHY type, supported speeds and PHY ID.
1773  *
1774  * Return: the exit code of the operation.
1775  */
1776 int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
1777 {
1778 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
1779 	u64 phy_type_low, phy_type_high;
1780 	int err;
1781 
1782 	/* Set PHY type */
1783 	hw->phy.type = ixgbe_phy_fw;
1784 
1785 	err = ixgbe_aci_get_phy_caps(hw, false,
1786 				     IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
1787 	if (err)
1788 		return err;
1789 
1790 	if (!(pcaps.module_compliance_enforcement &
1791 	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
1792 		/* Handle lenient mode */
1793 		err = ixgbe_aci_get_phy_caps(hw, false,
1794 					     IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
1795 					     &pcaps);
1796 		if (err)
1797 			return err;
1798 	}
1799 
1800 	/* Determine supported speeds */
1801 	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
1802 	phy_type_high = le64_to_cpu(pcaps.phy_type_high);
1803 	phy_type_low = le64_to_cpu(pcaps.phy_type_low);
1804 
1805 	if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
1806 	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
1807 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
1808 	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
1809 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
1810 	    phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
1811 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
1812 	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_T  ||
1813 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
1814 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
1815 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
1816 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1G_SGMII    ||
1817 	    phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
1818 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
1819 	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_T       ||
1820 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_DA      ||
1821 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_SR      ||
1822 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_LR      ||
1823 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1  ||
1824 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
1825 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C     ||
1826 	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
1827 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
1828 
1829 	/* 2.5 and 5 Gbps link speeds must be excluded from the
1830 	 * auto-negotiation set used during driver initialization due to
1831 	 * compatibility issues with certain switches. Those issues do not
1832 	 * exist in case of E610 2.5G SKU device (0x57b1).
1833 	 */
1834 	if (!hw->phy.autoneg_advertised &&
1835 	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
1836 		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
1837 
1838 	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_T   ||
1839 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_X   ||
1840 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_KX  ||
1841 	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
1842 	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
1843 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
1844 
1845 	if (!hw->phy.autoneg_advertised &&
1846 	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
1847 		hw->phy.autoneg_advertised = hw->phy.speeds_supported;
1848 
1849 	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_T  ||
1850 	    phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
1851 	    phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
1852 		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
1853 
1854 	/* Set PHY ID */
1855 	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
1856 
1857 	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
1858 				       IXGBE_LINK_SPEED_100_FULL |
1859 				       IXGBE_LINK_SPEED_1GB_FULL;
1860 	hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
1861 
1862 	return 0;
1863 }
1864 
1865 /**
1866  * ixgbe_identify_module_e610 - Identify SFP module type
1867  * @hw: pointer to hardware structure
1868  *
1869  * Identify the SFP module type.
1870  *
1871  * Return: the exit code of the operation.
1872  */
1873 int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
1874 {
1875 	bool media_available;
1876 	u8 module_type;
1877 	int err;
1878 
1879 	err = ixgbe_update_link_info(hw);
1880 	if (err)
1881 		return err;
1882 
1883 	media_available =
1884 		(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
1885 
1886 	if (media_available) {
1887 		hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1888 
1889 		/* Get module type from hw context updated by
1890 		 * ixgbe_update_link_info()
1891 		 */
1892 		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
1893 
1894 		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
1895 		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
1896 			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
1897 		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
1898 			hw->phy.sfp_type = ixgbe_sfp_type_sr;
1899 		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
1900 			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
1901 			hw->phy.sfp_type = ixgbe_sfp_type_lr;
1902 		}
1903 	} else {
1904 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1905 		return -ENOENT;
1906 	}
1907 
1908 	return 0;
1909 }
1910 
1911 /**
1912  * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
1913  * @hw: pointer to hardware structure
1914  *
1915  * Set the parameters for the firmware-controlled PHYs.
1916  *
1917  * Return: the exit code of the operation.
1918  */
1919 int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
1920 {
1921 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
1922 	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
1923 	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
1924 	u64 sup_phy_type_low, sup_phy_type_high;
1925 	u64 phy_type_low = 0, phy_type_high = 0;
1926 	int err;
1927 
1928 	err = ixgbe_aci_get_link_info(hw, false, NULL);
1929 	if (err)
1930 		return err;
1931 
1932 	/* If media is not available get default config. */
1933 	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
1934 		rmode = IXGBE_ACI_REPORT_DFLT_CFG;
1935 
1936 	err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
1937 	if (err)
1938 		return err;
1939 
1940 	sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
1941 	sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
1942 
1943 	/* Get Active configuration to avoid unintended changes. */
1944 	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
1945 				     &pcaps);
1946 	if (err)
1947 		return err;
1948 
1949 	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
1950 
1951 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
1952 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
1953 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
1954 	}
1955 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
1956 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
1957 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
1958 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
1959 	}
1960 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
1961 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
1962 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
1963 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
1964 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
1965 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
1966 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
1967 	}
1968 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
1969 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
1970 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
1971 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
1972 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
1973 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
1974 	}
1975 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
1976 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
1977 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
1978 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
1979 	}
1980 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
1981 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
1982 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
1983 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
1984 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
1985 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
1986 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
1987 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
1988 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
1989 	}
1990 
1991 	/* Mask the set values to avoid requesting unsupported link types. */
1992 	phy_type_low &= sup_phy_type_low;
1993 	pcfg.phy_type_low = cpu_to_le64(phy_type_low);
1994 	phy_type_high &= sup_phy_type_high;
1995 	pcfg.phy_type_high = cpu_to_le64(phy_type_high);
1996 
1997 	if (pcfg.phy_type_high != pcaps.phy_type_high ||
1998 	    pcfg.phy_type_low != pcaps.phy_type_low ||
1999 	    pcfg.caps != pcaps.caps) {
2000 		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
2001 		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
2002 
2003 		err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
2004 		if (err)
2005 			return err;
2006 	}
2007 
2008 	return 0;
2009 }
2010 
2011 /**
2012  * ixgbe_set_phy_power_e610 - Control power for copper PHY
2013  * @hw: pointer to hardware structure
2014  * @on: true for on, false for off
2015  *
2016  * Set the power on/off of the PHY
2017  * by getting its capabilities and setting the appropriate
2018  * configuration parameters.
2019  *
2020  * Return: the exit code of the operation.
2021  */
2022 int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
2023 {
2024 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
2025 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
2026 	int err;
2027 
2028 	err = ixgbe_aci_get_phy_caps(hw, false,
2029 				     IXGBE_ACI_REPORT_ACTIVE_CFG,
2030 				     &phy_caps);
2031 	if (err)
2032 		return err;
2033 
2034 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
2035 
2036 	if (on)
2037 		phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
2038 	else
2039 		phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
2040 
2041 	/* PHY is already in requested power mode. */
2042 	if (phy_caps.caps == phy_cfg.caps)
2043 		return 0;
2044 
2045 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
2046 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
2047 
2048 	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
2049 }
2050 
2051 /**
2052  * ixgbe_enter_lplu_e610 - Transition to low power states
2053  * @hw: pointer to hardware structure
2054  *
2055  * Configures Low Power Link Up on transition to low power states
2056  * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
2057  * X557 PHY immediately prior to entering LPLU.
2058  *
2059  * Return: the exit code of the operation.
2060  */
2061 int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
2062 {
2063 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
2064 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
2065 	int err;
2066 
2067 	err = ixgbe_aci_get_phy_caps(hw, false,
2068 				     IXGBE_ACI_REPORT_ACTIVE_CFG,
2069 				     &phy_caps);
2070 	if (err)
2071 		return err;
2072 
2073 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
2074 
2075 	phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
2076 
2077 	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
2078 }
2079 
2080 /**
2081  * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
2082  * @hw: pointer to hardware structure
2083  *
2084  * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
2085  * struct in order to set up EEPROM access.
2086  *
2087  * Return: the operation exit code.
2088  */
2089 int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
2090 {
2091 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2092 	u32 gens_stat;
2093 	u8 sr_size;
2094 
2095 	if (eeprom->type != ixgbe_eeprom_uninitialized)
2096 		return 0;
2097 
2098 	eeprom->type = ixgbe_flash;
2099 
2100 	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
2101 	sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
2102 
2103 	/* Switching to words (sr_size contains power of 2). */
2104 	eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
2105 
2106 	hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
2107 	       eeprom->word_size);
2108 
2109 	return 0;
2110 }
2111 
2112 /**
2113  * ixgbe_aci_get_netlist_node - get a node handle
2114  * @hw: pointer to the hw struct
2115  * @cmd: get_link_topo AQ structure
2116  * @node_part_number: output node part number if node found
2117  * @node_handle: output node handle parameter if node found
2118  *
2119  * Get the netlist node and assigns it to
2120  * the provided handle using ACI command (0x06E0).
2121  *
2122  * Return: the exit code of the operation.
2123  */
2124 int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
2125 			       struct ixgbe_aci_cmd_get_link_topo *cmd,
2126 			       u8 *node_part_number, u16 *node_handle)
2127 {
2128 	struct ixgbe_aci_desc desc;
2129 
2130 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
2131 	desc.params.get_link_topo = *cmd;
2132 
2133 	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
2134 		return -EOPNOTSUPP;
2135 
2136 	if (node_handle)
2137 		*node_handle =
2138 			le16_to_cpu(desc.params.get_link_topo.addr.handle);
2139 	if (node_part_number)
2140 		*node_part_number = desc.params.get_link_topo.node_part_num;
2141 
2142 	return 0;
2143 }
2144 
2145 /**
2146  * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
2147  * @hw: pointer to the HW structure
2148  * @access: NVM access type (read or write)
2149  *
2150  * Request NVM ownership.
2151  *
2152  * Return: the exit code of the operation.
2153  */
2154 int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
2155 		      enum ixgbe_aci_res_access_type access)
2156 {
2157 	u32 fla;
2158 
2159 	/* Skip if we are in blank NVM programming mode */
2160 	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
2161 	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
2162 		return 0;
2163 
2164 	return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
2165 				 IXGBE_NVM_TIMEOUT);
2166 }
2167 
2168 /**
2169  * ixgbe_release_nvm - Generic request for releasing the NVM ownership
2170  * @hw: pointer to the HW structure
2171  *
2172  * Release NVM ownership.
2173  */
2174 void ixgbe_release_nvm(struct ixgbe_hw *hw)
2175 {
2176 	u32 fla;
2177 
2178 	/* Skip if we are in blank NVM programming mode */
2179 	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
2180 	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
2181 		return;
2182 
2183 	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
2184 }
2185 
2186 /**
2187  * ixgbe_aci_read_nvm - read NVM
2188  * @hw: pointer to the HW struct
2189  * @module_typeid: module pointer location in words from the NVM beginning
2190  * @offset: byte offset from the module beginning
2191  * @length: length of the section to be read (in bytes from the offset)
2192  * @data: command buffer (size [bytes] = length)
2193  * @last_command: tells if this is the last command in a series
2194  * @read_shadow_ram: tell if this is a shadow RAM read
2195  *
2196  * Read the NVM using ACI command (0x0701).
2197  *
2198  * Return: the exit code of the operation.
2199  */
2200 int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
2201 		       u16 length, void *data, bool last_command,
2202 		       bool read_shadow_ram)
2203 {
2204 	struct ixgbe_aci_cmd_nvm *cmd;
2205 	struct ixgbe_aci_desc desc;
2206 
2207 	if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
2208 		return -EINVAL;
2209 
2210 	cmd = &desc.params.nvm;
2211 
2212 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
2213 
2214 	if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
2215 		cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
2216 
2217 	/* If this is the last command in a series, set the proper flag. */
2218 	if (last_command)
2219 		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2220 	cmd->module_typeid = cpu_to_le16(module_typeid);
2221 	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
2222 	cmd->offset_high = (offset >> 16) & 0xFF;
2223 	cmd->length = cpu_to_le16(length);
2224 
2225 	return ixgbe_aci_send_cmd(hw, &desc, data, length);
2226 }
2227 
2228 /**
2229  * ixgbe_nvm_validate_checksum - validate checksum
2230  * @hw: pointer to the HW struct
2231  *
2232  * Verify NVM PFA checksum validity using ACI command (0x0706).
2233  * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
2234  * The function acquires and then releases the NVM ownership.
2235  *
2236  * Return: the exit code of the operation.
2237  */
2238 int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
2239 {
2240 	struct ixgbe_aci_cmd_nvm_checksum *cmd;
2241 	struct ixgbe_aci_desc desc;
2242 	int err;
2243 
2244 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2245 	if (err)
2246 		return err;
2247 
2248 	cmd = &desc.params.nvm_checksum;
2249 
2250 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2251 	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
2252 
2253 	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2254 
2255 	ixgbe_release_nvm(hw);
2256 
2257 	if (!err && cmd->checksum !=
2258 		cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
2259 		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
2260 							     hw);
2261 
2262 		err = -EIO;
2263 		netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
2264 	}
2265 
2266 	return err;
2267 }
2268 
2269 /**
2270  * ixgbe_discover_flash_size - Discover the available flash size
2271  * @hw: pointer to the HW struct
2272  *
2273  * The device flash could be up to 16MB in size. However, it is possible that
2274  * the actual size is smaller. Use bisection to determine the accessible size
2275  * of flash memory.
2276  *
2277  * Return: the exit code of the operation.
2278  */
2279 static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
2280 {
2281 	u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
2282 	int err;
2283 
2284 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2285 	if (err)
2286 		return err;
2287 
2288 	while ((max_size - min_size) > 1) {
2289 		u32 offset = (max_size + min_size) / 2;
2290 		u32 len = 1;
2291 		u8 data;
2292 
2293 		err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
2294 		if (err == -EIO &&
2295 		    hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
2296 			err = 0;
2297 			max_size = offset;
2298 		} else if (!err) {
2299 			min_size = offset;
2300 		} else {
2301 			/* an unexpected error occurred */
2302 			goto err_read_flat_nvm;
2303 		}
2304 	}
2305 
2306 	hw->flash.flash_size = max_size;
2307 
2308 err_read_flat_nvm:
2309 	ixgbe_release_nvm(hw);
2310 
2311 	return err;
2312 }
2313 
2314 /**
2315  * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
2316  * @hw: pointer to the HW structure
2317  * @offset: the word offset of the Shadow RAM word to read
2318  * @pointer: pointer value read from Shadow RAM
2319  *
2320  * Read the given Shadow RAM word, and convert it to a pointer value specified
2321  * in bytes. This function assumes the specified offset is a valid pointer
2322  * word.
2323  *
2324  * Each pointer word specifies whether it is stored in word size or 4KB
2325  * sector size by using the highest bit. The reported pointer value will be in
2326  * bytes, intended for flat NVM reads.
2327  *
2328  * Return: the exit code of the operation.
2329  */
2330 static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset,
2331 				      u32 *pointer)
2332 {
2333 	u16 value;
2334 	int err;
2335 
2336 	err = ixgbe_read_ee_aci_e610(hw, offset, &value);
2337 	if (err)
2338 		return err;
2339 
2340 	/* Determine if the pointer is in 4KB or word units */
2341 	if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
2342 		*pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K;
2343 	else
2344 		*pointer = value * sizeof(u16);
2345 
2346 	return 0;
2347 }
2348 
2349 /**
2350  * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
2351  * @hw: pointer to the HW structure
2352  * @offset: the word offset of the Shadow RAM to read
2353  * @size: size value read from the Shadow RAM
2354  *
2355  * Read the given Shadow RAM word, and convert it to an area size value
2356  * specified in bytes. This function assumes the specified offset is a valid
2357  * area size word.
2358  *
2359  * Each area size word is specified in 4KB sector units. This function reports
2360  * the size in bytes, intended for flat NVM reads.
2361  *
2362  * Return: the exit code of the operation.
2363  */
2364 static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
2365 {
2366 	u16 value;
2367 	int err;
2368 
2369 	err = ixgbe_read_ee_aci_e610(hw, offset, &value);
2370 	if (err)
2371 		return err;
2372 
2373 	/* Area sizes are always specified in 4KB units */
2374 	*size = value * SZ_4K;
2375 
2376 	return 0;
2377 }
2378 
2379 /**
2380  * ixgbe_determine_active_flash_banks - Discover active bank for each module
2381  * @hw: pointer to the HW struct
2382  *
2383  * Read the Shadow RAM control word and determine which banks are active for
2384  * the NVM, OROM, and Netlist modules. Also read and calculate the associated
2385  * pointer and size. These values are then cached into the ixgbe_flash_info
2386  * structure for later use in order to calculate the correct offset to read
2387  * from the active module.
2388  *
2389  * Return: the exit code of the operation.
2390  */
2391 static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
2392 {
2393 	struct ixgbe_bank_info *banks = &hw->flash.banks;
2394 	u16 ctrl_word;
2395 	int err;
2396 
2397 	err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
2398 				     &ctrl_word);
2399 	if (err)
2400 		return err;
2401 
2402 	if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
2403 	    IXGBE_SR_CTRL_WORD_VALID)
2404 		return -ENODATA;
2405 
2406 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
2407 		banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
2408 	else
2409 		banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
2410 
2411 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
2412 		banks->orom_bank = IXGBE_1ST_FLASH_BANK;
2413 	else
2414 		banks->orom_bank = IXGBE_2ND_FLASH_BANK;
2415 
2416 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
2417 		banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
2418 	else
2419 		banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
2420 
2421 	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
2422 					 &banks->nvm_ptr);
2423 	if (err)
2424 		return err;
2425 
2426 	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
2427 				      &banks->nvm_size);
2428 	if (err)
2429 		return err;
2430 
2431 	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
2432 					 &banks->orom_ptr);
2433 	if (err)
2434 		return err;
2435 
2436 	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
2437 				      &banks->orom_size);
2438 	if (err)
2439 		return err;
2440 
2441 	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
2442 					 &banks->netlist_ptr);
2443 	if (err)
2444 		return err;
2445 
2446 	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
2447 				      &banks->netlist_size);
2448 
2449 	return err;
2450 }
2451 
2452 /**
2453  * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
2454  * @hw: pointer to the HW structure
2455  * @bank: whether to read from the active or inactive flash bank
2456  * @module: the module to read from
2457  *
2458  * Based on the module, lookup the module offset from the beginning of the
2459  * flash.
2460  *
2461  * Return: the flash offset. Note that a value of zero is invalid and must be
2462  * treated as an error.
2463  */
2464 static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
2465 				       enum ixgbe_bank_select bank,
2466 				       u16 module)
2467 {
2468 	struct ixgbe_bank_info *banks = &hw->flash.banks;
2469 	enum ixgbe_flash_bank active_bank;
2470 	bool second_bank_active;
2471 	u32 offset, size;
2472 
2473 	switch (module) {
2474 	case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
2475 		offset = banks->nvm_ptr;
2476 		size = banks->nvm_size;
2477 		active_bank = banks->nvm_bank;
2478 		break;
2479 	case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
2480 		offset = banks->orom_ptr;
2481 		size = banks->orom_size;
2482 		active_bank = banks->orom_bank;
2483 		break;
2484 	case IXGBE_E610_SR_NETLIST_BANK_PTR:
2485 		offset = banks->netlist_ptr;
2486 		size = banks->netlist_size;
2487 		active_bank = banks->netlist_bank;
2488 		break;
2489 	default:
2490 		return 0;
2491 	}
2492 
2493 	switch (active_bank) {
2494 	case IXGBE_1ST_FLASH_BANK:
2495 		second_bank_active = false;
2496 		break;
2497 	case IXGBE_2ND_FLASH_BANK:
2498 		second_bank_active = true;
2499 		break;
2500 	default:
2501 		return 0;
2502 	}
2503 
2504 	/* The second flash bank is stored immediately following the first
2505 	 * bank. Based on whether the 1st or 2nd bank is active, and whether
2506 	 * we want the active or inactive bank, calculate the desired offset.
2507 	 */
2508 	switch (bank) {
2509 	case IXGBE_ACTIVE_FLASH_BANK:
2510 		return offset + (second_bank_active ? size : 0);
2511 	case IXGBE_INACTIVE_FLASH_BANK:
2512 		return offset + (second_bank_active ? 0 : size);
2513 	}
2514 
2515 	return 0;
2516 }
2517 
2518 /**
2519  * ixgbe_read_flash_module - Read a word from one of the main NVM modules
2520  * @hw: pointer to the HW structure
2521  * @bank: which bank of the module to read
2522  * @module: the module to read
2523  * @offset: the offset into the module in bytes
2524  * @data: storage for the word read from the flash
2525  * @length: bytes of data to read
2526  *
2527  * Read data from the specified flash module. The bank parameter indicates
2528  * whether or not to read from the active bank or the inactive bank of that
2529  * module.
2530  *
2531  * The word will be read using flat NVM access, and relies on the
2532  * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
2533  * during initialization.
2534  *
2535  * Return: the exit code of the operation.
2536  */
2537 static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
2538 				   enum ixgbe_bank_select bank,
2539 				   u16 module, u32 offset, u8 *data, u32 length)
2540 {
2541 	u32 start;
2542 	int err;
2543 
2544 	start = ixgbe_get_flash_bank_offset(hw, bank, module);
2545 	if (!start)
2546 		return -EINVAL;
2547 
2548 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2549 	if (err)
2550 		return err;
2551 
2552 	err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
2553 
2554 	ixgbe_release_nvm(hw);
2555 
2556 	return err;
2557 }
2558 
2559 /**
2560  * ixgbe_read_nvm_module - Read from the active main NVM module
2561  * @hw: pointer to the HW structure
2562  * @bank: whether to read from active or inactive NVM module
2563  * @offset: offset into the NVM module to read, in words
2564  * @data: storage for returned word value
2565  *
2566  * Read the specified word from the active NVM module. This includes the CSS
2567  * header at the start of the NVM module.
2568  *
2569  * Return: the exit code of the operation.
2570  */
2571 static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
2572 				 enum ixgbe_bank_select bank,
2573 				 u32 offset, u16 *data)
2574 {
2575 	__le16 data_local;
2576 	int err;
2577 
2578 	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
2579 				      offset * sizeof(data_local),
2580 				      (u8 *)&data_local,
2581 				      sizeof(data_local));
2582 	if (!err)
2583 		*data = le16_to_cpu(data_local);
2584 
2585 	return err;
2586 }
2587 
2588 /**
2589  * ixgbe_read_netlist_module - Read data from the netlist module area
2590  * @hw: pointer to the HW structure
2591  * @bank: whether to read from the active or inactive module
2592  * @offset: offset into the netlist to read from
2593  * @data: storage for returned word value
2594  *
2595  * Read a word from the specified netlist bank.
2596  *
2597  * Return: the exit code of the operation.
2598  */
2599 static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
2600 				     enum ixgbe_bank_select bank,
2601 				     u32 offset, u16 *data)
2602 {
2603 	__le16 data_local;
2604 	int err;
2605 
2606 	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
2607 				      offset * sizeof(data_local),
2608 				      (u8 *)&data_local, sizeof(data_local));
2609 	if (!err)
2610 		*data = le16_to_cpu(data_local);
2611 
2612 	return err;
2613 }
2614 
2615 /**
2616  * ixgbe_read_orom_module - Read from the active Option ROM module
2617  * @hw: pointer to the HW structure
2618  * @bank: whether to read from active or inactive OROM module
2619  * @offset: offset into the OROM module to read, in words
2620  * @data: storage for returned word value
2621  *
2622  * Read the specified word from the active Option ROM module of the flash.
2623  * Note that unlike the NVM module, the CSS data is stored at the end of the
2624  * module instead of at the beginning.
2625  *
2626  * Return: the exit code of the operation.
2627  */
2628 static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
2629 				  enum ixgbe_bank_select bank,
2630 				  u32 offset, u16 *data)
2631 {
2632 	__le16 data_local;
2633 	int err;
2634 
2635 	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
2636 				      offset * sizeof(data_local),
2637 				      (u8 *)&data_local, sizeof(data_local));
2638 	if (!err)
2639 		*data = le16_to_cpu(data_local);
2640 
2641 	return err;
2642 }
2643 
2644 /**
2645  * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
2646  * @hw: pointer to the HW struct
2647  * @bank: whether to read from the active or inactive flash bank
2648  * @hdr_len: storage for header length in words
2649  *
2650  * Read the CSS header length from the NVM CSS header and add the
2651  * Authentication header size, and then convert to words.
2652  *
2653  * Return: the exit code of the operation.
2654  */
2655 static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
2656 				     enum ixgbe_bank_select bank,
2657 				     u32 *hdr_len)
2658 {
2659 	u16 hdr_len_l, hdr_len_h;
2660 	u32 hdr_len_dword;
2661 	int err;
2662 
2663 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
2664 				    &hdr_len_l);
2665 	if (err)
2666 		return err;
2667 
2668 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
2669 				    &hdr_len_h);
2670 	if (err)
2671 		return err;
2672 
2673 	/* CSS header length is in DWORD, so convert to words and add
2674 	 * authentication header size.
2675 	 */
2676 	hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
2677 	*hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;
2678 
2679 	return 0;
2680 }
2681 
2682 /**
2683  * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
2684  * @hw: pointer to the HW structure
2685  * @bank: whether to read from the active or inactive NVM module
2686  * @offset: offset into the Shadow RAM copy to read, in words
2687  * @data: storage for returned word value
2688  *
2689  * Read the specified word from the copy of the Shadow RAM found in the
2690  * specified NVM module.
2691  *
2692  * Return: the exit code of the operation.
2693  */
2694 static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
2695 				  enum ixgbe_bank_select bank,
2696 				  u32 offset, u16 *data)
2697 {
2698 	u32 hdr_len;
2699 	int err;
2700 
2701 	err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
2702 	if (err)
2703 		return err;
2704 
2705 	hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);
2706 
2707 	return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
2708 }
2709 
2710 /**
2711  * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
2712  * @hw: pointer to the HW struct
2713  * @bank: whether to read from the active or inactive flash bank
2714  * @srev: storage for security revision
2715  *
2716  * Read the security revision out of the CSS header of the active NVM module
2717  * bank.
2718  *
2719  * Return: the exit code of the operation.
2720  */
2721 static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
2722 			      enum ixgbe_bank_select bank, u32 *srev)
2723 {
2724 	u16 srev_l, srev_h;
2725 	int err;
2726 
2727 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
2728 	if (err)
2729 		return err;
2730 
2731 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
2732 	if (err)
2733 		return err;
2734 
2735 	*srev = (srev_h << 16) | srev_l;
2736 
2737 	return 0;
2738 }
2739 
2740 /**
2741  * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
2742  * @hw: pointer to the HW struct
2743  * @bank: whether to read from the active or inactive flash module
2744  * @civd: storage for the Option ROM CIVD data.
2745  *
2746  * Searches through the Option ROM flash contents to locate the CIVD data for
2747  * the image.
2748  *
2749  * Return: the exit code of the operation.
2750  */
2751 static int
2752 ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
2753 			 struct ixgbe_orom_civd_info *civd)
2754 {
2755 	struct ixgbe_orom_civd_info tmp;
2756 	u32 offset;
2757 	int err;
2758 
2759 	/* The CIVD section is located in the Option ROM aligned to 512 bytes.
2760 	 * The first 4 bytes must contain the ASCII characters "$CIV".
2761 	 * A simple modulo 256 sum of all of the bytes of the structure must
2762 	 * equal 0.
2763 	 */
2764 	for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
2765 	     offset += SZ_512) {
2766 		u8 sum = 0;
2767 		u32 i;
2768 
2769 		err = ixgbe_read_flash_module(hw, bank,
2770 					      IXGBE_E610_SR_1ST_OROM_BANK_PTR,
2771 					      offset,
2772 					      (u8 *)&tmp, sizeof(tmp));
2773 		if (err)
2774 			return err;
2775 
2776 		/* Skip forward until we find a matching signature */
2777 		if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
2778 			   sizeof(tmp.signature)))
2779 			continue;
2780 
2781 		/* Verify that the simple checksum is zero */
2782 		for (i = 0; i < sizeof(tmp); i++)
2783 			sum += ((u8 *)&tmp)[i];
2784 
2785 		if (sum)
2786 			return -EDOM;
2787 
2788 		*civd = tmp;
2789 		return 0;
2790 	}
2791 
2792 	return -ENODATA;
2793 }
2794 
2795 /**
2796  * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
2797  * @hw: pointer to the HW struct
2798  * @bank: whether to read from active or inactive flash module
2799  * @srev: storage for security revision
2800  *
2801  * Read the security revision out of the CSS header of the active OROM module
2802  * bank.
2803  *
2804  * Return: the exit code of the operation.
2805  */
2806 static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
2807 			       enum ixgbe_bank_select bank,
2808 			       u32 *srev)
2809 {
2810 	u32 orom_size_word = hw->flash.banks.orom_size / 2;
2811 	u32 css_start, hdr_len;
2812 	u16 srev_l, srev_h;
2813 	int err;
2814 
2815 	err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
2816 	if (err)
2817 		return err;
2818 
2819 	if (orom_size_word < hdr_len)
2820 		return -EINVAL;
2821 
2822 	/* Calculate how far into the Option ROM the CSS header starts. Note
2823 	 * that ixgbe_read_orom_module takes a word offset.
2824 	 */
2825 	css_start = orom_size_word - hdr_len;
2826 	err = ixgbe_read_orom_module(hw, bank,
2827 				     css_start + IXGBE_NVM_CSS_SREV_L,
2828 				     &srev_l);
2829 	if (err)
2830 		return err;
2831 
2832 	err = ixgbe_read_orom_module(hw, bank,
2833 				     css_start + IXGBE_NVM_CSS_SREV_H,
2834 				     &srev_h);
2835 	if (err)
2836 		return err;
2837 
2838 	*srev = srev_h << 16 | srev_l;
2839 
2840 	return 0;
2841 }
2842 
2843 /**
2844  * ixgbe_get_orom_ver_info - Read Option ROM version information
2845  * @hw: pointer to the HW struct
2846  * @bank: whether to read from the active or inactive flash module
2847  * @orom: pointer to Option ROM info structure
2848  *
2849  * Read Option ROM version and security revision from the Option ROM flash
2850  * section.
2851  *
2852  * Return: the exit code of the operation.
2853  */
2854 static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
2855 				   enum ixgbe_bank_select bank,
2856 				   struct ixgbe_orom_info *orom)
2857 {
2858 	struct ixgbe_orom_civd_info civd;
2859 	u32 combo_ver;
2860 	int err;
2861 
2862 	err = ixgbe_get_orom_civd_data(hw, bank, &civd);
2863 	if (err)
2864 		return err;
2865 
2866 	combo_ver = le32_to_cpu(civd.combo_ver);
2867 
2868 	orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
2869 	orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
2870 	orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);
2871 
2872 	return ixgbe_get_orom_srev(hw, bank, &orom->srev);
2873 }
2874 
2875 /**
2876  * ixgbe_get_nvm_ver_info - Read NVM version information
2877  * @hw: pointer to the HW struct
2878  * @bank: whether to read from the active or inactive flash bank
2879  * @nvm: pointer to NVM info structure
2880  *
2881  * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
2882  * in the nvm info structure.
2883  *
2884  * Return: the exit code of the operation.
2885  */
2886 static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
2887 				  enum ixgbe_bank_select bank,
2888 				  struct ixgbe_nvm_info *nvm)
2889 {
2890 	u16 eetrack_lo, eetrack_hi, ver;
2891 	int err;
2892 
2893 	err = ixgbe_read_nvm_sr_copy(hw, bank,
2894 				     IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
2895 	if (err)
2896 		return err;
2897 
2898 	nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
2899 	nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);
2900 
2901 	err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
2902 				     &eetrack_lo);
2903 	if (err)
2904 		return err;
2905 
2906 	err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
2907 				     &eetrack_hi);
2908 	if (err)
2909 		return err;
2910 
2911 	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
2912 
2913 	ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
2914 
2915 	return 0;
2916 }
2917 
2918 /**
2919  * ixgbe_get_netlist_info - Read the netlist version information
2920  * @hw: pointer to the HW struct
2921  * @bank: whether to read from the active or inactive flash bank
2922  * @netlist: pointer to netlist version info structure
2923  *
2924  * Get the netlist version information from the requested bank. Reads the Link
2925  * Topology section to find the Netlist ID block and extract the relevant
2926  * information into the netlist version structure.
2927  *
2928  * Return: the exit code of the operation.
2929  */
static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
				  enum ixgbe_bank_select bank,
				  struct ixgbe_netlist_info *netlist)
{
	u16 module_id, length, node_count, i;
	u16 *id_blk;
	int err;

	/* The first word of the module identifies its type. */
	err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
					&module_id);
	if (err)
		return err;

	/* Only a Link Topology module carries a netlist ID block. */
	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
		return -EIO;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
					&length);
	if (err)
		return err;

	/* Sanity check that we have at least enough words to store the
	 * netlist ID block.
	 */
	if (length < IXGBE_NETLIST_ID_BLK_SIZE)
		return -EIO;

	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
					&node_count);
	if (err)
		return err;

	/* Only the low bits hold the count; it determines where the ID
	 * block starts (see IXGBE_NETLIST_ID_BLK_OFFSET below).
	 */
	node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;

	id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
	if (!id_blk)
		return -ENOMEM;

	/* Read out the entire Netlist ID Block at once. */
	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
				      IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
				      sizeof(*id_blk), (u8 *)id_blk,
				      IXGBE_NETLIST_ID_BLK_SIZE *
				      sizeof(*id_blk));
	if (err)
		goto free_id_blk;

	/* Flash data is little-endian; convert each word in place. */
	for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
		id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);

	/* 32-bit fields are split across two consecutive 16-bit words. */
	netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
	netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
			 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
	netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
	netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
		       id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
	netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
			id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];

	/* err is 0 here on the success path. */
free_id_blk:
	kfree(id_blk);
	return err;
}
2997 
2998 /**
2999  * ixgbe_get_flash_data - get flash data
3000  * @hw: pointer to the HW struct
3001  *
3002  * Read and populate flash data such as Shadow RAM size,
3003  * max_timeout and blank_nvm_mode
3004  *
3005  * Return: the exit code of the operation.
3006  */
3007 int ixgbe_get_flash_data(struct ixgbe_hw *hw)
3008 {
3009 	struct ixgbe_flash_info *flash = &hw->flash;
3010 	u32 fla, gens_stat;
3011 	u8 sr_size;
3012 	int err;
3013 
3014 	/* The SR size is stored regardless of the NVM programming mode
3015 	 * as the blank mode may be used in the factory line.
3016 	 */
3017 	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
3018 	sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
3019 
3020 	/* Switching to words (sr_size contains power of 2) */
3021 	flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));
3022 
3023 	/* Check if we are in the normal or blank NVM programming mode */
3024 	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
3025 	if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
3026 		flash->blank_nvm_mode = false;
3027 	} else {
3028 		flash->blank_nvm_mode = true;
3029 		return -EIO;
3030 	}
3031 
3032 	err = ixgbe_discover_flash_size(hw);
3033 	if (err)
3034 		return err;
3035 
3036 	err = ixgbe_determine_active_flash_banks(hw);
3037 	if (err)
3038 		return err;
3039 
3040 	err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3041 				     &flash->nvm);
3042 	if (err)
3043 		return err;
3044 
3045 	err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3046 				      &flash->orom);
3047 	if (err)
3048 		return err;
3049 
3050 	err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3051 				     &flash->netlist);
3052 	return err;
3053 }
3054 
3055 /**
3056  * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
3057  * @hw: pointer to the HW structure
3058  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3059  * @data: word read from the Shadow RAM
3060  *
3061  * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
3062  *
3063  * Return: the exit code of the operation.
3064  */
3065 int ixgbe_read_sr_word_aci(struct ixgbe_hw  *hw, u16 offset, u16 *data)
3066 {
3067 	u32 bytes = sizeof(u16);
3068 	u16 data_local;
3069 	int err;
3070 
3071 	err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
3072 				  (u8 *)&data_local, true);
3073 	if (err)
3074 		return err;
3075 
3076 	*data = data_local;
3077 	return 0;
3078 }
3079 
3080 /**
3081  * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
3082  * @hw: pointer to the HW struct
3083  * @offset: offset from beginning of NVM
3084  * @length: (in) number of bytes to read; (out) number of bytes actually read
3085  * @data: buffer to return data in (sized to fit the specified length)
3086  * @read_shadow_ram: if true, read from shadow RAM instead of NVM
3087  *
3088  * Reads a portion of the NVM, as a flat memory space. This function correctly
3089  * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
3090  * from being exceeded in case of Shadow RAM read requests and ensures that no
3091  * single read request exceeds the maximum 4KB read for a single admin command.
3092  *
3093  * Returns an error code on failure. Note that the data pointer may be
3094  * partially updated if some reads succeed before a failure.
3095  *
3096  * Return: the exit code of the operation.
3097  */
int ixgbe_read_flat_nvm(struct ixgbe_hw  *hw, u32 offset, u32 *length,
			u8 *data, bool read_shadow_ram)
{
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;
	int err;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) >
				(hw->eeprom.word_size * 2u)))
		return -EINVAL;

	do {
		u32 read_size, sector_offset;

		/* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also 4KB.
		 */
		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
		read_size = min_t(u32,
				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
				  inlen - bytes_read);

		/* True when this chunk completes the whole request. */
		last_cmd = !(bytes_read + read_size < inlen);

		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
		 * maximum size guarantees that it will fit within the 2 bytes.
		 */
		err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
					 offset, (u16)read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram);
		if (err)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* Report how many bytes were actually read, even on failure. */
	*length = bytes_read;
	return err;
}
3143 
3144 /**
3145  * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
3146  * @hw: pointer to the HW structure
3147  * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
3148  * @words: (in) number of words to read; (out) number of words actually read
3149  * @data: words read from the Shadow RAM
3150  *
3151  * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM
3152  * ownership.
3153  *
3154  * Return: the operation exit code.
3155  */
3156 int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
3157 			  u16 *data)
3158 {
3159 	u32 bytes = *words * 2;
3160 	int err;
3161 
3162 	err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
3163 	if (err)
3164 		return err;
3165 
3166 	*words = bytes / 2;
3167 
3168 	for (int i = 0; i < *words; i++)
3169 		data[i] = le16_to_cpu(((__le16 *)data)[i]);
3170 
3171 	return 0;
3172 }
3173 
3174 /**
3175  * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
3176  * @hw: pointer to hardware structure
3177  * @offset: offset of  word in the EEPROM to read
3178  * @data: word read from the EEPROM
3179  *
3180  * Reads a 16 bit word from the EEPROM using the ACI.
3181  * If the EEPROM params are not initialized, the function
3182  * initialize them before proceeding with reading.
3183  * The function acquires and then releases the NVM ownership.
3184  *
3185  * Return: the exit code of the operation.
3186  */
3187 int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
3188 {
3189 	int err;
3190 
3191 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
3192 		err = hw->eeprom.ops.init_params(hw);
3193 		if (err)
3194 			return err;
3195 	}
3196 
3197 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3198 	if (err)
3199 		return err;
3200 
3201 	err = ixgbe_read_sr_word_aci(hw, offset, data);
3202 	ixgbe_release_nvm(hw);
3203 
3204 	return err;
3205 }
3206 
3207 /**
3208  * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
3209  * @hw: pointer to hardware structure
3210  * @offset: offset of words in the EEPROM to read
3211  * @words: number of words to read
3212  * @data: words to read from the EEPROM
3213  *
3214  * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
3215  * prior to the read. Acquire/release the NVM ownership.
3216  *
3217  * Return: the operation exit code.
3218  */
3219 int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
3220 				  u16 words, u16 *data)
3221 {
3222 	int err;
3223 
3224 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
3225 		err = hw->eeprom.ops.init_params(hw);
3226 		if (err)
3227 			return err;
3228 	}
3229 
3230 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3231 	if (err)
3232 		return err;
3233 
3234 	err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
3235 	ixgbe_release_nvm(hw);
3236 
3237 	return err;
3238 }
3239 
3240 /**
3241  * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
3242  * @hw: pointer to hardware structure
3243  * @checksum_val: calculated checksum
3244  *
3245  * Performs checksum calculation and validates the EEPROM checksum. If the
3246  * caller does not need checksum_val, the value can be NULL.
3247  * If the EEPROM params are not initialized, the function
3248  * initialize them before proceeding.
3249  * The function acquires and then releases the NVM ownership.
3250  *
3251  * Return: the exit code of the operation.
3252  */
3253 int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
3254 {
3255 	int err;
3256 
3257 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
3258 		err = hw->eeprom.ops.init_params(hw);
3259 		if (err)
3260 			return err;
3261 	}
3262 
3263 	err = ixgbe_nvm_validate_checksum(hw);
3264 	if (err)
3265 		return err;
3266 
3267 	if (checksum_val) {
3268 		u16 tmp_checksum;
3269 
3270 		err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3271 		if (err)
3272 			return err;
3273 
3274 		err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
3275 					     &tmp_checksum);
3276 		ixgbe_release_nvm(hw);
3277 
3278 		if (!err)
3279 			*checksum_val = tmp_checksum;
3280 	}
3281 
3282 	return err;
3283 }
3284 
3285 /**
3286  * ixgbe_reset_hw_e610 - Perform hardware reset
3287  * @hw: pointer to hardware structure
3288  *
3289  * Resets the hardware by resetting the transmit and receive units, masks
3290  * and clears all interrupts, and performs a reset.
3291  *
3292  * Return: the exit code of the operation.
3293  */
int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	u32 ctrl, i;
	int err;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	err = hw->mac.ops.stop_adapter(hw);
	if (err)
		goto reset_hw_out;

	/* Flush pending Tx transactions. */
	ixgbe_clear_tx_pending(hw);

	/* NOTE(review): the return value of phy init is not checked here. */
	hw->phy.ops.init(hw);
mac_reset_top:
	/* Any failure to take the SW/FW semaphore is reported as busy. */
	err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (err)
		return -EBUSY;
	/* Set the reset bit on top of the current CTRL contents. */
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* On poll timeout, record -EIO but continue with the remaining
	 * initialization; the error is returned at the end.
	 */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
							     hw);

		err = -EIO;
		netdev_err(adapter->netdev, "Reset polling failed to complete.");
	}

	/* Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	msleep(100);
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	/* NOTE(review): GENMASK(18, 17) = 0x60000 — confirm this is the
	 * intended RXPBSIZE value against the E610 datasheet.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/* Maximum number of Receive Address Registers. */
#define IXGBE_MAX_NUM_RAR		128

	/* Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to the
	 * maximum number of Receive Address Registers, since we modify this
	 * value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
	hw->mac.ops.init_rx_addrs(hw);

	/* Initialize bus function number */
	hw->mac.ops.set_lan_id(hw);

reset_hw_out:
	return err;
}
3368 
/* MAC operations for E610: a mix of generic helpers, X540/X550 routines
 * shared with earlier parts, and E610-specific implementations (link,
 * media type, flow control, reset).
 */
static const struct ixgbe_mac_operations mac_ops_e610 = {
	.init_hw			= ixgbe_init_hw_generic,
	.start_hw			= ixgbe_start_hw_X540,
	.clear_hw_cntrs			= ixgbe_clear_hw_cntrs_generic,
	.enable_rx_dma			= ixgbe_enable_rx_dma_generic,
	.get_mac_addr			= ixgbe_get_mac_addr_generic,
	.get_device_caps		= ixgbe_get_device_caps_generic,
	.stop_adapter			= ixgbe_stop_adapter_generic,
	.set_lan_id			= ixgbe_set_lan_id_multi_port_pcie,
	.set_rxpba			= ixgbe_set_rxpba_generic,
	.check_link			= ixgbe_check_link_e610,
	.blink_led_start		= ixgbe_blink_led_start_X540,
	.blink_led_stop			= ixgbe_blink_led_stop_X540,
	.set_rar			= ixgbe_set_rar_generic,
	.clear_rar			= ixgbe_clear_rar_generic,
	.set_vmdq			= ixgbe_set_vmdq_generic,
	.set_vmdq_san_mac		= ixgbe_set_vmdq_san_mac_generic,
	.clear_vmdq			= ixgbe_clear_vmdq_generic,
	.init_rx_addrs			= ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list		= ixgbe_update_mc_addr_list_generic,
	.enable_mc			= ixgbe_enable_mc_generic,
	.disable_mc			= ixgbe_disable_mc_generic,
	.clear_vfta			= ixgbe_clear_vfta_generic,
	.set_vfta			= ixgbe_set_vfta_generic,
	.fc_enable			= ixgbe_fc_enable_generic,
	.set_fw_drv_ver			= ixgbe_set_fw_drv_ver_x550,
	.init_uta_tables		= ixgbe_init_uta_tables_generic,
	.set_mac_anti_spoofing		= ixgbe_set_mac_anti_spoofing,
	.set_vlan_anti_spoofing		= ixgbe_set_vlan_anti_spoofing,
	.set_source_address_pruning	=
				ixgbe_set_source_address_pruning_x550,
	.set_ethertype_anti_spoofing	=
				ixgbe_set_ethertype_anti_spoofing_x550,
	.disable_rx_buff		= ixgbe_disable_rx_buff_generic,
	.enable_rx_buff			= ixgbe_enable_rx_buff_generic,
	.enable_rx			= ixgbe_enable_rx_generic,
	.disable_rx			= ixgbe_disable_rx_e610,
	.led_on				= ixgbe_led_on_generic,
	.led_off			= ixgbe_led_off_generic,
	.init_led_link_act		= ixgbe_init_led_link_act_generic,
	.reset_hw			= ixgbe_reset_hw_e610,
	.get_media_type			= ixgbe_get_media_type_e610,
	.setup_link			= ixgbe_setup_link_e610,
	.get_link_capabilities		= ixgbe_get_link_capabilities_e610,
	.get_bus_info			= ixgbe_get_bus_info_generic,
	.acquire_swfw_sync		= ixgbe_acquire_swfw_sync_X540,
	.release_swfw_sync		= ixgbe_release_swfw_sync_X540,
	.init_swfw_sync			= ixgbe_init_swfw_sync_X540,
	.prot_autoc_read		= prot_autoc_read_generic,
	.prot_autoc_write		= prot_autoc_write_generic,
	.setup_fc			= ixgbe_setup_fc_e610,
	.fc_autoneg			= ixgbe_fc_autoneg_e610,
};
3422 
/* PHY operations for E610; link setup and module identification are
 * E610-specific, link speed setup reuses the generic routine.
 */
static const struct ixgbe_phy_operations phy_ops_e610 = {
	.init				= ixgbe_init_phy_ops_e610,
	.identify			= ixgbe_identify_phy_e610,
	.identify_sfp			= ixgbe_identify_module_e610,
	.setup_link_speed		= ixgbe_setup_phy_link_speed_generic,
	.setup_link			= ixgbe_setup_phy_link_e610,
	.enter_lplu			= ixgbe_enter_lplu_e610,
};
3431 
/* EEPROM operations for E610: all accesses go through the ACI-based
 * Shadow RAM read helpers defined above.
 */
static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
	.read				= ixgbe_read_ee_aci_e610,
	.read_buffer			= ixgbe_read_ee_aci_buffer_e610,
	.validate_checksum		= ixgbe_validate_eeprom_checksum_e610,
};
3437 
/* Top-level device description tying the E610 MAC type to its ops tables;
 * register value mapping (mvals) is shared with x550em_a.
 */
const struct ixgbe_info ixgbe_e610_info = {
	.mac			= ixgbe_mac_e610,
	.get_invariants		= ixgbe_get_invariants_X540,
	.mac_ops		= &mac_ops_e610,
	.eeprom_ops		= &eeprom_ops_e610,
	.phy_ops		= &phy_ops_e610,
	.mbx_ops		= &mbx_ops_generic,
	.mvals			= ixgbe_mvals_x550em_a,
};
3447