1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2024 Intel Corporation. */
3
4 #include "ixgbe_common.h"
5 #include "ixgbe_e610.h"
6 #include "ixgbe_x550.h"
7 #include "ixgbe_type.h"
8 #include "ixgbe_x540.h"
9 #include "ixgbe_mbx.h"
10 #include "ixgbe_phy.h"
11
12 /**
13 * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
14 * be resent
15 * @opcode: ACI opcode
16 *
17 * Check if ACI command should be sent again depending on the provided opcode.
18 * It may happen when CSR is busy during link state changes.
19 *
20 * Return: true if the sending command routine should be repeated,
21 * otherwise false.
22 */
ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)23 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
24 {
25 switch (opcode) {
26 case ixgbe_aci_opc_disable_rxen:
27 case ixgbe_aci_opc_get_phy_caps:
28 case ixgbe_aci_opc_get_link_status:
29 case ixgbe_aci_opc_get_link_topo:
30 return true;
31 }
32
33 return false;
34 }
35
36 /**
37 * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
38 * Command Interface
39 * @hw: pointer to the HW struct
40 * @desc: descriptor describing the command
41 * @buf: buffer to use for indirect commands (NULL for direct commands)
42 * @buf_size: size of buffer for indirect commands (0 for direct commands)
43 *
44 * Admin Command is sent using CSR by setting descriptor and buffer in specific
45 * registers.
46 *
47 * Return: the exit code of the operation.
48 * * - 0 - success.
49 * * - -EIO - CSR mechanism is not enabled.
50 * * - -EBUSY - CSR mechanism is busy.
51 * * - -EINVAL - buf_size is too big or
52 * invalid argument buf or buf_size.
53 * * - -ETIME - Admin Command X command timeout.
54 * * - -EIO - Admin Command X invalid state of HICR register or
55 * Admin Command failed because of bad opcode was returned or
56 * Admin Command failed with error Y.
57 */
ixgbe_aci_send_cmd_execute(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)58 static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
59 struct ixgbe_aci_desc *desc,
60 void *buf, u16 buf_size)
61 {
62 u16 opcode, buf_tail_size = buf_size % 4;
63 u32 *raw_desc = (u32 *)desc;
64 u32 hicr, i, buf_tail = 0;
65 bool valid_buf = false;
66
67 hw->aci.last_status = IXGBE_ACI_RC_OK;
68
69 /* It's necessary to check if mechanism is enabled */
70 hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
71
72 if (!(hicr & IXGBE_PF_HICR_EN))
73 return -EIO;
74
75 if (hicr & IXGBE_PF_HICR_C) {
76 hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
77 return -EBUSY;
78 }
79
80 opcode = le16_to_cpu(desc->opcode);
81
82 if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
83 return -EINVAL;
84
85 if (buf)
86 desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
87
88 if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
89 if ((buf && !buf_size) ||
90 (!buf && buf_size))
91 return -EINVAL;
92 if (buf && buf_size)
93 valid_buf = true;
94 }
95
96 if (valid_buf) {
97 if (buf_tail_size)
98 memcpy(&buf_tail, buf + buf_size - buf_tail_size,
99 buf_tail_size);
100
101 if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
102 desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
103
104 desc->datalen = cpu_to_le16(buf_size);
105
106 if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
107 for (i = 0; i < buf_size / 4; i++)
108 IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
109 if (buf_tail_size)
110 IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
111 }
112 }
113
114 /* Descriptor is written to specific registers */
115 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
116 IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
117
118 /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
119 * PF_HICR_EV
120 */
121 hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
122 ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
123 IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
124
125 #define MAX_SLEEP_RESP_US 1000
126 #define MAX_TMOUT_RESP_SYNC_US 100000000
127
128 /* Wait for sync Admin Command response */
129 read_poll_timeout(IXGBE_READ_REG, hicr,
130 (hicr & IXGBE_PF_HICR_SV) ||
131 !(hicr & IXGBE_PF_HICR_C),
132 MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
133 IXGBE_PF_HICR);
134
135 #define MAX_TMOUT_RESP_ASYNC_US 150000000
136
137 /* Wait for async Admin Command response */
138 read_poll_timeout(IXGBE_READ_REG, hicr,
139 (hicr & IXGBE_PF_HICR_EV) ||
140 !(hicr & IXGBE_PF_HICR_C),
141 MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
142 IXGBE_PF_HICR);
143
144 /* Read sync Admin Command response */
145 if ((hicr & IXGBE_PF_HICR_SV)) {
146 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
147 raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
148 raw_desc[i] = raw_desc[i];
149 }
150 }
151
152 /* Read async Admin Command response */
153 if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
154 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
155 raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
156 raw_desc[i] = raw_desc[i];
157 }
158 }
159
160 /* Handle timeout and invalid state of HICR register */
161 if (hicr & IXGBE_PF_HICR_C)
162 return -ETIME;
163
164 if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
165 return -EIO;
166
167 /* For every command other than 0x0014 treat opcode mismatch
168 * as an error. Response to 0x0014 command read from HIDA_2
169 * is a descriptor of an event which is expected to contain
170 * different opcode than the command.
171 */
172 if (desc->opcode != cpu_to_le16(opcode) &&
173 opcode != ixgbe_aci_opc_get_fw_event)
174 return -EIO;
175
176 if (desc->retval) {
177 hw->aci.last_status = (enum ixgbe_aci_err)
178 le16_to_cpu(desc->retval);
179 return -EIO;
180 }
181
182 /* Write a response values to a buf */
183 if (valid_buf) {
184 for (i = 0; i < buf_size / 4; i++)
185 ((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
186 if (buf_tail_size) {
187 buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
188 memcpy(buf + buf_size - buf_tail_size, &buf_tail,
189 buf_tail_size);
190 }
191 }
192
193 return 0;
194 }
195
196 /**
197 * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
198 * @hw: pointer to the HW struct
199 * @desc: descriptor describing the command
200 * @buf: buffer to use for indirect commands (NULL for direct commands)
201 * @buf_size: size of buffer for indirect commands (0 for direct commands)
202 *
203 * Helper function to send FW Admin Commands to the FW Admin Command Interface.
204 *
205 * Retry sending the FW Admin Command multiple times to the FW ACI
206 * if the EBUSY Admin Command error is returned.
207 *
208 * Return: the exit code of the operation.
209 */
ixgbe_aci_send_cmd(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)210 int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
211 void *buf, u16 buf_size)
212 {
213 u16 opcode = le16_to_cpu(desc->opcode);
214 struct ixgbe_aci_desc desc_cpy;
215 enum ixgbe_aci_err last_status;
216 u8 idx = 0, *buf_cpy = NULL;
217 bool is_cmd_for_retry;
218 unsigned long timeout;
219 int err;
220
221 is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
222 if (is_cmd_for_retry) {
223 if (buf) {
224 buf_cpy = kmalloc(buf_size, GFP_KERNEL);
225 if (!buf_cpy)
226 return -ENOMEM;
227 *buf_cpy = *(u8 *)buf;
228 }
229 desc_cpy = *desc;
230 }
231
232 timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
233 do {
234 mutex_lock(&hw->aci.lock);
235 err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
236 last_status = hw->aci.last_status;
237 mutex_unlock(&hw->aci.lock);
238
239 if (!is_cmd_for_retry || !err ||
240 last_status != IXGBE_ACI_RC_EBUSY)
241 break;
242
243 if (buf)
244 memcpy(buf, buf_cpy, buf_size);
245 *desc = desc_cpy;
246
247 msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
248 } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
249 time_before(jiffies, timeout));
250
251 kfree(buf_cpy);
252
253 return err;
254 }
255
256 /**
257 * ixgbe_aci_check_event_pending - check if there are any pending events
258 * @hw: pointer to the HW struct
259 *
260 * Determine if there are any pending events.
261 *
262 * Return: true if there are any currently pending events
263 * otherwise false.
264 */
ixgbe_aci_check_event_pending(struct ixgbe_hw * hw)265 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
266 {
267 u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
268 u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
269
270 return (fwsts & ep_bit_mask) ? true : false;
271 }
272
273 /**
274 * ixgbe_aci_get_event - get an event from ACI
275 * @hw: pointer to the HW struct
276 * @e: event information structure
277 * @pending: optional flag signaling that there are more pending events
278 *
279 * Obtain an event from ACI and return its content
280 * through 'e' using ACI command (0x0014).
281 * Provide information if there are more events
282 * to retrieve through 'pending'.
283 *
284 * Return: the exit code of the operation.
285 */
ixgbe_aci_get_event(struct ixgbe_hw * hw,struct ixgbe_aci_event * e,bool * pending)286 int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
287 bool *pending)
288 {
289 struct ixgbe_aci_desc desc;
290 int err;
291
292 if (!e || (!e->msg_buf && e->buf_len))
293 return -EINVAL;
294
295 mutex_lock(&hw->aci.lock);
296
297 /* Check if there are any events pending */
298 if (!ixgbe_aci_check_event_pending(hw)) {
299 err = -ENOENT;
300 goto aci_get_event_exit;
301 }
302
303 /* Obtain pending event */
304 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
305 err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
306 if (err)
307 goto aci_get_event_exit;
308
309 /* Returned 0x0014 opcode indicates that no event was obtained */
310 if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
311 err = -ENOENT;
312 goto aci_get_event_exit;
313 }
314
315 /* Determine size of event data */
316 e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
317 /* Write event descriptor to event info structure */
318 memcpy(&e->desc, &desc, sizeof(e->desc));
319
320 /* Check if there are any further events pending */
321 if (pending)
322 *pending = ixgbe_aci_check_event_pending(hw);
323
324 aci_get_event_exit:
325 mutex_unlock(&hw->aci.lock);
326
327 return err;
328 }
329
/**
 * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Helper function to fill the descriptor desc with default values
 * and the provided opcode.
 */
void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
{
	/* Zero out the desc. Full memset (rather than field-wise init) also
	 * clears any padding, since the descriptor is later written to HW
	 * registers as raw dwords by ixgbe_aci_send_cmd_execute().
	 */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	/* SI is the only flag set by default for direct commands. */
	desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
}
345
346 /**
347 * ixgbe_aci_req_res - request a common resource
348 * @hw: pointer to the HW struct
349 * @res: resource ID
350 * @access: access type
351 * @sdp_number: resource number
352 * @timeout: the maximum time in ms that the driver may hold the resource
353 *
354 * Requests a common resource using the ACI command (0x0008).
355 * Specifies the maximum time the driver may hold the resource.
356 * If the requested resource is currently occupied by some other driver,
357 * a busy return value is returned and the timeout field value indicates the
358 * maximum time the current owner has to free it.
359 *
360 * Return: the exit code of the operation.
361 */
ixgbe_aci_req_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u8 sdp_number,u32 * timeout)362 static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
363 enum ixgbe_aci_res_access_type access,
364 u8 sdp_number, u32 *timeout)
365 {
366 struct ixgbe_aci_cmd_req_res *cmd_resp;
367 struct ixgbe_aci_desc desc;
368 int err;
369
370 cmd_resp = &desc.params.res_owner;
371
372 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
373
374 cmd_resp->res_id = cpu_to_le16(res);
375 cmd_resp->access_type = cpu_to_le16(access);
376 cmd_resp->res_number = cpu_to_le32(sdp_number);
377 cmd_resp->timeout = cpu_to_le32(*timeout);
378 *timeout = 0;
379
380 err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
381
382 /* If the resource is held by some other driver, the command completes
383 * with a busy return value and the timeout field indicates the maximum
384 * time the current owner of the resource has to free it.
385 */
386 if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
387 *timeout = le32_to_cpu(cmd_resp->timeout);
388
389 return err;
390 }
391
392 /**
393 * ixgbe_aci_release_res - release a common resource using ACI
394 * @hw: pointer to the HW struct
395 * @res: resource ID
396 * @sdp_number: resource number
397 *
398 * Release a common resource using ACI command (0x0009).
399 *
400 * Return: the exit code of the operation.
401 */
ixgbe_aci_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,u8 sdp_number)402 static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
403 enum ixgbe_aci_res_ids res, u8 sdp_number)
404 {
405 struct ixgbe_aci_cmd_req_res *cmd;
406 struct ixgbe_aci_desc desc;
407
408 cmd = &desc.params.res_owner;
409
410 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
411
412 cmd->res_id = cpu_to_le16(res);
413 cmd->res_number = cpu_to_le32(sdp_number);
414
415 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
416 }
417
/**
 * ixgbe_acquire_res - acquire the ownership of a resource
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * Make an attempt to acquire the ownership of a resource using
 * the ixgbe_aci_req_res to utilize ACI.
 * In case if some other driver has previously acquired the resource and
 * performed any necessary updates, the -EALREADY is returned,
 * and the caller does not obtain the resource and has no further work to do.
 * If needed, the function will poll until the current lock owner timeouts.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
		      enum ixgbe_aci_res_access_type access, u32 timeout)
{
#define IXGBE_RES_POLLING_DELAY_MS	10
	u32 delay = IXGBE_RES_POLLING_DELAY_MS;
	/* res_timeout is an in/out parameter of ixgbe_aci_req_res: in holds
	 * the requested hold time, out receives the FW-reported time the
	 * current owner may still hold the resource.
	 */
	u32 res_timeout = timeout;
	u32 retry_timeout;
	int err;

	err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (err == -EALREADY)
		return err;

	/* If necessary, poll until the current lock owner timeouts.
	 * Set retry_timeout to the timeout value reported by the FW in the
	 * response to the "Request Resource Ownership" (0x0008) Admin Command
	 * as it indicates the maximum time the current owner of the resource
	 * is allowed to hold it.
	 */
	retry_timeout = res_timeout;
	while (err && retry_timeout && res_timeout) {
		msleep(delay);
		/* Count down toward the owner's hold limit, saturating at 0
		 * so the loop condition terminates.
		 */
		retry_timeout = (retry_timeout > delay) ?
				retry_timeout - delay : 0;
		err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);

		/* Success - lock acquired.
		 * -EALREADY - lock free, no work to do.
		 */
		if (!err || err == -EALREADY)
			break;
	}

	return err;
}
475
476 /**
477 * ixgbe_release_res - release a common resource
478 * @hw: pointer to the HW structure
479 * @res: resource ID
480 *
481 * Release a common resource using ixgbe_aci_release_res.
482 */
ixgbe_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res)483 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
484 {
485 u32 total_delay = 0;
486 int err;
487
488 err = ixgbe_aci_release_res(hw, res, 0);
489
490 /* There are some rare cases when trying to release the resource
491 * results in an admin command timeout, so handle them correctly.
492 */
493 while (err == -ETIME &&
494 total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
495 usleep_range(1000, 1500);
496 err = ixgbe_aci_release_res(hw, res, 0);
497 total_delay++;
498 }
499 }
500
/**
 * ixgbe_parse_e610_caps - Parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities (currently unused here)
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Return: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
				  struct ixgbe_hw_caps *caps,
				  struct ixgbe_aci_cmd_list_caps_elem *elem,
				  const char *prefix)
{
	/* All element fields arrive little-endian from firmware. */
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);

	switch (cap) {
	case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		break;
	case IXGBE_ACI_CAPS_SRIOV:
		/* number == 1 signals SR-IOV 1.1 support */
		caps->sr_iov_1_1 = (number == 1);
		break;
	case IXGBE_ACI_CAPS_VMDQ:
		caps->vmdq = (number == 1);
		break;
	case IXGBE_ACI_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		break;
	case IXGBE_ACI_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		break;
	case IXGBE_ACI_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_NVM_VER:
		/* Recognized but carries no data to store. */
		break;
	case IXGBE_ACI_CAPS_MAX_MTU:
		caps->max_mtu = number;
		break;
	case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		break;
	case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		break;
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
	{
		/* Four consecutive cap IDs index the per-image arrays. */
		u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;

		caps->ext_topo_dev_img_ver_high[index] = number;
		caps->ext_topo_dev_img_ver_low[index] = logical_id;
		caps->ext_topo_dev_img_part_num[index] =
			FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
		caps->ext_topo_dev_img_load_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
		caps->ext_topo_dev_img_prog_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
		break;
	}
	default:
		/* Not one of the recognized common capabilities */
		return false;
	}

	return true;
}
590
591 /**
592 * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
593 * @hw: pointer to the HW struct
594 * @dev_p: pointer to device capabilities structure
595 * @cap: capability element to parse
596 *
597 * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
598 */
599 static void
ixgbe_parse_valid_functions_cap(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)600 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
601 struct ixgbe_hw_dev_caps *dev_p,
602 struct ixgbe_aci_cmd_list_caps_elem *cap)
603 {
604 dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
605 }
606
607 /**
608 * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
609 * @hw: pointer to the HW struct
610 * @dev_p: pointer to device capabilities structure
611 * @cap: capability element to parse
612 *
613 * Parse IXGBE_ACI_CAPS_VF for device capabilities.
614 */
ixgbe_parse_vf_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)615 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
616 struct ixgbe_hw_dev_caps *dev_p,
617 struct ixgbe_aci_cmd_list_caps_elem *cap)
618 {
619 dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
620 }
621
622 /**
623 * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
624 * @hw: pointer to the HW struct
625 * @dev_p: pointer to device capabilities structure
626 * @cap: capability element to parse
627 *
628 * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
629 */
ixgbe_parse_vsi_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)630 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
631 struct ixgbe_hw_dev_caps *dev_p,
632 struct ixgbe_aci_cmd_list_caps_elem *cap)
633 {
634 dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
635 }
636
637 /**
638 * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
639 * @hw: pointer to the HW struct
640 * @dev_p: pointer to device capabilities structure
641 * @cap: capability element to parse
642 *
643 * Parse IXGBE_ACI_CAPS_FD for device capabilities.
644 */
ixgbe_parse_fdir_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)645 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
646 struct ixgbe_hw_dev_caps *dev_p,
647 struct ixgbe_aci_cmd_list_caps_elem *cap)
648 {
649 dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
650 }
651
652 /**
653 * ixgbe_parse_dev_caps - Parse device capabilities
654 * @hw: pointer to the HW struct
655 * @dev_p: pointer to device capabilities structure
656 * @buf: buffer containing the device capability records
657 * @cap_count: the number of capabilities
658 *
659 * Helper device to parse device (0x000B) capabilities list. For
660 * capabilities shared between device and function, this relies on
661 * ixgbe_parse_e610_caps.
662 *
663 * Loop through the list of provided capabilities and extract the relevant
664 * data into the device capabilities structured.
665 */
ixgbe_parse_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,void * buf,u32 cap_count)666 static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
667 struct ixgbe_hw_dev_caps *dev_p,
668 void *buf, u32 cap_count)
669 {
670 struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
671 u32 i;
672
673 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
674
675 memset(dev_p, 0, sizeof(*dev_p));
676
677 for (i = 0; i < cap_count; i++) {
678 u16 cap = le16_to_cpu(cap_resp[i].cap);
679
680 ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
681 "dev caps");
682
683 switch (cap) {
684 case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
685 ixgbe_parse_valid_functions_cap(hw, dev_p,
686 &cap_resp[i]);
687 break;
688 case IXGBE_ACI_CAPS_VF:
689 ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
690 break;
691 case IXGBE_ACI_CAPS_VSI:
692 ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
693 break;
694 case IXGBE_ACI_CAPS_FD:
695 ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
696 break;
697 default:
698 /* Don't list common capabilities as unknown */
699 break;
700 }
701 }
702 }
703
704 /**
705 * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
706 * @hw: pointer to the HW struct
707 * @func_p: pointer to function capabilities structure
708 * @cap: pointer to the capability element to parse
709 *
710 * Extract function capabilities for IXGBE_ACI_CAPS_VF.
711 */
ixgbe_parse_vf_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)712 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
713 struct ixgbe_hw_func_caps *func_p,
714 struct ixgbe_aci_cmd_list_caps_elem *cap)
715 {
716 func_p->num_allocd_vfs = le32_to_cpu(cap->number);
717 func_p->vf_base_id = le32_to_cpu(cap->logical_id);
718 }
719
720 /**
721 * ixgbe_get_num_per_func - determine number of resources per PF
722 * @hw: pointer to the HW structure
723 * @max: value to be evenly split between each PF
724 *
725 * Determine the number of valid functions by going through the bitmap returned
726 * from parsing capabilities and use this to calculate the number of resources
727 * per PF based on the max value passed in.
728 *
729 * Return: the number of resources per PF or 0, if no PH are available.
730 */
ixgbe_get_num_per_func(struct ixgbe_hw * hw,u32 max)731 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
732 {
733 #define IXGBE_CAPS_VALID_FUNCS_M GENMASK(7, 0)
734 u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
735 IXGBE_CAPS_VALID_FUNCS_M);
736
737 return funcs ? (max / funcs) : 0;
738 }
739
740 /**
741 * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
742 * @hw: pointer to the HW struct
743 * @func_p: pointer to function capabilities structure
744 * @cap: pointer to the capability element to parse
745 *
746 * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
747 */
ixgbe_parse_vsi_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)748 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
749 struct ixgbe_hw_func_caps *func_p,
750 struct ixgbe_aci_cmd_list_caps_elem *cap)
751 {
752 func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
753 }
754
755 /**
756 * ixgbe_parse_func_caps - Parse function capabilities
757 * @hw: pointer to the HW struct
758 * @func_p: pointer to function capabilities structure
759 * @buf: buffer containing the function capability records
760 * @cap_count: the number of capabilities
761 *
762 * Helper function to parse function (0x000A) capabilities list. For
763 * capabilities shared between device and function, this relies on
764 * ixgbe_parse_e610_caps.
765 *
766 * Loop through the list of provided capabilities and extract the relevant
767 * data into the function capabilities structured.
768 */
ixgbe_parse_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,void * buf,u32 cap_count)769 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
770 struct ixgbe_hw_func_caps *func_p,
771 void *buf, u32 cap_count)
772 {
773 struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
774 u32 i;
775
776 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
777
778 memset(func_p, 0, sizeof(*func_p));
779
780 for (i = 0; i < cap_count; i++) {
781 u16 cap = le16_to_cpu(cap_resp[i].cap);
782
783 ixgbe_parse_e610_caps(hw, &func_p->common_cap,
784 &cap_resp[i], "func caps");
785
786 switch (cap) {
787 case IXGBE_ACI_CAPS_VF:
788 ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
789 break;
790 case IXGBE_ACI_CAPS_VSI:
791 ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
792 break;
793 default:
794 /* Don't list common capabilities as unknown */
795 break;
796 }
797 }
798 }
799
800 /**
801 * ixgbe_aci_list_caps - query function/device capabilities
802 * @hw: pointer to the HW struct
803 * @buf: a buffer to hold the capabilities
804 * @buf_size: size of the buffer
805 * @cap_count: if not NULL, set to the number of capabilities reported
806 * @opc: capabilities type to discover, device or function
807 *
808 * Get the function (0x000A) or device (0x000B) capabilities description from
809 * firmware and store it in the buffer.
810 *
811 * If the cap_count pointer is not NULL, then it is set to the number of
812 * capabilities firmware will report. Note that if the buffer size is too
813 * small, it is possible the command will return -ENOMEM. The
814 * cap_count will still be updated in this case. It is recommended that the
815 * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
816 * buffer that firmware could return) to avoid this.
817 *
818 * Return: the exit code of the operation.
819 * Exit code of -ENOMEM means the buffer size is too small.
820 */
ixgbe_aci_list_caps(struct ixgbe_hw * hw,void * buf,u16 buf_size,u32 * cap_count,enum ixgbe_aci_opc opc)821 int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
822 u32 *cap_count, enum ixgbe_aci_opc opc)
823 {
824 struct ixgbe_aci_cmd_list_caps *cmd;
825 struct ixgbe_aci_desc desc;
826 int err;
827
828 cmd = &desc.params.get_cap;
829
830 if (opc != ixgbe_aci_opc_list_func_caps &&
831 opc != ixgbe_aci_opc_list_dev_caps)
832 return -EINVAL;
833
834 ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
835 err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
836
837 if (cap_count)
838 *cap_count = le32_to_cpu(cmd->count);
839
840 return err;
841 }
842
843 /**
844 * ixgbe_discover_dev_caps - Read and extract device capabilities
845 * @hw: pointer to the hardware structure
846 * @dev_caps: pointer to device capabilities structure
847 *
848 * Read the device capabilities and extract them into the dev_caps structure
849 * for later use.
850 *
851 * Return: the exit code of the operation.
852 */
ixgbe_discover_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_caps)853 int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
854 struct ixgbe_hw_dev_caps *dev_caps)
855 {
856 u32 cap_count;
857 u8 *cbuf;
858 int err;
859
860 cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
861 if (!cbuf)
862 return -ENOMEM;
863
864 /* Although the driver doesn't know the number of capabilities the
865 * device will return, we can simply send a 4KB buffer, the maximum
866 * possible size that firmware can return.
867 */
868 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
869 sizeof(struct ixgbe_aci_cmd_list_caps_elem);
870
871 err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
872 &cap_count,
873 ixgbe_aci_opc_list_dev_caps);
874 if (!err)
875 ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
876
877 kfree(cbuf);
878
879 return 0;
880 }
881
882 /**
883 * ixgbe_discover_func_caps - Read and extract function capabilities
884 * @hw: pointer to the hardware structure
885 * @func_caps: pointer to function capabilities structure
886 *
887 * Read the function capabilities and extract them into the func_caps structure
888 * for later use.
889 *
890 * Return: the exit code of the operation.
891 */
ixgbe_discover_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_caps)892 int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
893 struct ixgbe_hw_func_caps *func_caps)
894 {
895 u32 cap_count;
896 u8 *cbuf;
897 int err;
898
899 cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
900 if (!cbuf)
901 return -ENOMEM;
902
903 /* Although the driver doesn't know the number of capabilities the
904 * device will return, we can simply send a 4KB buffer, the maximum
905 * possible size that firmware can return.
906 */
907 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
908 sizeof(struct ixgbe_aci_cmd_list_caps_elem);
909
910 err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
911 &cap_count,
912 ixgbe_aci_opc_list_func_caps);
913 if (!err)
914 ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
915
916 kfree(cbuf);
917
918 return 0;
919 }
920
921 /**
922 * ixgbe_get_caps - get info about the HW
923 * @hw: pointer to the hardware structure
924 *
925 * Retrieve both device and function capabilities.
926 *
927 * Return: the exit code of the operation.
928 */
ixgbe_get_caps(struct ixgbe_hw * hw)929 int ixgbe_get_caps(struct ixgbe_hw *hw)
930 {
931 int err;
932
933 err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
934 if (err)
935 return err;
936
937 return ixgbe_discover_func_caps(hw, &hw->func_caps);
938 }
939
940 /**
941 * ixgbe_aci_disable_rxen - disable RX
942 * @hw: pointer to the HW struct
943 *
944 * Request a safe disable of Receive Enable using ACI command (0x000C).
945 *
946 * Return: the exit code of the operation.
947 */
ixgbe_aci_disable_rxen(struct ixgbe_hw * hw)948 int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
949 {
950 struct ixgbe_aci_cmd_disable_rxen *cmd;
951 struct ixgbe_aci_desc desc;
952
953 cmd = &desc.params.disable_rxen;
954
955 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
956
957 cmd->lport_num = hw->bus.func;
958
959 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
960 }
961
962 /**
963 * ixgbe_aci_get_phy_caps - returns PHY capabilities
964 * @hw: pointer to the HW struct
965 * @qual_mods: report qualified modules
966 * @report_mode: report mode capabilities
967 * @pcaps: structure for PHY capabilities to be filled
968 *
969 * Returns the various PHY capabilities supported on the Port
970 * using ACI command (0x0600).
971 *
972 * Return: the exit code of the operation.
973 */
ixgbe_aci_get_phy_caps(struct ixgbe_hw * hw,bool qual_mods,u8 report_mode,struct ixgbe_aci_cmd_get_phy_caps_data * pcaps)974 int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
975 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
976 {
977 struct ixgbe_aci_cmd_get_phy_caps *cmd;
978 u16 pcaps_size = sizeof(*pcaps);
979 struct ixgbe_aci_desc desc;
980 int err;
981
982 cmd = &desc.params.get_phy;
983
984 if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
985 return -EINVAL;
986
987 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
988
989 if (qual_mods)
990 cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
991
992 cmd->param0 |= cpu_to_le16(report_mode);
993 err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
994 if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
995 hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
996 hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
997 memcpy(hw->link.link_info.module_type, &pcaps->module_type,
998 sizeof(hw->link.link_info.module_type));
999 }
1000
1001 return err;
1002 }
1003
1004 /**
1005 * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
1006 * @caps: PHY ability structure to copy data from
1007 * @cfg: PHY configuration structure to copy data to
1008 *
1009 * Helper function to copy data from PHY capabilities data structure
1010 * to PHY configuration data structure
1011 */
ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * caps,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1012 void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
1013 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1014 {
1015 if (!caps || !cfg)
1016 return;
1017
1018 memset(cfg, 0, sizeof(*cfg));
1019 cfg->phy_type_low = caps->phy_type_low;
1020 cfg->phy_type_high = caps->phy_type_high;
1021 cfg->caps = caps->caps;
1022 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
1023 cfg->eee_cap = caps->eee_cap;
1024 cfg->eeer_value = caps->eeer_value;
1025 cfg->link_fec_opt = caps->link_fec_options;
1026 cfg->module_compliance_enforcement =
1027 caps->module_compliance_enforcement;
1028 }
1029
1030 /**
1031 * ixgbe_aci_set_phy_cfg - set PHY configuration
1032 * @hw: pointer to the HW struct
1033 * @cfg: structure with PHY configuration data to be set
1034 *
1035 * Set the various PHY configuration parameters supported on the Port
1036 * using ACI command (0x0601).
1037 * One or more of the Set PHY config parameters may be ignored in an MFP
1038 * mode as the PF may not have the privilege to set some of the PHY Config
1039 * parameters.
1040 *
1041 * Return: the exit code of the operation.
1042 */
ixgbe_aci_set_phy_cfg(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1043 int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
1044 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1045 {
1046 struct ixgbe_aci_desc desc;
1047 int err;
1048
1049 if (!cfg)
1050 return -EINVAL;
1051
1052 /* Ensure that only valid bits of cfg->caps can be turned on. */
1053 cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
1054
1055 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
1056 desc.params.set_phy.lport_num = hw->bus.func;
1057 desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
1058
1059 err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
1060 if (!err)
1061 hw->phy.curr_user_phy_cfg = *cfg;
1062
1063 return err;
1064 }
1065
1066 /**
1067 * ixgbe_aci_set_link_restart_an - set up link and restart AN
1068 * @hw: pointer to the HW struct
1069 * @ena_link: if true: enable link, if false: disable link
1070 *
1071 * Function sets up the link and restarts the Auto-Negotiation over the link.
1072 *
1073 * Return: the exit code of the operation.
1074 */
ixgbe_aci_set_link_restart_an(struct ixgbe_hw * hw,bool ena_link)1075 int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
1076 {
1077 struct ixgbe_aci_cmd_restart_an *cmd;
1078 struct ixgbe_aci_desc desc;
1079
1080 cmd = &desc.params.restart_an;
1081
1082 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
1083
1084 cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
1085 cmd->lport_num = hw->bus.func;
1086 if (ena_link)
1087 cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1088 else
1089 cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1090
1091 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1092 }
1093
1094 /**
1095 * ixgbe_is_media_cage_present - check if media cage is present
1096 * @hw: pointer to the HW struct
1097 *
1098 * Identify presence of media cage using the ACI command (0x06E0).
1099 *
1100 * Return: true if media cage is present, else false. If no cage, then
1101 * media type is backplane or BASE-T.
1102 */
ixgbe_is_media_cage_present(struct ixgbe_hw * hw)1103 static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
1104 {
1105 struct ixgbe_aci_cmd_get_link_topo *cmd;
1106 struct ixgbe_aci_desc desc;
1107
1108 cmd = &desc.params.get_link_topo;
1109
1110 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1111
1112 cmd->addr.topo_params.node_type_ctx =
1113 FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
1114 IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
1115
1116 /* Set node type. */
1117 cmd->addr.topo_params.node_type_ctx |=
1118 FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
1119 IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
1120
1121 /* Node type cage can be used to determine if cage is present. If AQC
1122 * returns error (ENOENT), then no cage present. If no cage present then
1123 * connection type is backplane or BASE-T.
1124 */
1125 return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
1126 }
1127
/**
 * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
 * @hw: pointer to the HW struct
 *
 * Try to identify the media type based on the phy type.
 * If more than one media type, the ixgbe_media_type_unknown is returned.
 * First, phy_type_low is checked, then phy_type_high.
 * If none are identified, the ixgbe_media_type_unknown is returned
 *
 * Return: type of a media based on phy type in form of enum.
 */
static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
{
	struct ixgbe_link_status *hw_link_info;

	if (!hw)
		return ixgbe_media_type_unknown;

	hw_link_info = &hw->link.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ixgbe_media_type_unknown;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		     IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		     IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ixgbe_media_type_da;

		switch (hw_link_info->phy_type_low) {
		/* Optical PHY types map to fiber media. */
		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
			return ixgbe_media_type_fiber;
		/* AOC/ACC cable assemblies are also reported as fiber. */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
			return ixgbe_media_type_fiber;
		/* BASE-T PHY types map to copper media. */
		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
		case IXGBE_PHY_TYPE_LOW_25GBASE_T:
			return ixgbe_media_type_copper;
		/* Direct-attach cable PHY types. */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
			return ixgbe_media_type_da;
		case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
			/* AUI only when a media cage is actually present;
			 * otherwise fall through to the backplane group.
			 */
			if (ixgbe_is_media_cage_present(hw))
				return ixgbe_media_type_aui;
			fallthrough;
		/* KX/KR and chip-to-chip PHY types map to backplane. */
		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
			return ixgbe_media_type_backplane;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
			return ixgbe_media_type_copper;
		}
	}
	return ixgbe_media_type_unknown;
}
1211
1212 /**
1213 * ixgbe_update_link_info - update status of the HW network link
1214 * @hw: pointer to the HW struct
1215 *
1216 * Update the status of the HW network link.
1217 *
1218 * Return: the exit code of the operation.
1219 */
ixgbe_update_link_info(struct ixgbe_hw * hw)1220 int ixgbe_update_link_info(struct ixgbe_hw *hw)
1221 {
1222 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
1223 struct ixgbe_link_status *li;
1224 int err;
1225
1226 if (!hw)
1227 return -EINVAL;
1228
1229 li = &hw->link.link_info;
1230
1231 err = ixgbe_aci_get_link_info(hw, true, NULL);
1232 if (err)
1233 return err;
1234
1235 if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
1236 return 0;
1237
1238 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
1239 if (!pcaps)
1240 return -ENOMEM;
1241
1242 err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1243 pcaps);
1244
1245 if (!err)
1246 memcpy(li->module_type, &pcaps->module_type,
1247 sizeof(li->module_type));
1248
1249 kfree(pcaps);
1250
1251 return err;
1252 }
1253
1254 /**
1255 * ixgbe_get_link_status - get status of the HW network link
1256 * @hw: pointer to the HW struct
1257 * @link_up: pointer to bool (true/false = linkup/linkdown)
1258 *
1259 * Variable link_up is true if link is up, false if link is down.
1260 * The variable link_up is invalid if status is non zero. As a
1261 * result of this call, link status reporting becomes enabled
1262 *
1263 * Return: the exit code of the operation.
1264 */
ixgbe_get_link_status(struct ixgbe_hw * hw,bool * link_up)1265 int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
1266 {
1267 if (!hw || !link_up)
1268 return -EINVAL;
1269
1270 if (hw->link.get_link_info) {
1271 int err = ixgbe_update_link_info(hw);
1272
1273 if (err)
1274 return err;
1275 }
1276
1277 *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
1278
1279 return 0;
1280 }
1281
/**
 * ixgbe_aci_get_link_info - get the link status
 * @hw: pointer to the HW struct
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 *
 * Get the current Link Status using ACI command (0x607).
 * The current link can be optionally provided to update
 * the status.
 *
 * Return: the link status of the adapter.
 */
int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
			    struct ixgbe_link_status *link)
{
	struct ixgbe_aci_cmd_get_link_status_data link_data = {};
	struct ixgbe_aci_cmd_get_link_status *resp;
	struct ixgbe_link_status *li_old, *li;
	struct ixgbe_fc_info *hw_fc_info;
	struct ixgbe_aci_desc desc;
	bool tx_pause, rx_pause;
	u8 cmd_flags;
	int err;

	if (!hw)
		return -EINVAL;

	li_old = &hw->link.link_info_old;
	li = &hw->link.link_info;
	hw_fc_info = &hw->fc;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
	/* The same command enables or disables link status event (LSE)
	 * reporting depending on the flag passed in.
	 */
	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = hw->bus.func;

	err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
	if (err)
		return err;

	/* Save off old link status information. */
	*li_old = *li;

	/* Update current link status information from the FW response,
	 * converting multi-byte fields from little-endian wire order.
	 */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
				      IXGBE_ACI_CFG_PACING_TYPE_M);

	/* Update fc info: derive the negotiated flow control mode from the
	 * TX/RX pause bits reported by auto-negotiation.
	 */
	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_full;
	else if (tx_pause)
		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
	else if (rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
	else
		hw_fc_info->current_mode = ixgbe_fc_none;

	/* FW echoes back whether LSE reporting ended up enabled. */
	li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
			 IXGBE_ACI_LSE_IS_ENABLED);

	/* Save link status information. */
	if (link)
		*link = *li;

	/* Flag cleared so calling functions don't call AQ again. */
	hw->link.get_link_info = false;

	return 0;
}
1364
1365 /**
1366 * ixgbe_aci_set_event_mask - set event mask
1367 * @hw: pointer to the HW struct
1368 * @port_num: port number of the physical function
1369 * @mask: event mask to be set
1370 *
1371 * Set the event mask using ACI command (0x0613).
1372 *
1373 * Return: the exit code of the operation.
1374 */
ixgbe_aci_set_event_mask(struct ixgbe_hw * hw,u8 port_num,u16 mask)1375 int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
1376 {
1377 struct ixgbe_aci_cmd_set_event_mask *cmd;
1378 struct ixgbe_aci_desc desc;
1379
1380 cmd = &desc.params.set_event_mask;
1381
1382 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
1383
1384 cmd->lport_num = port_num;
1385
1386 cmd->event_mask = cpu_to_le16(mask);
1387 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1388 }
1389
1390 /**
1391 * ixgbe_configure_lse - enable/disable link status events
1392 * @hw: pointer to the HW struct
1393 * @activate: true for enable lse, false otherwise
1394 * @mask: event mask to be set; a set bit means deactivation of the
1395 * corresponding event
1396 *
1397 * Set the event mask and then enable or disable link status events
1398 *
1399 * Return: the exit code of the operation.
1400 */
ixgbe_configure_lse(struct ixgbe_hw * hw,bool activate,u16 mask)1401 int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
1402 {
1403 int err;
1404
1405 err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
1406 if (err)
1407 return err;
1408
1409 /* Enabling link status events generation by fw. */
1410 return ixgbe_aci_get_link_info(hw, activate, NULL);
1411 }
1412
1413 /**
1414 * ixgbe_get_media_type_e610 - Gets media type
1415 * @hw: pointer to the HW struct
1416 *
1417 * In order to get the media type, the function gets PHY
1418 * capabilities and later on use them to identify the PHY type
1419 * checking phy_type_high and phy_type_low.
1420 *
1421 * Return: the type of media in form of ixgbe_media_type enum
1422 * or ixgbe_media_type_unknown in case of an error.
1423 */
ixgbe_get_media_type_e610(struct ixgbe_hw * hw)1424 enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
1425 {
1426 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
1427 int rc;
1428
1429 rc = ixgbe_update_link_info(hw);
1430 if (rc)
1431 return ixgbe_media_type_unknown;
1432
1433 /* If there is no link but PHY (dongle) is available SW should use
1434 * Get PHY Caps admin command instead of Get Link Status, find most
1435 * significant bit that is set in PHY types reported by the command
1436 * and use it to discover media type.
1437 */
1438 if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
1439 (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
1440 int highest_bit;
1441
1442 /* Get PHY Capabilities */
1443 rc = ixgbe_aci_get_phy_caps(hw, false,
1444 IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1445 &pcaps);
1446 if (rc)
1447 return ixgbe_media_type_unknown;
1448
1449 highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
1450 if (highest_bit) {
1451 hw->link.link_info.phy_type_high =
1452 BIT_ULL(highest_bit - 1);
1453 hw->link.link_info.phy_type_low = 0;
1454 } else {
1455 highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
1456 if (highest_bit)
1457 hw->link.link_info.phy_type_low =
1458 BIT_ULL(highest_bit - 1);
1459 }
1460 }
1461
1462 /* Based on link status or search above try to discover media type. */
1463 hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
1464
1465 return hw->phy.media_type;
1466 }
1467
1468 /**
1469 * ixgbe_setup_link_e610 - Set up link
1470 * @hw: pointer to hardware structure
1471 * @speed: new link speed
1472 * @autoneg_wait: true when waiting for completion is needed
1473 *
1474 * Set up the link with the specified speed.
1475 *
1476 * Return: the exit code of the operation.
1477 */
ixgbe_setup_link_e610(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait)1478 int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1479 bool autoneg_wait)
1480 {
1481 /* Simply request FW to perform proper PHY setup */
1482 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1483 }
1484
1485 /**
1486 * ixgbe_check_link_e610 - Determine link and speed status
1487 * @hw: pointer to hardware structure
1488 * @speed: pointer to link speed
1489 * @link_up: true when link is up
1490 * @link_up_wait_to_complete: bool used to wait for link up or not
1491 *
1492 * Determine if the link is up and the current link speed
1493 * using ACI command (0x0607).
1494 *
1495 * Return: the exit code of the operation.
1496 */
ixgbe_check_link_e610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)1497 int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
1498 bool *link_up, bool link_up_wait_to_complete)
1499 {
1500 int err;
1501 u32 i;
1502
1503 if (!speed || !link_up)
1504 return -EINVAL;
1505
1506 /* Set get_link_info flag to ensure that fresh
1507 * link information will be obtained from FW
1508 * by sending Get Link Status admin command.
1509 */
1510 hw->link.get_link_info = true;
1511
1512 /* Update link information in adapter context. */
1513 err = ixgbe_get_link_status(hw, link_up);
1514 if (err)
1515 return err;
1516
1517 /* Wait for link up if it was requested. */
1518 if (link_up_wait_to_complete && !(*link_up)) {
1519 for (i = 0; i < hw->mac.max_link_up_time; i++) {
1520 msleep(100);
1521 hw->link.get_link_info = true;
1522 err = ixgbe_get_link_status(hw, link_up);
1523 if (err)
1524 return err;
1525 if (*link_up)
1526 break;
1527 }
1528 }
1529
1530 /* Use link information in adapter context updated by the call
1531 * to ixgbe_get_link_status() to determine current link speed.
1532 * Link speed information is valid only when link up was
1533 * reported by FW.
1534 */
1535 if (*link_up) {
1536 switch (hw->link.link_info.link_speed) {
1537 case IXGBE_ACI_LINK_SPEED_10MB:
1538 *speed = IXGBE_LINK_SPEED_10_FULL;
1539 break;
1540 case IXGBE_ACI_LINK_SPEED_100MB:
1541 *speed = IXGBE_LINK_SPEED_100_FULL;
1542 break;
1543 case IXGBE_ACI_LINK_SPEED_1000MB:
1544 *speed = IXGBE_LINK_SPEED_1GB_FULL;
1545 break;
1546 case IXGBE_ACI_LINK_SPEED_2500MB:
1547 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1548 break;
1549 case IXGBE_ACI_LINK_SPEED_5GB:
1550 *speed = IXGBE_LINK_SPEED_5GB_FULL;
1551 break;
1552 case IXGBE_ACI_LINK_SPEED_10GB:
1553 *speed = IXGBE_LINK_SPEED_10GB_FULL;
1554 break;
1555 default:
1556 *speed = IXGBE_LINK_SPEED_UNKNOWN;
1557 break;
1558 }
1559 } else {
1560 *speed = IXGBE_LINK_SPEED_UNKNOWN;
1561 }
1562
1563 return 0;
1564 }
1565
1566 /**
1567 * ixgbe_get_link_capabilities_e610 - Determine link capabilities
1568 * @hw: pointer to hardware structure
1569 * @speed: pointer to link speed
1570 * @autoneg: true when autoneg or autotry is enabled
1571 *
1572 * Determine speed and AN parameters of a link.
1573 *
1574 * Return: the exit code of the operation.
1575 */
ixgbe_get_link_capabilities_e610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)1576 int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
1577 ixgbe_link_speed *speed,
1578 bool *autoneg)
1579 {
1580 if (!speed || !autoneg)
1581 return -EINVAL;
1582
1583 *autoneg = true;
1584 *speed = hw->phy.speeds_supported;
1585
1586 return 0;
1587 }
1588
1589 /**
1590 * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
1591 * @hw: pointer to hardware structure
1592 * @cfg: PHY configuration data to set FC mode
1593 * @req_mode: FC mode to configure
1594 *
1595 * Configures PHY Flow Control according to the provided configuration.
1596 *
1597 * Return: the exit code of the operation.
1598 */
ixgbe_cfg_phy_fc(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg,enum ixgbe_fc_mode req_mode)1599 int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
1600 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
1601 enum ixgbe_fc_mode req_mode)
1602 {
1603 u8 pause_mask = 0x0;
1604
1605 if (!cfg)
1606 return -EINVAL;
1607
1608 switch (req_mode) {
1609 case ixgbe_fc_full:
1610 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
1611 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
1612 break;
1613 case ixgbe_fc_rx_pause:
1614 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
1615 break;
1616 case ixgbe_fc_tx_pause:
1617 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
1618 break;
1619 default:
1620 break;
1621 }
1622
1623 /* Clear the old pause settings. */
1624 cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
1625 IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
1626
1627 /* Set the new capabilities. */
1628 cfg->caps |= pause_mask;
1629
1630 return 0;
1631 }
1632
1633 /**
1634 * ixgbe_setup_fc_e610 - Set up flow control
1635 * @hw: pointer to hardware structure
1636 *
1637 * Set up flow control. This has to be done during init time.
1638 *
1639 * Return: the exit code of the operation.
1640 */
ixgbe_setup_fc_e610(struct ixgbe_hw * hw)1641 int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
1642 {
1643 struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
1644 struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
1645 int err;
1646
1647 /* Get the current PHY config */
1648 err = ixgbe_aci_get_phy_caps(hw, false,
1649 IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
1650 if (err)
1651 return err;
1652
1653 ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
1654
1655 /* Configure the set PHY data */
1656 err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
1657 if (err)
1658 return err;
1659
1660 /* If the capabilities have changed, then set the new config */
1661 if (cfg.caps != pcaps.caps) {
1662 cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
1663
1664 err = ixgbe_aci_set_phy_cfg(hw, &cfg);
1665 if (err)
1666 return err;
1667 }
1668
1669 return err;
1670 }
1671
1672 /**
1673 * ixgbe_fc_autoneg_e610 - Configure flow control
1674 * @hw: pointer to hardware structure
1675 *
1676 * Configure Flow Control.
1677 */
ixgbe_fc_autoneg_e610(struct ixgbe_hw * hw)1678 void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
1679 {
1680 int err;
1681
1682 /* Get current link err.
1683 * Current FC mode will be stored in the hw context.
1684 */
1685 err = ixgbe_aci_get_link_info(hw, false, NULL);
1686 if (err)
1687 goto no_autoneg;
1688
1689 /* Check if the link is up */
1690 if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
1691 goto no_autoneg;
1692
1693 /* Check if auto-negotiation has completed */
1694 if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
1695 goto no_autoneg;
1696
1697 hw->fc.fc_was_autonegged = true;
1698 return;
1699
1700 no_autoneg:
1701 hw->fc.fc_was_autonegged = false;
1702 hw->fc.current_mode = hw->fc.requested_mode;
1703 }
1704
1705 /**
1706 * ixgbe_disable_rx_e610 - Disable RX unit
1707 * @hw: pointer to hardware structure
1708 *
1709 * Disable RX DMA unit on E610 with use of ACI command (0x000C).
1710 *
1711 * Return: the exit code of the operation.
1712 */
ixgbe_disable_rx_e610(struct ixgbe_hw * hw)1713 void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
1714 {
1715 u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1716 u32 pfdtxgswc;
1717 int err;
1718
1719 if (!(rxctrl & IXGBE_RXCTRL_RXEN))
1720 return;
1721
1722 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
1723 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
1724 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
1725 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
1726 hw->mac.set_lben = true;
1727 } else {
1728 hw->mac.set_lben = false;
1729 }
1730
1731 err = ixgbe_aci_disable_rxen(hw);
1732
1733 /* If we fail - disable RX using register write */
1734 if (err) {
1735 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1736 if (rxctrl & IXGBE_RXCTRL_RXEN) {
1737 rxctrl &= ~IXGBE_RXCTRL_RXEN;
1738 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
1739 }
1740 }
1741 }
1742
1743 /**
1744 * ixgbe_init_phy_ops_e610 - PHY specific init
1745 * @hw: pointer to hardware structure
1746 *
1747 * Initialize any function pointers that were not able to be
1748 * set during init_shared_code because the PHY type was not known.
1749 *
1750 * Return: the exit code of the operation.
1751 */
ixgbe_init_phy_ops_e610(struct ixgbe_hw * hw)1752 int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
1753 {
1754 struct ixgbe_mac_info *mac = &hw->mac;
1755 struct ixgbe_phy_info *phy = &hw->phy;
1756
1757 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
1758 phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
1759 else
1760 phy->ops.set_phy_power = NULL;
1761
1762 /* Identify the PHY */
1763 return phy->ops.identify(hw);
1764 }
1765
/**
 * ixgbe_identify_phy_e610 - Identify PHY
 * @hw: pointer to hardware structure
 *
 * Determine PHY type, supported speeds and PHY ID.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	u64 phy_type_low, phy_type_high;
	int err;

	/* Set PHY type */
	hw->phy.type = ixgbe_phy_fw;

	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
	if (err)
		return err;

	if (!(pcaps.module_compliance_enforcement &
	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
		/* Handle lenient mode: re-query without requiring media. */
		err = ixgbe_aci_get_phy_caps(hw, false,
					     IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
					     &pcaps);
		if (err)
			return err;
	}

	/* Determine supported speeds by mapping each reported PHY type bit
	 * to the corresponding ixgbe link speed flag.
	 */
	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
	phy_type_high = le64_to_cpu(pcaps.phy_type_high);
	phy_type_low = le64_to_cpu(pcaps.phy_type_low);

	if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
	if (phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;

	/* 2.5 and 5 Gbps link speeds must be excluded from the
	 * auto-negotiation set used during driver initialization due to
	 * compatibility issues with certain switches. Those issues do not
	 * exist in case of E610 2.5G SKU device (0x57b1).
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;

	/* For the 2.5G SKU the advertised set is captured after adding
	 * 2.5G, so that speed is included for this device only.
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
	    phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;

	/* Set PHY ID from the OUI bytes reported by firmware. */
	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));

	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
				       IXGBE_LINK_SPEED_100_FULL |
				       IXGBE_LINK_SPEED_1GB_FULL;
	hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;

	return 0;
}
1862
1863 /**
1864 * ixgbe_identify_module_e610 - Identify SFP module type
1865 * @hw: pointer to hardware structure
1866 *
1867 * Identify the SFP module type.
1868 *
1869 * Return: the exit code of the operation.
1870 */
ixgbe_identify_module_e610(struct ixgbe_hw * hw)1871 int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
1872 {
1873 bool media_available;
1874 u8 module_type;
1875 int err;
1876
1877 err = ixgbe_update_link_info(hw);
1878 if (err)
1879 return err;
1880
1881 media_available =
1882 (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
1883
1884 if (media_available) {
1885 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
1886
1887 /* Get module type from hw context updated by
1888 * ixgbe_update_link_info()
1889 */
1890 module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
1891
1892 if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
1893 (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
1894 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
1895 } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
1896 hw->phy.sfp_type = ixgbe_sfp_type_sr;
1897 } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
1898 (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
1899 hw->phy.sfp_type = ixgbe_sfp_type_lr;
1900 }
1901 } else {
1902 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1903 return -ENOENT;
1904 }
1905
1906 return 0;
1907 }
1908
/**
 * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
 * @hw: pointer to hardware structure
 *
 * Set the parameters for the firmware-controlled PHYs.
 * Build the requested PHY type masks from hw->phy.autoneg_advertised, mask
 * them against the types reported as supported by firmware and, only when
 * the result differs from the active configuration, program the new link
 * configuration through the ACI.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
	u64 sup_phy_type_low, sup_phy_type_high;
	u64 phy_type_low = 0, phy_type_high = 0;
	int err;

	err = ixgbe_aci_get_link_info(hw, false, NULL);
	if (err)
		return err;

	/* If media is not available get default config. */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
		rmode = IXGBE_ACI_REPORT_DFLT_CFG;

	/* First query: PHY types supported for the current media (or the
	 * default configuration when no media is plugged).
	 */
	err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
	if (err)
		return err;

	sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
	sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);

	/* Get Active configuration to avoid unintended changes. */
	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
				     &pcaps);
	if (err)
		return err;

	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);

	/* Translate each advertised link speed into every PHY type
	 * (BASE-T, KX/KR, SGMII, USXGMII, ...) capable of that speed.
	 */
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
	}
	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
		phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
	}

	/* Mask the set values to avoid requesting unsupported link types. */
	phy_type_low &= sup_phy_type_low;
	pcfg.phy_type_low = cpu_to_le64(phy_type_low);
	phy_type_high &= sup_phy_type_high;
	pcfg.phy_type_high = cpu_to_le64(phy_type_high);

	/* Only touch the hardware when the requested configuration differs
	 * from the active one (pcaps holds the active config here).
	 */
	if (pcfg.phy_type_high != pcaps.phy_type_high ||
	    pcfg.phy_type_low != pcaps.phy_type_low ||
	    pcfg.caps != pcaps.caps) {
		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;

		err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
		if (err)
			return err;
	}

	return 0;
}
2008
2009 /**
2010 * ixgbe_set_phy_power_e610 - Control power for copper PHY
2011 * @hw: pointer to hardware structure
2012 * @on: true for on, false for off
2013 *
2014 * Set the power on/off of the PHY
2015 * by getting its capabilities and setting the appropriate
2016 * configuration parameters.
2017 *
2018 * Return: the exit code of the operation.
2019 */
ixgbe_set_phy_power_e610(struct ixgbe_hw * hw,bool on)2020 int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
2021 {
2022 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
2023 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
2024 int err;
2025
2026 err = ixgbe_aci_get_phy_caps(hw, false,
2027 IXGBE_ACI_REPORT_ACTIVE_CFG,
2028 &phy_caps);
2029 if (err)
2030 return err;
2031
2032 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
2033
2034 if (on)
2035 phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
2036 else
2037 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
2038
2039 /* PHY is already in requested power mode. */
2040 if (phy_caps.caps == phy_cfg.caps)
2041 return 0;
2042
2043 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
2044 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
2045
2046 return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
2047 }
2048
2049 /**
2050 * ixgbe_enter_lplu_e610 - Transition to low power states
2051 * @hw: pointer to hardware structure
2052 *
2053 * Configures Low Power Link Up on transition to low power states
2054 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
2055 * X557 PHY immediately prior to entering LPLU.
2056 *
2057 * Return: the exit code of the operation.
2058 */
ixgbe_enter_lplu_e610(struct ixgbe_hw * hw)2059 int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
2060 {
2061 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
2062 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
2063 int err;
2064
2065 err = ixgbe_aci_get_phy_caps(hw, false,
2066 IXGBE_ACI_REPORT_ACTIVE_CFG,
2067 &phy_caps);
2068 if (err)
2069 return err;
2070
2071 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
2072
2073 phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
2074
2075 return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
2076 }
2077
2078 /**
2079 * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
2080 * @hw: pointer to hardware structure
2081 *
2082 * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
2083 * struct in order to set up EEPROM access.
2084 *
2085 * Return: the operation exit code.
2086 */
ixgbe_init_eeprom_params_e610(struct ixgbe_hw * hw)2087 int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
2088 {
2089 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2090 u32 gens_stat;
2091 u8 sr_size;
2092
2093 if (eeprom->type != ixgbe_eeprom_uninitialized)
2094 return 0;
2095
2096 eeprom->type = ixgbe_flash;
2097
2098 gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
2099 sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
2100
2101 /* Switching to words (sr_size contains power of 2). */
2102 eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
2103
2104 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
2105 eeprom->word_size);
2106
2107 return 0;
2108 }
2109
2110 /**
2111 * ixgbe_aci_get_netlist_node - get a node handle
2112 * @hw: pointer to the hw struct
2113 * @cmd: get_link_topo AQ structure
2114 * @node_part_number: output node part number if node found
2115 * @node_handle: output node handle parameter if node found
2116 *
2117 * Get the netlist node and assigns it to
2118 * the provided handle using ACI command (0x06E0).
2119 *
2120 * Return: the exit code of the operation.
2121 */
ixgbe_aci_get_netlist_node(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_get_link_topo * cmd,u8 * node_part_number,u16 * node_handle)2122 int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
2123 struct ixgbe_aci_cmd_get_link_topo *cmd,
2124 u8 *node_part_number, u16 *node_handle)
2125 {
2126 struct ixgbe_aci_desc desc;
2127
2128 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
2129 desc.params.get_link_topo = *cmd;
2130
2131 if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
2132 return -EOPNOTSUPP;
2133
2134 if (node_handle)
2135 *node_handle =
2136 le16_to_cpu(desc.params.get_link_topo.addr.handle);
2137 if (node_part_number)
2138 *node_part_number = desc.params.get_link_topo.node_part_num;
2139
2140 return 0;
2141 }
2142
2143 /**
2144 * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
2145 * @hw: pointer to the HW structure
2146 * @access: NVM access type (read or write)
2147 *
2148 * Request NVM ownership.
2149 *
2150 * Return: the exit code of the operation.
2151 */
ixgbe_acquire_nvm(struct ixgbe_hw * hw,enum ixgbe_aci_res_access_type access)2152 int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
2153 enum ixgbe_aci_res_access_type access)
2154 {
2155 u32 fla;
2156
2157 /* Skip if we are in blank NVM programming mode */
2158 fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
2159 if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
2160 return 0;
2161
2162 return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
2163 IXGBE_NVM_TIMEOUT);
2164 }
2165
2166 /**
2167 * ixgbe_release_nvm - Generic request for releasing the NVM ownership
2168 * @hw: pointer to the HW structure
2169 *
2170 * Release NVM ownership.
2171 */
ixgbe_release_nvm(struct ixgbe_hw * hw)2172 void ixgbe_release_nvm(struct ixgbe_hw *hw)
2173 {
2174 u32 fla;
2175
2176 /* Skip if we are in blank NVM programming mode */
2177 fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
2178 if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
2179 return;
2180
2181 ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
2182 }
2183
2184 /**
2185 * ixgbe_aci_read_nvm - read NVM
2186 * @hw: pointer to the HW struct
2187 * @module_typeid: module pointer location in words from the NVM beginning
2188 * @offset: byte offset from the module beginning
2189 * @length: length of the section to be read (in bytes from the offset)
2190 * @data: command buffer (size [bytes] = length)
2191 * @last_command: tells if this is the last command in a series
2192 * @read_shadow_ram: tell if this is a shadow RAM read
2193 *
2194 * Read the NVM using ACI command (0x0701).
2195 *
2196 * Return: the exit code of the operation.
2197 */
ixgbe_aci_read_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,bool read_shadow_ram)2198 int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
2199 u16 length, void *data, bool last_command,
2200 bool read_shadow_ram)
2201 {
2202 struct ixgbe_aci_cmd_nvm *cmd;
2203 struct ixgbe_aci_desc desc;
2204
2205 if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
2206 return -EINVAL;
2207
2208 cmd = &desc.params.nvm;
2209
2210 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
2211
2212 if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
2213 cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
2214
2215 /* If this is the last command in a series, set the proper flag. */
2216 if (last_command)
2217 cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2218 cmd->module_typeid = cpu_to_le16(module_typeid);
2219 cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
2220 cmd->offset_high = (offset >> 16) & 0xFF;
2221 cmd->length = cpu_to_le16(length);
2222
2223 return ixgbe_aci_send_cmd(hw, &desc, data, length);
2224 }
2225
/**
 * ixgbe_nvm_validate_checksum - validate checksum
 * @hw: pointer to the HW struct
 *
 * Verify NVM PFA checksum validity using ACI command (0x0706).
 * If the checksum verification failed, -EIO is returned.
 * The function acquires and then releases the NVM ownership.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_nvm_checksum *cmd;
	struct ixgbe_aci_desc desc;
	int err;

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	cmd = &desc.params.nvm_checksum;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;

	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);

	/* Release ownership before inspecting the response; on success the
	 * firmware has written the verification result into the descriptor.
	 */
	ixgbe_release_nvm(hw);

	if (!err && cmd->checksum !=
	    cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
							     hw);

		err = -EIO;
		netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
	}

	return err;
}
2266
2267 /**
2268 * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
2269 * @hw: pointer to the HW structure
2270 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
2271 * @data: word read from the Shadow RAM
2272 *
2273 * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
2274 *
2275 * Return: the exit code of the operation.
2276 */
ixgbe_read_sr_word_aci(struct ixgbe_hw * hw,u16 offset,u16 * data)2277 int ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
2278 {
2279 u32 bytes = sizeof(u16);
2280 u16 data_local;
2281 int err;
2282
2283 err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
2284 (u8 *)&data_local, true);
2285 if (err)
2286 return err;
2287
2288 *data = data_local;
2289 return 0;
2290 }
2291
/**
 * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
 * @hw: pointer to the HW struct
 * @offset: offset from beginning of NVM
 * @length: (in) number of bytes to read; (out) number of bytes actually read
 * @data: buffer to return data in (sized to fit the specified length)
 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
 *
 * Reads a portion of the NVM, as a flat memory space. This function correctly
 * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
 * from being exceeded in case of Shadow RAM read requests and ensures that no
 * single read request exceeds the maximum 4KB read for a single admin command.
 *
 * Returns an error code on failure. Note that the data pointer may be
 * partially updated if some reads succeed before a failure.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
			u8 *data, bool read_shadow_ram)
{
	u32 inlen = *length;
	u32 bytes_read = 0;
	bool last_cmd;
	int err;

	/* Verify the length of the read if this is for the Shadow RAM */
	if (read_shadow_ram && ((offset + inlen) >
				(hw->eeprom.word_size * 2u)))
		return -EINVAL;

	do {
		u32 read_size, sector_offset;

		/* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
		 * Additionally, a read from the Shadow RAM may not cross over
		 * a sector boundary. Conveniently, the sector size is also 4KB.
		 */
		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
		read_size = min_t(u32,
				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
				  inlen - bytes_read);

		/* True when this chunk completes the requested length. */
		last_cmd = !(bytes_read + read_size < inlen);

		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
		 * maximum size guarantees that it will fit within the 2 bytes.
		 */
		err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
					 offset, (u16)read_size,
					 data + bytes_read, last_cmd,
					 read_shadow_ram);
		if (err)
			break;

		bytes_read += read_size;
		offset += read_size;
	} while (!last_cmd);

	/* Report how much was actually read, even after a partial failure. */
	*length = bytes_read;
	return err;
}
2355
2356 /**
2357 * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
2358 * @hw: pointer to the HW structure
2359 * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
2360 * @words: (in) number of words to read; (out) number of words actually read
2361 * @data: words read from the Shadow RAM
2362 *
2363 * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM
2364 * ownership.
2365 *
2366 * Return: the operation exit code.
2367 */
ixgbe_read_sr_buf_aci(struct ixgbe_hw * hw,u16 offset,u16 * words,u16 * data)2368 int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
2369 u16 *data)
2370 {
2371 u32 bytes = *words * 2;
2372 int err;
2373
2374 err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
2375 if (err)
2376 return err;
2377
2378 *words = bytes / 2;
2379
2380 for (int i = 0; i < *words; i++)
2381 data[i] = le16_to_cpu(((__le16 *)data)[i]);
2382
2383 return 0;
2384 }
2385
2386 /**
2387 * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
2388 * @hw: pointer to hardware structure
2389 * @offset: offset of word in the EEPROM to read
2390 * @data: word read from the EEPROM
2391 *
2392 * Reads a 16 bit word from the EEPROM using the ACI.
2393 * If the EEPROM params are not initialized, the function
2394 * initialize them before proceeding with reading.
2395 * The function acquires and then releases the NVM ownership.
2396 *
2397 * Return: the exit code of the operation.
2398 */
ixgbe_read_ee_aci_e610(struct ixgbe_hw * hw,u16 offset,u16 * data)2399 int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
2400 {
2401 int err;
2402
2403 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
2404 err = hw->eeprom.ops.init_params(hw);
2405 if (err)
2406 return err;
2407 }
2408
2409 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2410 if (err)
2411 return err;
2412
2413 err = ixgbe_read_sr_word_aci(hw, offset, data);
2414 ixgbe_release_nvm(hw);
2415
2416 return err;
2417 }
2418
2419 /**
2420 * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
2421 * @hw: pointer to hardware structure
2422 * @offset: offset of words in the EEPROM to read
2423 * @words: number of words to read
2424 * @data: words to read from the EEPROM
2425 *
2426 * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
2427 * prior to the read. Acquire/release the NVM ownership.
2428 *
2429 * Return: the operation exit code.
2430 */
ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)2431 int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
2432 u16 words, u16 *data)
2433 {
2434 int err;
2435
2436 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
2437 err = hw->eeprom.ops.init_params(hw);
2438 if (err)
2439 return err;
2440 }
2441
2442 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2443 if (err)
2444 return err;
2445
2446 err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
2447 ixgbe_release_nvm(hw);
2448
2449 return err;
2450 }
2451
2452 /**
2453 * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
2454 * @hw: pointer to hardware structure
2455 * @checksum_val: calculated checksum
2456 *
2457 * Performs checksum calculation and validates the EEPROM checksum. If the
2458 * caller does not need checksum_val, the value can be NULL.
2459 * If the EEPROM params are not initialized, the function
2460 * initialize them before proceeding.
2461 * The function acquires and then releases the NVM ownership.
2462 *
2463 * Return: the exit code of the operation.
2464 */
ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw * hw,u16 * checksum_val)2465 int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
2466 {
2467 int err;
2468
2469 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
2470 err = hw->eeprom.ops.init_params(hw);
2471 if (err)
2472 return err;
2473 }
2474
2475 err = ixgbe_nvm_validate_checksum(hw);
2476 if (err)
2477 return err;
2478
2479 if (checksum_val) {
2480 u16 tmp_checksum;
2481
2482 err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2483 if (err)
2484 return err;
2485
2486 err = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
2487 &tmp_checksum);
2488 ixgbe_release_nvm(hw);
2489
2490 if (!err)
2491 *checksum_val = tmp_checksum;
2492 }
2493
2494 return err;
2495 }
2496
/**
 * ixgbe_reset_hw_e610 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, and performs a reset.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	u32 ctrl, i;
	int err;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	err = hw->mac.ops.stop_adapter(hw);
	if (err)
		goto reset_hw_out;

	/* Flush pending Tx transactions. */
	ixgbe_clear_tx_pending(hw);

	hw->phy.ops.init(hw);
mac_reset_top:
	/* Issue the reset under the SW/FW semaphore so FW does not access
	 * the PHY concurrently.
	 */
	err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (err)
		return -EBUSY;
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
							     hw);

		/* Reset did not self-clear; record the failure but continue
		 * with the remaining (re)initialization below.
		 */
		err = -EIO;
		netdev_err(adapter->netdev, "Reset polling failed to complete.");
	}

	/* Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	msleep(100);
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		/* Clear the flag first so we loop back at most once. */
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

/* Maximum number of Receive Address Registers. */
#define IXGBE_MAX_NUM_RAR		128

	/* Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to the
	 * maximum number of Receive Address Registers, since we modify this
	 * value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
	hw->mac.ops.init_rx_addrs(hw);

	/* Initialize bus function number */
	hw->mac.ops.set_lan_id(hw);

reset_hw_out:
	return err;
}
2580
/* E610 MAC operations table. Most entries reuse generic/X540/X550 helpers;
 * link management, reset and flow control use the E610-specific handlers
 * defined in this file.
 */
static const struct ixgbe_mac_operations mac_ops_e610 = {
	.init_hw			= ixgbe_init_hw_generic,
	.start_hw			= ixgbe_start_hw_X540,
	.clear_hw_cntrs			= ixgbe_clear_hw_cntrs_generic,
	.enable_rx_dma			= ixgbe_enable_rx_dma_generic,
	.get_mac_addr			= ixgbe_get_mac_addr_generic,
	.get_device_caps		= ixgbe_get_device_caps_generic,
	.stop_adapter			= ixgbe_stop_adapter_generic,
	.set_lan_id			= ixgbe_set_lan_id_multi_port_pcie,
	.set_rxpba			= ixgbe_set_rxpba_generic,
	.check_link			= ixgbe_check_link_e610,
	.blink_led_start		= ixgbe_blink_led_start_X540,
	.blink_led_stop			= ixgbe_blink_led_stop_X540,
	.set_rar			= ixgbe_set_rar_generic,
	.clear_rar			= ixgbe_clear_rar_generic,
	.set_vmdq			= ixgbe_set_vmdq_generic,
	.set_vmdq_san_mac		= ixgbe_set_vmdq_san_mac_generic,
	.clear_vmdq			= ixgbe_clear_vmdq_generic,
	.init_rx_addrs			= ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list		= ixgbe_update_mc_addr_list_generic,
	.enable_mc			= ixgbe_enable_mc_generic,
	.disable_mc			= ixgbe_disable_mc_generic,
	.clear_vfta			= ixgbe_clear_vfta_generic,
	.set_vfta			= ixgbe_set_vfta_generic,
	.fc_enable			= ixgbe_fc_enable_generic,
	.set_fw_drv_ver			= ixgbe_set_fw_drv_ver_x550,
	.init_uta_tables		= ixgbe_init_uta_tables_generic,
	.set_mac_anti_spoofing		= ixgbe_set_mac_anti_spoofing,
	.set_vlan_anti_spoofing		= ixgbe_set_vlan_anti_spoofing,
	.set_source_address_pruning	=
				ixgbe_set_source_address_pruning_x550,
	.set_ethertype_anti_spoofing	=
				ixgbe_set_ethertype_anti_spoofing_x550,
	.disable_rx_buff		= ixgbe_disable_rx_buff_generic,
	.enable_rx_buff			= ixgbe_enable_rx_buff_generic,
	.enable_rx			= ixgbe_enable_rx_generic,
	.disable_rx			= ixgbe_disable_rx_e610,
	.led_on				= ixgbe_led_on_generic,
	.led_off			= ixgbe_led_off_generic,
	.init_led_link_act		= ixgbe_init_led_link_act_generic,
	.reset_hw			= ixgbe_reset_hw_e610,
	.get_media_type			= ixgbe_get_media_type_e610,
	.setup_link			= ixgbe_setup_link_e610,
	.get_link_capabilities		= ixgbe_get_link_capabilities_e610,
	.get_bus_info			= ixgbe_get_bus_info_generic,
	.acquire_swfw_sync		= ixgbe_acquire_swfw_sync_X540,
	.release_swfw_sync		= ixgbe_release_swfw_sync_X540,
	.init_swfw_sync			= ixgbe_init_swfw_sync_X540,
	.prot_autoc_read		= prot_autoc_read_generic,
	.prot_autoc_write		= prot_autoc_write_generic,
	.setup_fc			= ixgbe_setup_fc_e610,
	.fc_autoneg			= ixgbe_fc_autoneg_e610,
};
2634
/* E610 PHY operations table; link setup and identification go through the
 * firmware ACI handlers defined in this file.
 */
static const struct ixgbe_phy_operations phy_ops_e610 = {
	.init			= ixgbe_init_phy_ops_e610,
	.identify		= ixgbe_identify_phy_e610,
	.identify_sfp		= ixgbe_identify_module_e610,
	.setup_link_speed	= ixgbe_setup_phy_link_speed_generic,
	.setup_link		= ixgbe_setup_phy_link_e610,
	.enter_lplu		= ixgbe_enter_lplu_e610,
};
2643
/* E610 EEPROM operations: reads and checksum validation are served from the
 * Shadow RAM via ACI NVM commands (see the *_aci_* helpers above).
 */
static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
	.read			= ixgbe_read_ee_aci_e610,
	.read_buffer		= ixgbe_read_ee_aci_buffer_e610,
	.validate_checksum	= ixgbe_validate_eeprom_checksum_e610,
};
2649
/* Device info descriptor binding the E610 MAC type to its ops tables;
 * invariants, mailbox ops and register value tables are shared with
 * X540/X550EM-a.
 */
const struct ixgbe_info ixgbe_e610_info = {
	.mac			= ixgbe_mac_e610,
	.get_invariants		= ixgbe_get_invariants_X540,
	.mac_ops		= &mac_ops_e610,
	.eeprom_ops		= &eeprom_ops_e610,
	.phy_ops		= &phy_ops_e610,
	.mbx_ops		= &mbx_ops_generic,
	.mvals			= ixgbe_mvals_x550em_a,
};
2659