// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2024 Intel Corporation. */

#include "vf.h"
#include "ixgbevf.h"

/* On Hyper-V, to reset, we need to read from this offset
 * from the PCI config space. This is the mechanism used on
 * Hyper-V to support PF/VF communication.
 */
#define IXGBE_HV_RESET_OFFSET           0x201

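/**
 * ixgbevf_write_msg_read_ack - write a mailbox message and poll for the reply
 * @hw: pointer to hardware structure
 * @msg: message buffer to send to the PF
 * @retmsg: buffer that receives the PF's reply
 * @size: length of both buffers in 32-bit words
 *
 * Return: 0 on success, otherwise the error code returned by the mailbox
 * write or poll.
 */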
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
                                             u32 *retmsg, u16 size)
{
        s32 retval = ixgbevf_write_mbx(hw, msg, size);

        if (retval)
                return retval;

        return ixgbevf_poll_mbx(hw, retmsg, size);
}

/**
 * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware by filling the bus info structure and media type, clears
 * all on chip counters, initializes receive address registers, multicast
 * table, VLAN filter table, calls routine to set up link and flow control
 * settings, and leaves transmit and receive units disabled and uninitialized
 **/
static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
{
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;

        return 0;
}

/**
 * ixgbevf_init_hw_vf - virtual function hardware initialization
 * @hw: pointer to hardware structure
 *
 * Initialize the hardware by resetting the hardware and then starting
 * the hardware
 **/
static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
{
        s32 status = hw->mac.ops.start_hw(hw);

        hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

        return status;
}

/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        u32 timeout = IXGBE_VF_INIT_TIMEOUT;
        u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
        u8 *addr = (u8 *)(&msgbuf[1]);
        s32 ret_val;

        /* Call adapter stop to disable tx/rx and clear interrupts */
        hw->mac.ops.stop_adapter(hw);

        /* reset the api version */
        hw->api_version = ixgbe_mbox_api_10;
        hw->mbx.ops.init_params(hw);
        memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
               sizeof(struct ixgbe_mbx_operations));

        IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
        IXGBE_WRITE_FLUSH(hw);

        /* we cannot reset while the RSTI / RSTD bits are asserted */
        while (!mbx->ops.check_for_rst(hw) && timeout) {
                timeout--;
                udelay(5);
        }

        if (!timeout)
                return IXGBE_ERR_RESET_FAILED;

        /* mailbox timeout can now become active */
        mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

        msgbuf[0] = IXGBE_VF_RESET;
        ixgbevf_write_mbx(hw, msgbuf, 1);

        mdelay(10);

        /* set our "perm_addr" based on info provided by PF
         * also set up the mc_filter_type which is piggy backed
         * on the mac address in word 3
         */
        ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
        if (ret_val)
                return ret_val;

        /* New versions of the PF may NACK the reset return message
         * to indicate that no MAC address has yet been assigned for
         * the VF.
         */
        if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
            msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
                return IXGBE_ERR_INVALID_MAC_ADDR;

        if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
                ether_addr_copy(hw->mac.perm_addr, addr);

        hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

        return 0;
}

/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
        struct ixgbevf_adapter *adapter = hw->back;
        int i;

        for (i = 0; i < 6; i++)
                pci_read_config_byte(adapter->pdev,
                                     (i + IXGBE_HV_RESET_OFFSET),
                                     &hw->mac.perm_addr[i]);
        return 0;
#else
        pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
        return -EOPNOTSUPP;
#endif
}

/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
        u32 number_of_queues;
        u32 reg_val;
        u16 i;

        /* Set the adapter_stopped flag so other driver functions stop touching
         * the hardware
         */
        hw->adapter_stopped = true;

        /* Disable the receive unit by stopping each queue */
        number_of_queues = hw->mac.max_rx_queues;
        for (i = 0; i < number_of_queues; i++) {
                reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                if (reg_val & IXGBE_RXDCTL_ENABLE) {
                        reg_val &= ~IXGBE_RXDCTL_ENABLE;
                        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
                }
        }

        IXGBE_WRITE_FLUSH(hw);

        /* Clear interrupt mask to stop interrupts from being generated */
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

        /* Clear any pending interrupts */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);

        /* Disable the transmit unit. Each queue must be disabled. */
        number_of_queues = hw->mac.max_tx_queues;
        for (i = 0; i < number_of_queues; i++) {
                reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                if (reg_val & IXGBE_TXDCTL_ENABLE) {
                        reg_val &= ~IXGBE_TXDCTL_ENABLE;
                        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
                }
        }

        return 0;
}

/**
 * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits from a multicast address that determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits from
 * incoming Rx multicast addresses to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
        u32 vector = 0;

        switch (hw->mac.mc_filter_type) {
        case 0:   /* use bits [47:36] of the address */
                vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
                break;
        case 1:   /* use bits [46:35] of the address */
                vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
                break;
        case 2:   /* use bits [45:34] of the address */
                vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
                break;
        case 3:   /* use bits [43:32] of the address */
                vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
                break;
        default:  /* Invalid mc_filter_type */
                break;
        }

        /* vector can only be 12-bits or boundary will be exceeded */
        vector &= 0xFFF;
        return vector;
}

/**
 * ixgbevf_get_mac_addr_vf - Read device MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to storage for retrieved MAC address
 **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
        ether_addr_copy(mac_addr, hw->mac.perm_addr);

        return 0;
}

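/**
 * ixgbevf_set_uc_addr_vf - set or clear a unicast MAC filter via the PF
 * @hw: pointer to the HW structure
 * @index: index of the address in the VF's filter list; 1 starts a new list,
 *         0 clears all of this VF's macvlans
 * @addr: MAC address to program, or NULL when clearing
 *
 * Sends an IXGBE_VF_SET_MACVLAN request to the PF and checks the reply.
 *
 * Return: 0 on success, -ENOMEM if the PF NACKs the request, otherwise a
 * mailbox error code.
 */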
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
        u32 msgbuf[3], msgbuf_chk;
        u8 *msg_addr = (u8 *)(&msgbuf[1]);
        s32 ret_val;

        memset(msgbuf, 0, sizeof(msgbuf));
        /* If index is one then this is the start of a new list and needs
         * indication to the PF so it can do its own list management.
         * If it is zero then that tells the PF to just clear all of
         * this VF's macvlans and there is no new list.
         */
        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
        msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
        msgbuf_chk = msgbuf[0];

        if (addr)
                ether_addr_copy(msg_addr, addr);

        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                             ARRAY_SIZE(msgbuf));
        if (!ret_val) {
                msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

                if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
                        return -ENOMEM;
        }

        return ret_val;
}

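/**
 * ixgbevf_hv_set_uc_addr_vf - stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 *
 * Hyper-V variant - just a stub; unicast filters cannot be programmed.
 */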
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
        int err, i, j;
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
        u32 *hw_reta = &msgbuf[1];
        u32 mask = 0;

        /* We have to use a mailbox for 82599 and x540 devices only.
         * For these devices RETA has 128 entries.
         * Also these VFs support up to 4 RSS queues. Therefore PF will compress
         * 16 RETA entries in each DWORD giving 2 bits to each entry.
         */
        int dwords = IXGBEVF_82599_RETA_SIZE / 16;

        /* We support the RSS querying for 82599 and x540 devices only.
         * Thus return an error if API doesn't support RETA querying or querying
         * is not supported for this device type.
         */
        switch (hw->api_version) {
        case ixgbe_mbox_api_17:
        case ixgbe_mbox_api_16:
        case ixgbe_mbox_api_15:
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_12:
                if (hw->mac.type < ixgbe_mac_X550_vf)
                        break;
                fallthrough;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_GET_RETA;

        err = ixgbevf_write_mbx(hw, msgbuf, 1);

        if (err)
                return err;

        err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);

        if (err)
                return err;

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

        /* If the operation has been refused by a PF return -EPERM */
        if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
                return -EPERM;

        /* If we didn't get an ACK there must have been
         * some sort of mailbox error so we should treat it
         * as such.
         */
        if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
                return IXGBE_ERR_MBX;

        /* ixgbevf doesn't support more than 2 queues at the moment */
        if (num_rx_queues > 1)
                mask = 0x1;

        for (i = 0; i < dwords; i++)
                for (j = 0; j < 16; j++)
                        reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

        return 0;
}

/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
        int err;
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

        /* We currently support the RSS Random Key retrieval for 82599 and x540
         * devices only.
         *
         * Thus return an error if API doesn't support RSS Random Key retrieval
         * or if the operation is not supported for this device type.
         */
        switch (hw->api_version) {
        case ixgbe_mbox_api_17:
        case ixgbe_mbox_api_16:
        case ixgbe_mbox_api_15:
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_12:
                if (hw->mac.type < ixgbe_mac_X550_vf)
                        break;
                fallthrough;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
        err = ixgbevf_write_mbx(hw, msgbuf, 1);

        if (err)
                return err;

        err = ixgbevf_poll_mbx(hw, msgbuf, 11);

        if (err)
                return err;

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

        /* If the operation has been refused by a PF return -EPERM */
        if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
                return -EPERM;

        /* If we didn't get an ACK there must have been
         * some sort of mailbox error so we should treat it
         * as such.
         */
        if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
                return IXGBE_ERR_MBX;

        memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

        return 0;
}

/**
 * ixgbevf_set_rar_vf - set device MAC address
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 **/
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
                              u32 vmdq)
{
        u32 msgbuf[3];
        u8 *msg_addr = (u8 *)(&msgbuf[1]);
        s32 ret_val;

        memset(msgbuf, 0, sizeof(msgbuf));
        msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
        ether_addr_copy(msg_addr, addr);

        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                             ARRAY_SIZE(msgbuf));
        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

        /* if nacked the address was rejected, use "perm_addr" */
        if (!ret_val &&
            (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
                ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
                return IXGBE_ERR_MBX;
        }

        return ret_val;
}

/**
 * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: Unused in this implementation
 *
 * We don't really allow setting the device MAC address. However,
 * if the address being set is the permanent MAC address we will
 * permit that.
 **/
static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
                                 u32 vmdq)
{
        if (ether_addr_equal(addr, hw->mac.perm_addr))
                return 0;

        return -EOPNOTSUPP;
}

/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
                                          struct net_device *netdev)
{
        struct netdev_hw_addr *ha;
        u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
        u16 *vector_list = (u16 *)&msgbuf[1];
        u32 cnt, i;

        /* Each entry in the list uses 1 16 bit word. We have 30
         * 16 bit words available in our HW msg buffer (minus 1 for the
         * msg type). That's 30 hash values if we pack 'em right. If
         * there are more than 30 MC addresses to add then punt the
         * extras for now and then add code to handle more than 30 later.
         * It would be unusual for a server to request that many multi-cast
         * addresses except for in large enterprise network environments.
         */

        cnt = netdev_mc_count(netdev);
        if (cnt > 30)
                cnt = 30;
        msgbuf[0] = IXGBE_VF_SET_MULTICAST;
        msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

        i = 0;
        netdev_for_each_mc_addr(ha, netdev) {
                if (i == cnt)
                        break;
                if (is_link_local_ether_addr(ha->addr))
                        continue;

                vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
        }

        return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                          IXGBE_VFMAILBOX_SIZE);
}

/**
 * ixgbevf_hv_update_mc_addr_list_vf - stub
 * @hw: unused
 * @netdev: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
                                             struct net_device *netdev)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast Mode of VF.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
        u32 msgbuf[2];
        s32 err;

        switch (hw->api_version) {
        case ixgbe_mbox_api_12:
                /* promisc introduced in 1.3 version */
                if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
                        return -EOPNOTSUPP;
                fallthrough;
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_15:
        case ixgbe_mbox_api_16:
        case ixgbe_mbox_api_17:
                break;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
        msgbuf[1] = xcast_mode;

        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                         ARRAY_SIZE(msgbuf));
        if (err)
                return err;

        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
        if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
                return -EPERM;

        return 0;
}

/**
 * ixgbevf_hv_update_xcast_mode - stub
 * @hw: unused
 * @xcast_mode: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_link_state_vf - Get VF link state from PF
 * @hw: pointer to the HW structure
 * @link_state: link state storage
 *
 * Returns the state of the operation: error or success.
 */
static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
{
        u32 msgbuf[2];
        s32 ret_val;
        s32 err;

        msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
        msgbuf[1] = 0x0;

        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);

        if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
                ret_val = IXGBE_ERR_MBX;
        } else {
                ret_val = 0;
                *link_state = msgbuf[1];
        }

        return ret_val;
}

/**
 * ixgbevf_hv_get_link_state_vf - stub
 * @hw: unused
 * @link_state: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_get_pf_link_state - Get PF's link status
 * @hw: pointer to the HW structure
 * @speed: link speed
 * @link_up: indicate if link is up/down
 *
 * Ask PF to provide link_up state and speed of the link.
 *
 * Return: IXGBE_ERR_MBX in the case of mailbox error,
 * -EOPNOTSUPP if the op is not supported or 0 on success.
 */
static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                                     bool *link_up)
{
        u32 msgbuf[3] = {};
        int err;

        switch (hw->api_version) {
        case ixgbe_mbox_api_16:
        case ixgbe_mbox_api_17:
                break;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;

        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                         ARRAY_SIZE(msgbuf));
        if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
                err = IXGBE_ERR_MBX;
                *speed = IXGBE_LINK_SPEED_UNKNOWN;
                /* No need to set @link_up to false as it will be done by
                 * ixgbevf_check_mac_link_vf().
                 */
        } else {
                *speed = msgbuf[1];
                *link_up = msgbuf[2];
        }

        return err;
}

/**
 * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
 * @hw: pointer to the HW structure
 * @pf_features: bitmask of features supported by PF
 *
 * Return: IXGBE_ERR_MBX in the case of mailbox error,
 * -EOPNOTSUPP if the op is not supported or 0 on success.
 */
static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
{
        u32 msgbuf[2] = {};
        int err;

        switch (hw->api_version) {
        case ixgbe_mbox_api_17:
                break;
        default:
                return -EOPNOTSUPP;
        }

        msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
        msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;

        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                         ARRAY_SIZE(msgbuf));

        if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
                err = IXGBE_ERR_MBX;
                *pf_features = 0x0;
        } else {
                *pf_features = msgbuf[1];
        }

        return err;
}

/**
 * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 * @hw: pointer to the HW structure
 * @vlan: 12 bit VLAN ID
 * @vind: unused by VF drivers
 * @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                               bool vlan_on)
{
        u32 msgbuf[2];
        s32 err;

        msgbuf[0] = IXGBE_VF_SET_VLAN;
        msgbuf[1] = vlan;
        /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
        msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

        err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                         ARRAY_SIZE(msgbuf));
        if (err)
                goto mbx_err;

        /* remove extra bits from the message */
        msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
        msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

        if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
                err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
        return err;
}

/**
 * ixgbe_read_vflinks - Read VFLINKS register
 * @hw: pointer to the HW structure
 * @speed: link speed
 * @link_up: indicate if link is up/down
 *
 * Get linkup status and link speed from the VFLINKS register.
 */
static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                               bool *link_up)
{
        u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

        /* if link status is down no point in checking to see if PF is up */
        if (!(vflinks & IXGBE_LINKS_UP)) {
                *link_up = false;
                return;
        }

        /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
         * before the link status is correct
         */
        if (hw->mac.type == ixgbe_mac_82599_vf) {
                for (int i = 0; i < 5; i++) {
                        udelay(100);
                        vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

                        if (!(vflinks & IXGBE_LINKS_UP)) {
                                *link_up = false;
                                return;
                        }
                }
        }

        /* We reached this point so there's link */
        *link_up = true;

        switch (vflinks & IXGBE_LINKS_SPEED_82599) {
        case IXGBE_LINKS_SPEED_10G_82599:
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_1G_82599:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_100_82599:
                *speed = IXGBE_LINK_SPEED_100_FULL;
                break;
        default:
                *speed = IXGBE_LINK_SPEED_UNKNOWN;
        }
}

/**
 * ixgbevf_hv_set_vfta_vf - stub
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 *
 * Hyper-V variant - just a stub.
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                                  bool vlan_on)
{
        return -EOPNOTSUPP;
}

/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success. VF drivers are not allowed to change
 * global settings. Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed, bool autoneg,
                                     bool autoneg_wait_to_complete)
{
        return 0;
}

/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the links register to determine if link is up and the current speed
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
                                     ixgbe_link_speed *speed,
                                     bool *link_up,
                                     bool autoneg_wait_to_complete)
{
        struct ixgbevf_adapter *adapter = hw->back;
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        struct ixgbe_mac_info *mac = &hw->mac;
        s32 ret_val = 0;
        u32 in_msg = 0;

        /* If we were hit with a reset drop the link */
        if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
                mac->get_link_status = true;

        if (!mac->get_link_status)
                goto out;

        if (hw->mac.type == ixgbe_mac_e610_vf) {
                ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
                if (ret_val)
                        goto out;
        } else {
                ixgbe_read_vflinks(hw, speed, link_up);
                if (*link_up == false)
                        goto out;
        }

        /* if the read failed it could just be a mailbox collision, best wait
         * until we are called again and don't report an error
         */
        if (mbx->ops.read(hw, &in_msg, 1)) {
                if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
                        mac->get_link_status = false;
                goto out;
        }

        if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
                /* msg is not CTS; if it is a NACK we must have lost CTS status */
                if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
                        ret_val = -1;
                goto out;
        }

        /* the pf is talking, if we timed out in the past we reinit */
        if (!mbx->timeout) {
                ret_val = -1;
                goto out;
        }

        /* if we passed all the tests above then the link is up and we no
         * longer need to check for link
         */
        mac->get_link_status = false;

out:
        *link_up = !mac->get_link_status;
        return ret_val;
}

/**
 * ixgbevf_hv_check_mac_link_vf - check link
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
                                        ixgbe_link_speed *speed,
                                        bool *link_up,
                                        bool autoneg_wait_to_complete)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        struct ixgbe_mac_info *mac = &hw->mac;
        u32 links_reg;

        /* If we were hit with a reset drop the link */
        if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
                mac->get_link_status = true;

        if (!mac->get_link_status)
                goto out;

        /* if link status is down no point in checking to see if pf is up */
        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
        if (!(links_reg & IXGBE_LINKS_UP))
                goto out;

        /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
         * before the link status is correct
         */
        if (mac->type == ixgbe_mac_82599_vf) {
                int i;

                for (i = 0; i < 5; i++) {
                        udelay(100);
                        links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

                        if (!(links_reg & IXGBE_LINKS_UP))
                                goto out;
                }
        }

        switch (links_reg & IXGBE_LINKS_SPEED_82599) {
        case IXGBE_LINKS_SPEED_10G_82599:
                *speed = IXGBE_LINK_SPEED_10GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_1G_82599:
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                break;
        case IXGBE_LINKS_SPEED_100_82599:
                *speed = IXGBE_LINK_SPEED_100_FULL;
                break;
        }

        /* if we passed all the tests above then the link is up and we no
         * longer need to check for link
         */
        mac->get_link_status = false;

out:
        *link_up = !mac->get_link_status;
        return 0;
}

/**
 * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
        u32 msgbuf[2];
        s32 ret_val;

        msgbuf[0] = IXGBE_VF_SET_LPE;
        msgbuf[1] = max_size;

        ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
                                             ARRAY_SIZE(msgbuf));
        if (ret_val)
                return ret_val;
        if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
            (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
                return IXGBE_ERR_MBX;

        return 0;
}

/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
        u32 reg;

        /* If we are on Hyper-V, we implement this functionality
         * differently.
         */
        reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
        /* CRC == 4 */
        reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

        return 0;
}

/**
 * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 **/
static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
        int err;
        u32 msg[3];

        /* Negotiate the mailbox API version */
        msg[0] = IXGBE_VF_API_NEGOTIATE;
        msg[1] = api;
        msg[2] = 0;

        err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
        if (!err) {
                msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

                /* Store value and return 0 on success */
                if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
                               IXGBE_VT_MSGTYPE_SUCCESS)) {
                        hw->api_version = api;
                        return 0;
                }

                err = IXGBE_ERR_INVALID_ARGUMENT;
        }

        return err;
}

/**
 * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
 * @hw: pointer to the HW structure
 * @api: integer containing requested API version
 * Hyper-V version - only ixgbe_mbox_api_10 supported.
 **/
static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
        /* Hyper-V only supports api version ixgbe_mbox_api_10 */
        if (api != ixgbe_mbox_api_10)
                return IXGBE_ERR_INVALID_ARGUMENT;

        return 0;
}

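/**
 * ixgbevf_get_queues - Fetch queue configuration from the PF
 * @hw: pointer to the HW structure
 * @num_tcs: number of traffic classes reported by the PF
 * @default_tc: default traffic class/queue reported by the PF
 *
 * Asks the PF for the Tx/Rx queue counts and traffic class layout, then
 * stores validated values in the hw structure.
 *
 * Return: 0 on success (or when the mailbox API does not support the
 * request), otherwise a mailbox error code.
 */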
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
                       unsigned int *default_tc)
{
        int err;
        u32 msg[5];

        /* do nothing if API doesn't support ixgbevf_get_queues */
        switch (hw->api_version) {
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_15:
        case ixgbe_mbox_api_16:
        case ixgbe_mbox_api_17:
                break;
        default:
                return 0;
        }

        /* Fetch queue configuration from the PF */
        msg[0] = IXGBE_VF_GET_QUEUE;
        msg[1] = msg[2] = msg[3] = msg[4] = 0;

        err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
        if (!err) {
                msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;

                /* if we didn't get an ACK there must have been
                 * some sort of mailbox error so we should treat it
                 * as such
                 */
                if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
                        return IXGBE_ERR_MBX;

                /* record and validate values from message */
                hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
                if (hw->mac.max_tx_queues == 0 ||
                    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
                        hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;

                hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
                if (hw->mac.max_rx_queues == 0 ||
                    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
                        hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;

                *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
                /* in case of unknown state assume we cannot tag frames */
                if (*num_tcs > hw->mac.max_rx_queues)
                        *num_tcs = 1;

                *default_tc = msg[IXGBE_VF_DEF_QUEUE];
                /* default to queue 0 on out-of-bounds queue number */
                if (*default_tc >= hw->mac.max_tx_queues)
                        *default_tc = 0;
        }

        return err;
}

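/* MAC operations used when the VF talks to the PF over the mailbox */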
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .init_hw = ixgbevf_init_hw_vf,
        .reset_hw = ixgbevf_reset_hw_vf,
        .start_hw = ixgbevf_start_hw_vf,
        .get_mac_addr = ixgbevf_get_mac_addr_vf,
        .stop_adapter = ixgbevf_stop_hw_vf,
        .setup_link = ixgbevf_setup_mac_link_vf,
        .check_link = ixgbevf_check_mac_link_vf,
        .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
        .negotiate_features = ixgbevf_negotiate_features_vf,
        .set_rar = ixgbevf_set_rar_vf,
        .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
        .update_xcast_mode = ixgbevf_update_xcast_mode,
        .get_link_state = ixgbevf_get_link_state_vf,
        .set_uc_addr = ixgbevf_set_uc_addr_vf,
        .set_vfta = ixgbevf_set_vfta_vf,
        .set_rlpml = ixgbevf_set_rlpml_vf,
};

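/* MAC operations used when the VF runs on Hyper-V; mailbox-based
 * operations are replaced with Hyper-V variants or stubs.
 */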
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
        .init_hw = ixgbevf_init_hw_vf,
        .reset_hw = ixgbevf_hv_reset_hw_vf,
        .start_hw = ixgbevf_start_hw_vf,
        .get_mac_addr = ixgbevf_get_mac_addr_vf,
        .stop_adapter = ixgbevf_stop_hw_vf,
        .setup_link = ixgbevf_setup_mac_link_vf,
        .check_link = ixgbevf_hv_check_mac_link_vf,
        .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
        .set_rar = ixgbevf_hv_set_rar_vf,
        .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
        .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
        .get_link_state = ixgbevf_hv_get_link_state_vf,
        .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
        .set_vfta = ixgbevf_hv_set_vfta_vf,
        .set_rlpml = ixgbevf_hv_set_rlpml_vf,
};

const struct ixgbevf_info ixgbevf_82599_vf_info = {
        .mac = ixgbe_mac_82599_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
        .mac = ixgbe_mac_82599_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
        .mac = ixgbe_mac_X540_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
        .mac = ixgbe_mac_X540_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
        .mac = ixgbe_mac_X550_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
        .mac = ixgbe_mac_X550_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
        .mac = ixgbe_mac_X550EM_x_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
        .mac = ixgbe_mac_X550EM_x_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
        .mac = ixgbe_mac_x550em_a_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_e610_vf_info = {
        .mac = ixgbe_mac_e610_vf,
        .mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
        .mac = ixgbe_mac_e610_vf,
        .mac_ops = &ixgbevf_hv_mac_ops,
};