xref: /linux/drivers/net/ethernet/intel/ixgbevf/vf.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
3 
4 #include "vf.h"
5 #include "ixgbevf.h"
6 
7 /* On Hyper-V, to reset, we need to read from this offset
8  * from the PCI config space. This is the mechanism used on
9  * Hyper-V to support PF/VF communication.
10  */
11 #define IXGBE_HV_RESET_OFFSET           0x201
12 
13 static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
14 					     u32 *retmsg, u16 size)
15 {
16 	s32 retval = ixgbevf_write_mbx(hw, msg, size);
17 
18 	if (retval)
19 		return retval;
20 
21 	return ixgbevf_poll_mbx(hw, retmsg, size);
22 }
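
/* Illustrative sketch (not part of this file): a typical caller of the
 * helper above builds a request, reuses the same buffer for the reply,
 * strips the CTS bit and then tests the SUCCESS/FAILURE status bits.
 * The wrapper name and the IXGBE_VF_SOME_REQUEST opcode are hypothetical
 * and shown only to demonstrate the pattern.
 */
static s32 example_vf_request(struct ixgbe_hw *hw, u32 arg)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SOME_REQUEST;	/* hypothetical opcode */
	msgbuf[1] = arg;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	/* the PF echoes the opcode with a status bit set */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] != (IXGBE_VF_SOME_REQUEST | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;

	return 0;
}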
23 
24 /**
25  *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
26  *  @hw: pointer to hardware structure
27  *
28  *  Starts the hardware by filling the bus info structure and media type, clears
29  *  all on chip counters, initializes receive address registers, multicast
30  *  table, VLAN filter table, calls routine to set up link and flow control
31  *  settings, and leaves transmit and receive units disabled and uninitialized
32  **/
33 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
34 {
35 	/* Clear adapter stopped flag */
36 	hw->adapter_stopped = false;
37 
38 	return 0;
39 }
40 
41 /**
42  *  ixgbevf_init_hw_vf - virtual function hardware initialization
43  *  @hw: pointer to hardware structure
44  *
45  *  Initialize the hardware by resetting the hardware and then starting
46  *  the hardware
47  **/
48 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
49 {
50 	s32 status = hw->mac.ops.start_hw(hw);
51 
52 	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
53 
54 	return status;
55 }
56 
57 /**
58  *  ixgbevf_reset_hw_vf - Performs hardware reset
59  *  @hw: pointer to hardware structure
60  *
61  *  Resets the hardware by resetting the transmit and receive units, masks and
62  *  clears all interrupts.
63  **/
64 static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
65 {
66 	struct ixgbe_mbx_info *mbx = &hw->mbx;
67 	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
68 	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
69 	u8 *addr = (u8 *)(&msgbuf[1]);
70 	s32 ret_val;
71 
72 	/* Call adapter stop to disable tx/rx and clear interrupts */
73 	hw->mac.ops.stop_adapter(hw);
74 
75 	/* reset the api version */
76 	hw->api_version = ixgbe_mbox_api_10;
77 	hw->mbx.ops.init_params(hw);
78 	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
79 	       sizeof(struct ixgbe_mbx_operations));
80 
81 	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
82 	IXGBE_WRITE_FLUSH(hw);
83 
84 	/* we cannot reset while the RSTI / RSTD bits are asserted */
85 	while (!mbx->ops.check_for_rst(hw) && timeout) {
86 		timeout--;
87 		udelay(5);
88 	}
89 
90 	if (!timeout)
91 		return IXGBE_ERR_RESET_FAILED;
92 
93 	/* mailbox timeout can now become active */
94 	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
95 
96 	msgbuf[0] = IXGBE_VF_RESET;
97 	ixgbevf_write_mbx(hw, msgbuf, 1);
98 
99 	mdelay(10);
100 
101 	/* set our "perm_addr" based on info provided by PF
102 	 * also set up the mc_filter_type which is piggybacked
103 	 * on the mac address in word 3
104 	 */
105 	ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
106 	if (ret_val)
107 		return ret_val;
108 
109 	/* New versions of the PF may NACK the reset return message
110 	 * to indicate that no MAC address has yet been assigned for
111 	 * the VF.
112 	 */
113 	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
114 	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
115 		return IXGBE_ERR_INVALID_MAC_ADDR;
116 
117 	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
118 		ether_addr_copy(hw->mac.perm_addr, addr);
119 
120 	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
121 
122 	return 0;
123 }
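
/* Illustrative note (not part of this file): layout of the reset reply
 * parsed above, assuming IXGBE_VF_PERMADDR_MSG_LEN is 4 words and
 * IXGBE_VF_MC_TYPE_WORD indexes word 3, as used by this driver:
 *
 *	msgbuf[0]    = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS (or _FAILURE)
 *	msgbuf[1..2] = the 6-byte permanent MAC address (read through *addr)
 *	msgbuf[3]    = the multicast filter type, saved as mc_filter_type
 */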
124 
125 /**
126  * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
127  * @hw: pointer to private hardware struct
128  *
129  * Hyper-V variant; the VF/PF communication is through the PCI
130  * config space.
131  */
132 static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
133 {
134 #if IS_ENABLED(CONFIG_PCI_MMCONFIG)
135 	struct ixgbevf_adapter *adapter = hw->back;
136 	int i;
137 
138 	for (i = 0; i < 6; i++)
139 		pci_read_config_byte(adapter->pdev,
140 				     (i + IXGBE_HV_RESET_OFFSET),
141 				     &hw->mac.perm_addr[i]);
142 	return 0;
143 #else
144 	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
145 	return -EOPNOTSUPP;
146 #endif
147 }
148 
149 /**
150  *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
151  *  @hw: pointer to hardware structure
152  *
153  *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
154  *  disables transmit and receive units. The adapter_stopped flag is used by
155  *  the shared code and drivers to determine if the adapter is in a stopped
156  *  state and should not touch the hardware.
157  **/
158 static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
159 {
160 	u32 number_of_queues;
161 	u32 reg_val;
162 	u16 i;
163 
164 	/* Set the adapter_stopped flag so other driver functions stop touching
165 	 * the hardware
166 	 */
167 	hw->adapter_stopped = true;
168 
169 	/* Disable the receive unit by stopping each queue */
170 	number_of_queues = hw->mac.max_rx_queues;
171 	for (i = 0; i < number_of_queues; i++) {
172 		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
173 		if (reg_val & IXGBE_RXDCTL_ENABLE) {
174 			reg_val &= ~IXGBE_RXDCTL_ENABLE;
175 			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
176 		}
177 	}
178 
179 	IXGBE_WRITE_FLUSH(hw);
180 
181 	/* Clear interrupt mask to stop interrupts from being generated */
182 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
183 
184 	/* Clear any pending interrupts */
185 	IXGBE_READ_REG(hw, IXGBE_VTEICR);
186 
187 	/* Disable the transmit unit.  Each queue must be disabled. */
188 	number_of_queues = hw->mac.max_tx_queues;
189 	for (i = 0; i < number_of_queues; i++) {
190 		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
191 		if (reg_val & IXGBE_TXDCTL_ENABLE) {
192 			reg_val &= ~IXGBE_TXDCTL_ENABLE;
193 			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
194 		}
195 	}
196 
197 	return 0;
198 }
199 
200 /**
201  *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
202  *  @hw: pointer to hardware structure
203  *  @mc_addr: the multicast address
204  *
205  *  Extracts the 12 bits from a multicast address that determine which
206  *  bit-vector to set in the multicast table. The hardware uses 12 bits from
207  *  incoming Rx multicast addresses to determine the bit-vector to check in
208  *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
209  *  by the MO field of the MCSTCTRL register. The MO field is set during
210  *  initialization to mc_filter_type.
211  **/
212 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
213 {
214 	u32 vector = 0;
215 
216 	switch (hw->mac.mc_filter_type) {
217 	case 0:   /* use bits [47:36] of the address */
218 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
219 		break;
220 	case 1:   /* use bits [46:35] of the address */
221 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
222 		break;
223 	case 2:   /* use bits [45:34] of the address */
224 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
225 		break;
226 	case 3:   /* use bits [43:32] of the address */
227 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
228 		break;
229 	default:  /* Invalid mc_filter_type */
230 		break;
231 	}
232 
233 	/* vector can only be 12-bits or boundary will be exceeded */
234 	vector &= 0xFFF;
235 	return vector;
236 }
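
/* Illustrative worked example (not part of this file): with the default
 * mc_filter_type of 0, the vector is built from the top 12 bits of
 * mc_addr[5]:mc_addr[4]. For the mDNS group address 01:00:5e:00:00:fb,
 * mc_addr[4] = 0x00 and mc_addr[5] = 0xfb, so
 *
 *	vector = (0x00 >> 4) | ((u16)0xfb << 4) = 0xfb0
 *
 * i.e. bit 0xfb0 of the 4096-entry multicast table selects this address.
 */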
237 
238 /**
239  *  ixgbevf_get_mac_addr_vf - Read device MAC address
240  *  @hw: pointer to the HW structure
241  *  @mac_addr: pointer to storage for retrieved MAC address
242  **/
243 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
244 {
245 	ether_addr_copy(mac_addr, hw->mac.perm_addr);
246 
247 	return 0;
248 }
249 
250 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
251 {
252 	u32 msgbuf[3], msgbuf_chk;
253 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
254 	s32 ret_val;
255 
256 	memset(msgbuf, 0, sizeof(msgbuf));
257 	/* If index is one then this is the start of a new list and needs
258 	 * indication to the PF so it can do its own list management.
259 	 * If it is zero then that tells the PF to just clear all of
260 	 * this VF's macvlans and there is no new list.
261 	 */
262 	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
263 	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
264 	msgbuf_chk = msgbuf[0];
265 
266 	if (addr)
267 		ether_addr_copy(msg_addr, addr);
268 
269 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
270 					     ARRAY_SIZE(msgbuf));
271 	if (!ret_val) {
272 		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
273 
274 		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
275 			return -ENOMEM;
276 	}
277 
278 	return ret_val;
279 }
280 
281 static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
282 {
283 	return -EOPNOTSUPP;
284 }
285 
286 /**
287  * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
288  * @hw: pointer to hardware structure
289  * @reta: buffer to fill with RETA contents.
290  * @num_rx_queues: Number of Rx queues configured for this port
291  *
292  * The "reta" buffer should be big enough to contain 32 registers.
293  *
294  * Returns: 0 on success.
295  *          -EOPNOTSUPP if the API doesn't support this operation.
296  */
297 int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
298 {
299 	int err, i, j;
300 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
301 	u32 *hw_reta = &msgbuf[1];
302 	u32 mask = 0;
303 
304 	/* We have to use a mailbox for 82599 and x540 devices only.
305 	 * For these devices RETA has 128 entries.
306 	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
307 	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
308 	 */
309 	int dwords = IXGBEVF_82599_RETA_SIZE / 16;
310 
311 	/* We support RSS querying for 82599 and x540 devices only.
312 	 * Thus return an error if API doesn't support RETA querying or querying
313 	 * is not supported for this device type.
314 	 */
315 	switch (hw->api_version) {
316 	case ixgbe_mbox_api_15:
317 	case ixgbe_mbox_api_14:
318 	case ixgbe_mbox_api_13:
319 	case ixgbe_mbox_api_12:
320 		if (hw->mac.type < ixgbe_mac_X550_vf)
321 			break;
322 		fallthrough;
323 	default:
324 		return -EOPNOTSUPP;
325 	}
326 
327 	msgbuf[0] = IXGBE_VF_GET_RETA;
328 
329 	err = ixgbevf_write_mbx(hw, msgbuf, 1);
330 
331 	if (err)
332 		return err;
333 
334 	err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
335 
336 	if (err)
337 		return err;
338 
339 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
340 
341 	/* If the operation has been refused by a PF return -EPERM */
342 	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
343 		return -EPERM;
344 
345 	/* If we didn't get an ACK there must have been
346 	 * some sort of mailbox error so we should treat it
347 	 * as such.
348 	 */
349 	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
350 		return IXGBE_ERR_MBX;
351 
352 	/* ixgbevf doesn't support more than 2 queues at the moment */
353 	if (num_rx_queues > 1)
354 		mask = 0x1;
355 
356 	for (i = 0; i < dwords; i++)
357 		for (j = 0; j < 16; j++)
358 			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;
359 
360 	return 0;
361 }
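
/* Illustrative worked example (not part of this file): each mailbox DWORD
 * packs 16 RETA entries at 2 bits apiece, so the 128 entries fit in 8
 * DWORDs. With more than one Rx queue the mask is 0x1, so e.g.
 * hw_reta[0] = 0x44444444 (2-bit fields 00, 01, 00, 01, ...) unpacks to
 *
 *	reta[0..15] = 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
 *
 * i.e. alternating between Rx queue 0 and Rx queue 1.
 */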
362 
363 /**
364  * ixgbevf_get_rss_key_locked - get the RSS Random Key
365  * @hw: pointer to the HW structure
366  * @rss_key: buffer to fill with RSS Hash Key contents.
367  *
368  * The "rss_key" buffer should be big enough to contain 10 registers.
369  *
370  * Returns: 0 on success.
371  *          -EOPNOTSUPP if the API doesn't support this operation.
372  */
373 int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
374 {
375 	int err;
376 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
377 
378 	/* We currently support the RSS Random Key retrieval for 82599 and x540
379 	 * devices only.
380 	 *
381 	 * Thus return an error if API doesn't support RSS Random Key retrieval
382 	 * or if the operation is not supported for this device type.
383 	 */
384 	switch (hw->api_version) {
385 	case ixgbe_mbox_api_15:
386 	case ixgbe_mbox_api_14:
387 	case ixgbe_mbox_api_13:
388 	case ixgbe_mbox_api_12:
389 		if (hw->mac.type < ixgbe_mac_X550_vf)
390 			break;
391 		fallthrough;
392 	default:
393 		return -EOPNOTSUPP;
394 	}
395 
396 	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
397 	err = ixgbevf_write_mbx(hw, msgbuf, 1);
398 
399 	if (err)
400 		return err;
401 
402 	err = ixgbevf_poll_mbx(hw, msgbuf, 11);
403 
404 	if (err)
405 		return err;
406 
407 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
408 
409 	/* If the operation has been refused by a PF return -EPERM */
410 	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
411 		return -EPERM;
412 
413 	/* If we didn't get an ACK there must have been
414 	 * some sort of mailbox error so we should treat it
415 	 * as such.
416 	 */
417 	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
418 		return IXGBE_ERR_MBX;
419 
420 	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
421 
422 	return 0;
423 }
424 
425 /**
426  *  ixgbevf_set_rar_vf - set device MAC address
427  *  @hw: pointer to hardware structure
428  *  @index: Receive address register to write
429  *  @addr: Address to put into receive address register
430  *  @vmdq: Unused in this implementation
431  **/
432 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
433 			      u32 vmdq)
434 {
435 	u32 msgbuf[3];
436 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
437 	s32 ret_val;
438 
439 	memset(msgbuf, 0, sizeof(msgbuf));
440 	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
441 	ether_addr_copy(msg_addr, addr);
442 
443 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
444 					     ARRAY_SIZE(msgbuf));
445 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
446 
447 	/* if nacked the address was rejected, use "perm_addr" */
448 	if (!ret_val &&
449 	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
450 		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
451 		return IXGBE_ERR_MBX;
452 	}
453 
454 	return ret_val;
455 }
456 
457 /**
458  *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
459  *  @hw: pointer to hardware structure
460  *  @index: Receive address register to write
461  *  @addr: Address to put into receive address register
462  *  @vmdq: Unused in this implementation
463  *
464  * We don't really allow setting the device MAC address. However,
465  * if the address being set is the permanent MAC address we will
466  * permit that.
467  **/
468 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
469 				 u32 vmdq)
470 {
471 	if (ether_addr_equal(addr, hw->mac.perm_addr))
472 		return 0;
473 
474 	return -EOPNOTSUPP;
475 }
476 
477 /**
478  *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
479  *  @hw: pointer to the HW structure
480  *  @netdev: pointer to net device structure
481  *
482  *  Updates the Multicast Table Array.
483  **/
484 static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
485 					  struct net_device *netdev)
486 {
487 	struct netdev_hw_addr *ha;
488 	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
489 	u16 *vector_list = (u16 *)&msgbuf[1];
490 	u32 cnt, i;
491 
492 	/* Each entry in the list uses one 16-bit word.  We have 30
493 	 * 16 bit words available in our HW msg buffer (minus 1 for the
494 	 * msg type).  That's 30 hash values if we pack 'em right.  If
495 	 * there are more than 30 MC addresses to add then punt the
496 	 * extras for now and then add code to handle more than 30 later.
497 	 * It would be unusual for a server to request that many multi-cast
498 	 * addresses except for in large enterprise network environments.
499 	 */
500 
501 	cnt = netdev_mc_count(netdev);
502 	if (cnt > 30)
503 		cnt = 30;
504 	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
505 	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
506 
507 	i = 0;
508 	netdev_for_each_mc_addr(ha, netdev) {
509 		if (i == cnt)
510 			break;
511 		if (is_link_local_ether_addr(ha->addr))
512 			continue;
513 
514 		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
515 	}
516 
517 	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
518 			IXGBE_VFMAILBOX_SIZE);
519 }
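
/* Illustrative note (not part of this file): layout of the message built
 * above. With, say, three multicast addresses the request looks like
 *
 *	msgbuf[0]         = IXGBE_VF_SET_MULTICAST | (3 << IXGBE_VT_MSGINFO_SHIFT)
 *	vector_list[0..2] = 12-bit MTA vectors, packed two 16-bit entries per
 *			    32-bit mailbox word starting at msgbuf[1]
 *
 * which is why at most 30 hash values fit once one word is used for the
 * message type.
 */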
520 
521 /**
522  * ixgbevf_hv_update_mc_addr_list_vf - stub
523  * @hw: unused
524  * @netdev: unused
525  *
526  * Hyper-V variant - just a stub.
527  */
528 static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
529 					     struct net_device *netdev)
530 {
531 	return -EOPNOTSUPP;
532 }
533 
534 /**
535  *  ixgbevf_update_xcast_mode - Update Multicast mode
536  *  @hw: pointer to the HW structure
537  *  @xcast_mode: new multicast mode
538  *
539  *  Updates the Multicast Mode of VF.
540  **/
541 static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
542 {
543 	u32 msgbuf[2];
544 	s32 err;
545 
546 	switch (hw->api_version) {
547 	case ixgbe_mbox_api_12:
548 		/* promisc mode was introduced in mailbox API version 1.3 */
549 		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
550 			return -EOPNOTSUPP;
551 		fallthrough;
552 	case ixgbe_mbox_api_13:
553 	case ixgbe_mbox_api_14:
554 	case ixgbe_mbox_api_15:
555 		break;
556 	default:
557 		return -EOPNOTSUPP;
558 	}
559 
560 	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
561 	msgbuf[1] = xcast_mode;
562 
563 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
564 					 ARRAY_SIZE(msgbuf));
565 	if (err)
566 		return err;
567 
568 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
569 	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
570 		return -EPERM;
571 
572 	return 0;
573 }
574 
575 /**
576  * ixgbevf_hv_update_xcast_mode - stub
577  * @hw: unused
578  * @xcast_mode: unused
579  *
580  * Hyper-V variant - just a stub.
581  */
582 static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
583 {
584 	return -EOPNOTSUPP;
585 }
586 
587 /**
588  * ixgbevf_get_link_state_vf - Get VF link state from PF
589  * @hw: pointer to the HW structure
590  * @link_state: link state storage
591  *
592  * Returns state of the operation error or success.
593  */
594 static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
595 {
596 	u32 msgbuf[2];
597 	s32 ret_val;
598 	s32 err;
599 
600 	msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
601 	msgbuf[1] = 0x0;
602 
603 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
604 
605 	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
606 		ret_val = IXGBE_ERR_MBX;
607 	} else {
608 		ret_val = 0;
609 		*link_state = msgbuf[1];
610 	}
611 
612 	return ret_val;
613 }
614 
615 /**
616  * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub
617  * @hw: unused
618  * @link_state: unused
619  *
620  * Hyper-V variant; there is no mailbox communication.
621  */
622 static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
623 {
624 	return -EOPNOTSUPP;
625 }
626 
627 /**
628  *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
629  *  @hw: pointer to the HW structure
630  *  @vlan: 12 bit VLAN ID
631  *  @vind: unused by VF drivers
632  *  @vlan_on: if true then set bit, else clear bit
633  **/
634 static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
635 			       bool vlan_on)
636 {
637 	u32 msgbuf[2];
638 	s32 err;
639 
640 	msgbuf[0] = IXGBE_VF_SET_VLAN;
641 	msgbuf[1] = vlan;
642 	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
643 	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
644 
645 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
646 					 ARRAY_SIZE(msgbuf));
647 	if (err)
648 		goto mbx_err;
649 
650 	/* remove extra bits from the message */
651 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
652 	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
653 
654 	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
655 		err = IXGBE_ERR_INVALID_ARGUMENT;
656 
657 mbx_err:
658 	return err;
659 }
660 
661 /**
662  * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub
663  * @hw: unused
664  * @vlan: unused
665  * @vind: unused
666  * @vlan_on: unused
667  */
668 static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
669 				  bool vlan_on)
670 {
671 	return -EOPNOTSUPP;
672 }
673 
674 /**
675  *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
676  *  @hw: pointer to hardware structure
677  *  @speed: Unused in this implementation
678  *  @autoneg: Unused in this implementation
679  *  @autoneg_wait_to_complete: Unused in this implementation
680  *
681  *  Do nothing and return success.  VF drivers are not allowed to change
682  *  global settings.  Maintained for driver compatibility.
683  **/
684 static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
685 				     ixgbe_link_speed speed, bool autoneg,
686 				     bool autoneg_wait_to_complete)
687 {
688 	return 0;
689 }
690 
691 /**
692  *  ixgbevf_check_mac_link_vf - Get link/speed status
693  *  @hw: pointer to hardware structure
694  *  @speed: pointer to link speed
695  *  @link_up: true if link is up, false otherwise
696  *  @autoneg_wait_to_complete: unused
697  *
698  *  Reads the links register to determine if link is up and the current speed
699  **/
700 static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
701 				     ixgbe_link_speed *speed,
702 				     bool *link_up,
703 				     bool autoneg_wait_to_complete)
704 {
705 	struct ixgbe_mbx_info *mbx = &hw->mbx;
706 	struct ixgbe_mac_info *mac = &hw->mac;
707 	s32 ret_val = 0;
708 	u32 links_reg;
709 	u32 in_msg = 0;
710 
711 	/* If we were hit with a reset drop the link */
712 	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
713 		mac->get_link_status = true;
714 
715 	if (!mac->get_link_status)
716 		goto out;
717 
718 	/* if link status is down no point in checking to see if pf is up */
719 	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
720 	if (!(links_reg & IXGBE_LINKS_UP))
721 		goto out;
722 
723 	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
724 	 * before the link status is correct
725 	 */
726 	if (mac->type == ixgbe_mac_82599_vf) {
727 		int i;
728 
729 		for (i = 0; i < 5; i++) {
730 			udelay(100);
731 			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
732 
733 			if (!(links_reg & IXGBE_LINKS_UP))
734 				goto out;
735 		}
736 	}
737 
738 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
739 	case IXGBE_LINKS_SPEED_10G_82599:
740 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
741 		break;
742 	case IXGBE_LINKS_SPEED_1G_82599:
743 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
744 		break;
745 	case IXGBE_LINKS_SPEED_100_82599:
746 		*speed = IXGBE_LINK_SPEED_100_FULL;
747 		break;
748 	}
749 
750 	/* if the read failed it could just be a mailbox collision, best wait
751 	 * until we are called again and don't report an error
752 	 */
753 	if (mbx->ops.read(hw, &in_msg, 1)) {
754 		if (hw->api_version >= ixgbe_mbox_api_15)
755 			mac->get_link_status = false;
756 		goto out;
757 	}
758 
759 	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
760 		/* msg is not CTS; if it is a NACK we must have lost CTS status */
761 		if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
762 			ret_val = -1;
763 		goto out;
764 	}
765 
766 	/* the pf is talking, if we timed out in the past we reinit */
767 	if (!mbx->timeout) {
768 		ret_val = -1;
769 		goto out;
770 	}
771 
772 	/* if we passed all the tests above then the link is up and we no
773 	 * longer need to check for link
774 	 */
775 	mac->get_link_status = false;
776 
777 out:
778 	*link_up = !mac->get_link_status;
779 	return ret_val;
780 }
781 
782 /**
783  * ixgbevf_hv_check_mac_link_vf - check link
784  * @hw: pointer to private hardware struct
785  * @speed: pointer to link speed
786  * @link_up: true if link is up, false otherwise
787  * @autoneg_wait_to_complete: unused
788  *
789  * Hyper-V variant; there is no mailbox communication.
790  */
791 static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
792 					ixgbe_link_speed *speed,
793 					bool *link_up,
794 					bool autoneg_wait_to_complete)
795 {
796 	struct ixgbe_mbx_info *mbx = &hw->mbx;
797 	struct ixgbe_mac_info *mac = &hw->mac;
798 	u32 links_reg;
799 
800 	/* If we were hit with a reset drop the link */
801 	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
802 		mac->get_link_status = true;
803 
804 	if (!mac->get_link_status)
805 		goto out;
806 
807 	/* if link status is down no point in checking to see if pf is up */
808 	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
809 	if (!(links_reg & IXGBE_LINKS_UP))
810 		goto out;
811 
812 	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
813 	 * before the link status is correct
814 	 */
815 	if (mac->type == ixgbe_mac_82599_vf) {
816 		int i;
817 
818 		for (i = 0; i < 5; i++) {
819 			udelay(100);
820 			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
821 
822 			if (!(links_reg & IXGBE_LINKS_UP))
823 				goto out;
824 		}
825 	}
826 
827 	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
828 	case IXGBE_LINKS_SPEED_10G_82599:
829 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
830 		break;
831 	case IXGBE_LINKS_SPEED_1G_82599:
832 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
833 		break;
834 	case IXGBE_LINKS_SPEED_100_82599:
835 		*speed = IXGBE_LINK_SPEED_100_FULL;
836 		break;
837 	}
838 
839 	/* if we passed all the tests above then the link is up and we no
840 	 * longer need to check for link
841 	 */
842 	mac->get_link_status = false;
843 
844 out:
845 	*link_up = !mac->get_link_status;
846 	return 0;
847 }
848 
849 /**
850  *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
851  *  @hw: pointer to the HW structure
852  *  @max_size: value to assign to max frame size
853  **/
854 static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
855 {
856 	u32 msgbuf[2];
857 	s32 ret_val;
858 
859 	msgbuf[0] = IXGBE_VF_SET_LPE;
860 	msgbuf[1] = max_size;
861 
862 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
863 					     ARRAY_SIZE(msgbuf));
864 	if (ret_val)
865 		return ret_val;
866 	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
867 	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
868 		return IXGBE_ERR_MBX;
869 
870 	return 0;
871 }
872 
873 /**
874  * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
875  * @hw: pointer to the HW structure
876  * @max_size: value to assign to max frame size
877  * Hyper-V variant.
878  **/
879 static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
880 {
881 	u32 reg;
882 
883 	/* If we are on Hyper-V, we implement this functionality
884 	 * differently.
885 	 */
886 	reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
887 	/* CRC == 4 */
888 	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
889 	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
890 
891 	return 0;
892 }
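
/* Illustrative worked example (not part of this file): for a max_size of
 * 1522 bytes, the write above programs (1522 + 4) | IXGBE_RXDCTL_RLPML_EN
 * into VFRXDCTL(0), i.e. queue 0 accepts frames up to 1526 bytes once the
 * 4-byte CRC is included.
 */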
893 
894 /**
895  *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
896  *  @hw: pointer to the HW structure
897  *  @api: integer containing requested API version
898  **/
899 static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
900 {
901 	int err;
902 	u32 msg[3];
903 
904 	/* Negotiate the mailbox API version */
905 	msg[0] = IXGBE_VF_API_NEGOTIATE;
906 	msg[1] = api;
907 	msg[2] = 0;
908 
909 	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
910 	if (!err) {
911 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
912 
913 		/* Store value and return 0 on success */
914 		if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
915 			      IXGBE_VT_MSGTYPE_SUCCESS)) {
916 			hw->api_version = api;
917 			return 0;
918 		}
919 
920 		err = IXGBE_ERR_INVALID_ARGUMENT;
921 	}
922 
923 	return err;
924 }
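
/* Illustrative sketch (not part of this file): callers typically try the
 * newest mailbox API first and fall back until the PF ACKs a version. A
 * minimal sketch of such a caller, assuming only the mailbox API enum
 * values already used in this driver; the function name is hypothetical.
 */
static void example_negotiate_api(struct ixgbe_hw *hw)
{
	static const int api[] = {
		ixgbe_mbox_api_15,
		ixgbe_mbox_api_14,
		ixgbe_mbox_api_13,
		ixgbe_mbox_api_12,
		ixgbe_mbox_api_11,
		ixgbe_mbox_api_10,
	};
	int i;

	/* stop at the first version the PF accepts; hw->api_version is
	 * updated inside ixgbevf_negotiate_api_version_vf() on success
	 */
	for (i = 0; i < ARRAY_SIZE(api); i++)
		if (!ixgbevf_negotiate_api_version_vf(hw, api[i]))
			break;
}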
925 
926 /**
927  *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
928  *  @hw: pointer to the HW structure
929  *  @api: integer containing requested API version
930  *  Hyper-V version - only ixgbe_mbox_api_10 supported.
931  **/
932 static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
933 {
934 	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
935 	if (api != ixgbe_mbox_api_10)
936 		return IXGBE_ERR_INVALID_ARGUMENT;
937 
938 	return 0;
939 }
940 
941 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
942 		       unsigned int *default_tc)
943 {
944 	int err;
945 	u32 msg[5];
946 
947 	/* do nothing if API doesn't support ixgbevf_get_queues */
948 	switch (hw->api_version) {
949 	case ixgbe_mbox_api_11:
950 	case ixgbe_mbox_api_12:
951 	case ixgbe_mbox_api_13:
952 	case ixgbe_mbox_api_14:
953 	case ixgbe_mbox_api_15:
954 		break;
955 	default:
956 		return 0;
957 	}
958 
959 	/* Fetch queue configuration from the PF */
960 	msg[0] = IXGBE_VF_GET_QUEUE;
961 	msg[1] = msg[2] = msg[3] = msg[4] = 0;
962 
963 	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
964 	if (!err) {
965 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
966 
967 		/* if we didn't get an ACK there must have been
968 		 * some sort of mailbox error so we should treat it
969 		 * as such
970 		 */
971 		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
972 			return IXGBE_ERR_MBX;
973 
974 		/* record and validate values from message */
975 		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
976 		if (hw->mac.max_tx_queues == 0 ||
977 		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
978 			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
979 
980 		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
981 		if (hw->mac.max_rx_queues == 0 ||
982 		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
983 			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
984 
985 		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
986 		/* in case of unknown state assume we cannot tag frames */
987 		if (*num_tcs > hw->mac.max_rx_queues)
988 			*num_tcs = 1;
989 
990 		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
991 		/* default to queue 0 on out-of-bounds queue number */
992 		if (*default_tc >= hw->mac.max_tx_queues)
993 			*default_tc = 0;
994 	}
995 
996 	return err;
997 }
998 
999 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
1000 	.init_hw		= ixgbevf_init_hw_vf,
1001 	.reset_hw		= ixgbevf_reset_hw_vf,
1002 	.start_hw		= ixgbevf_start_hw_vf,
1003 	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
1004 	.stop_adapter		= ixgbevf_stop_hw_vf,
1005 	.setup_link		= ixgbevf_setup_mac_link_vf,
1006 	.check_link		= ixgbevf_check_mac_link_vf,
1007 	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
1008 	.set_rar		= ixgbevf_set_rar_vf,
1009 	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
1010 	.update_xcast_mode	= ixgbevf_update_xcast_mode,
1011 	.get_link_state		= ixgbevf_get_link_state_vf,
1012 	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
1013 	.set_vfta		= ixgbevf_set_vfta_vf,
1014 	.set_rlpml		= ixgbevf_set_rlpml_vf,
1015 };
1016 
1017 static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
1018 	.init_hw		= ixgbevf_init_hw_vf,
1019 	.reset_hw		= ixgbevf_hv_reset_hw_vf,
1020 	.start_hw		= ixgbevf_start_hw_vf,
1021 	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
1022 	.stop_adapter		= ixgbevf_stop_hw_vf,
1023 	.setup_link		= ixgbevf_setup_mac_link_vf,
1024 	.check_link		= ixgbevf_hv_check_mac_link_vf,
1025 	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
1026 	.set_rar		= ixgbevf_hv_set_rar_vf,
1027 	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
1028 	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
1029 	.get_link_state		= ixgbevf_hv_get_link_state_vf,
1030 	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
1031 	.set_vfta		= ixgbevf_hv_set_vfta_vf,
1032 	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
1033 };
1034 
1035 const struct ixgbevf_info ixgbevf_82599_vf_info = {
1036 	.mac = ixgbe_mac_82599_vf,
1037 	.mac_ops = &ixgbevf_mac_ops,
1038 };
1039 
1040 const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
1041 	.mac = ixgbe_mac_82599_vf,
1042 	.mac_ops = &ixgbevf_hv_mac_ops,
1043 };
1044 
1045 const struct ixgbevf_info ixgbevf_X540_vf_info = {
1046 	.mac = ixgbe_mac_X540_vf,
1047 	.mac_ops = &ixgbevf_mac_ops,
1048 };
1049 
1050 const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
1051 	.mac = ixgbe_mac_X540_vf,
1052 	.mac_ops = &ixgbevf_hv_mac_ops,
1053 };
1054 
1055 const struct ixgbevf_info ixgbevf_X550_vf_info = {
1056 	.mac = ixgbe_mac_X550_vf,
1057 	.mac_ops = &ixgbevf_mac_ops,
1058 };
1059 
1060 const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
1061 	.mac = ixgbe_mac_X550_vf,
1062 	.mac_ops = &ixgbevf_hv_mac_ops,
1063 };
1064 
1065 const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1066 	.mac = ixgbe_mac_X550EM_x_vf,
1067 	.mac_ops = &ixgbevf_mac_ops,
1068 };
1069 
1070 const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1071 	.mac = ixgbe_mac_X550EM_x_vf,
1072 	.mac_ops = &ixgbevf_hv_mac_ops,
1073 };
1074 
1075 const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
1076 	.mac = ixgbe_mac_x550em_a_vf,
1077 	.mac_ops = &ixgbevf_mac_ops,
1078 };
1079