/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
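 *
 *   Thin wrapper so shared driver code can detach the device from the
 *   kernel SR-IOV framework via pci_iov_detach().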
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
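 *
 *   Register the PF and VF configuration schemas with the SR-IOV
 *   framework; the per-VF knobs declared here become settable through
 *   iovctl.conf(5).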
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
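
/*
 * Example iovctl.conf(5) fragment exercising the schema above; the
 * device name "ix0" and the MAC address are illustrative only:
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 4;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:56:48:7e:d9:f8";
 *		allow-set-mac : true;
 *	}
 */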

/************************************************************************
 * ixgbe_align_all_queue_indices
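 *
 *   Point each of the PF's RX/TX rings at the hardware queue index it
 *   owns within the PF's pool under the current IOV mode.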
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_queues[i].rxr.me = index;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->tx_queues[i].txr.me = index;
	}
}

/* Support functions for SR-IOV/VF management */
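/*
 * Mailbox message layout, as used below: the message type sits in the
 * low bits of word 0 (masked by IXGBE_VT_MSG_MASK) and the high bits
 * carry the handshake flags IXGBE_VT_MSGTYPE_ACK, IXGBE_VT_MSGTYPE_NACK
 * and the "clear to send" bit IXGBE_VT_MSGTYPE_CTS, which the PF
 * reports once a VF has completed its reset handshake.  Per-message
 * data rides in the IXGBE_VT_MSGINFO field.
 */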
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

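/*
 * Map a (pool, local queue number) pair to a global hardware queue
 * index.  For example, in 32-pool mode each pool owns four queues, so
 * pool 2, local queue 1 maps to global queue 2 * 4 + 1 = 9.
 */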
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->maximum_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

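	/*
	 * Reply with the VF_PERMADDR message: word 0 echoes the reset
	 * request with the ACK/NACK and CTS flags, words 1-2 carry the
	 * VF's MAC address, and word 3 tells the VF which multicast
	 * filter type the PF uses.
	 */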
	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by marking the appropriate bit in
 * one of 128 32-bit MTA registers (4096 hash bits in all).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
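	/*
	 * Illustrative example: hash value 0x0B5F selects MTA register
	 * (0x0B5F >> 5) & 0x7F = 0x5A and bit 0x0B5F & 0x1F = 0x1F
	 * within that register.
	 */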
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */

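/*
 * IXGBE_VF_SET_LPE: the VF requests a new maximum frame size, in bytes
 * and including the CRC.  Grow the PF-wide maximum (MHADD.MFS) if
 * needed, and re-evaluate whether the VF may keep receiving via
 * ixgbe_vf_frame_size_compatible().
 */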
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->maximum_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->maximum_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */

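/*
 * Read one message from a VF's mailbox and dispatch it to the handler
 * for its type.  Until the VF has completed the reset handshake (its
 * IXGBE_VF_CTS flag is set), every request other than a reset is
 * NACKed.
 */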
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
#ifdef KTR
	struct ifnet	*ifp = iflib_get_ifp(ctx);
#endif
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	if_ctx_t        ctx = context;
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &adapter->hw;

	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(ctx, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
} /* ixgbe_handle_mbx */

int
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = iflib_get_softc(ctx);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

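	/*
	 * Example: a request for 8 VFs selects 32-pool mode, and the PF
	 * takes the last pool (adapter->pool = 31), leaving pools 0-30
	 * for VFs.  In 64-pool mode the PF takes pool 63 and up to 63
	 * VFs fit.
	 */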
	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	ixgbe_if_init(adapter->ctx);
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_if_iov_init */

void
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = iflib_get_softc(ctx);
	hw = &adapter->hw;

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

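	/*
	 * VFRE/VFTE are each a pair of 32-bit registers covering pools
	 * 0-31 and 32-63; clear the register that does not hold the
	 * PF's pool bit.
	 */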
	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
} /* ixgbe_if_iov_uninit */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

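/*
 * Called once per VF when VFs are created; the config nvlist carries
 * that VF's settings, validated against the VF schema registered in
 * ixgbe_define_iov_schemas().
 */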
int
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = iflib_get_softc(ctx);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->maximum_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->maximum_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);

	return (0);
} /* ixgbe_if_iov_vf_add */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */

#endif