/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

#include <sys/ktr.h>

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, true);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, false);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, false);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
{
	int i;
	int index;

	for (i = 0; i < sc->num_rx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->rx_queues[i].rxr.me = index;
	}

	for (i = 0; i < sc->num_tx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->tx_queues[i].txr.me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	sc->hw.mbx.ops.write(&sc->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(sc, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct ixgbe_softc *sc, int max_frame)
{
	if (sc->max_frame_size < max_frame)
		sc->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

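/************************************************************************
 * ixgbe_ping_all_vfs
 *
 *   Notify every active VF that the PF is still present by sending
 *   each one a control message.
 ************************************************************************/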
void
ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


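/************************************************************************
 * ixgbe_vf_set_default_vlan
 *
 *   Program the VF's receive filtering (VMOLR) and default VLAN
 *   insertion (VMVIR) registers for the given tag; a tag of 0 accepts
 *   untagged traffic and inserts no default tag.
 ************************************************************************/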
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &sc->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


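/************************************************************************
 * ixgbe_vf_frame_size_compatible
 *
 *   Determine whether the VF's maximum frame size can coexist with the
 *   PF's current maximum frame size (only a concern on 82599 parts).
 ************************************************************************/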
static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (sc->hw.mac.type != ixgbe_mac_82599EB)
		return (true);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (sc->max_frame_size > ETHER_MAX_LEN ||
		    vf->maximum_frame_size > ETHER_MAX_LEN)
			return (false);

		return (true);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
			return (true);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (sc->max_frame_size <= ETHER_MAX_LEN)
			return (true);

		return (false);
	}
} /* ixgbe_vf_frame_size_compatible */


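/************************************************************************
 * ixgbe_process_vf_reset
 *
 *   Restore a VF to its default VLAN, clear its receive address
 *   register and force the mailbox API version to be renegotiated.
 ************************************************************************/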
static void
ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);

	/* XXX clear multicast addresses */

	ixgbe_clear_rar(&sc->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


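/************************************************************************
 * ixgbe_vf_enable_transmit
 *
 *   Set the VF's bit in the VF Transmit Enable (VFTE) register.
 ************************************************************************/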
static void
ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


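/************************************************************************
 * ixgbe_vf_enable_receive
 *
 *   Set or clear the VF's bit in the VF Receive Enable (VFRE) register,
 *   depending on whether its frame size is compatible with the PF.
 ************************************************************************/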
static void
ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(sc, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


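/************************************************************************
 * ixgbe_vf_reset_msg
 *
 *   Handle a VF_RESET mailbox message: reset the VF state, re-enable
 *   rx/tx and reply with the VF's MAC address and multicast filter type.
 ************************************************************************/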
static void
ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &sc->hw;

	ixgbe_process_vf_reset(sc, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, true);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


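/************************************************************************
 * ixgbe_vf_set_mac
 *
 *   Handle a SET_MAC_ADDR mailbox message, validating the address and
 *   the VF's permission to change it before updating the RAR entry.
 ************************************************************************/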
static void
ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    true);

	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are programmed by setting the appropriate bit
 * in one of the 128 32-bit MTA registers (4096 hash bits in total).
 */
static void
ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


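/************************************************************************
 * ixgbe_vf_set_vlan
 *
 *   Handle a SET_VLAN mailbox message by updating the VLAN filter
 *   table entry for the VF's pool.
 ************************************************************************/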
static void
ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &sc->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


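/************************************************************************
 * ixgbe_vf_set_lpe
 *
 *   Handle a SET_LPE (large packet enable) mailbox message: record the
 *   VF's requested maximum frame size and raise the PF's MHADD limit
 *   if necessary.
 ************************************************************************/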
static void
ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &sc->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		return;
	}

	vf->maximum_frame_size = vf_max_size;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(sc, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < sc->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


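/************************************************************************
 * ixgbe_vf_set_macvlan
 *
 *   Handle a SET_MACVLAN mailbox message; not implemented, so the
 *   request is always NACKed.
 ************************************************************************/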
static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	/* XXX implement this */
	ixgbe_send_vf_nack(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


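/************************************************************************
 * ixgbe_vf_api_negotiate
 *
 *   Handle an API_NEGOTIATE mailbox message by recording the mailbox
 *   API version requested by the VF, if it is one the PF supports.
 ************************************************************************/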
static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(sc, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


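/************************************************************************
 * ixgbe_vf_get_queues
 *
 *   Handle a GET_QUEUES mailbox message by reporting the number of
 *   queues available to the VF in the current SR-IOV mode.
 ************************************************************************/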
static void
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &sc->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(sc->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


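/************************************************************************
 * ixgbe_process_vf_msg
 *
 *   Read a pending mailbox message from the VF and dispatch it to the
 *   appropriate handler.
 ************************************************************************/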
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
#ifdef KTR
	if_t		ifp = iflib_get_ifp(ctx);
#endif
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &sc->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", if_name(ifp),
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(sc, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(sc, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(sc, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(sc, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(sc, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(sc, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(sc, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(sc, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	if_ctx_t        ctx = context;
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &sc->hw;

	for (i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(sc, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(ctx, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(sc, vf);
		}
	}
} /* ixgbe_handle_mbx */

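/************************************************************************
 * ixgbe_if_iov_init
 *
 *   iflib SR-IOV init method: choose the VMDq/IOV mode, allocate the
 *   VF state array and reinitialize the interface.
 ************************************************************************/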
int
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	int retval = 0;

	sc = iflib_get_softc(ctx);
	sc->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		sc->iov_mode = IXGBE_64_VM;
	else
		sc->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	sc->pool = sc->iov_mode - 1;

	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (sc->vfs == NULL) {
		retval = ENOMEM;
		goto err_init_iov;
	}

	sc->num_vfs = num_vfs;
	ixgbe_if_init(sc->ctx);
	sc->feat_en |= IXGBE_FEATURE_SRIOV;

	return (retval);

err_init_iov:
	sc->num_vfs = 0;
	sc->pool = 0;
	sc->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_if_iov_init */

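/************************************************************************
 * ixgbe_if_iov_uninit
 *
 *   iflib SR-IOV uninit method: restrict rx/tx to the PF pool, disable
 *   virtualization (VT_CTL) and free the VF state array.
 ************************************************************************/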
void
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
	struct ixgbe_hw *hw;
	struct ixgbe_softc *sc;
	uint32_t pf_reg, vf_reg;

	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(sc->vfs, M_IXGBE_SRIOV);
	sc->vfs = NULL;
	sc->num_vfs = 0;
	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
} /* ixgbe_if_iov_uninit */

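/************************************************************************
 * ixgbe_init_vf
 *
 *   Bring an active VF back to its configured state: unmask its
 *   mailbox interrupt, restore its VLAN and MAC filters, re-enable
 *   rx/tx and notify the VF.
 ************************************************************************/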
static void
ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &sc->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);

	/* XXX multicast addresses */

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, true);
	}

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

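/************************************************************************
 * ixgbe_initialize_iov
 *
 *   Program the global VMDq/VT registers for the selected SR-IOV mode,
 *   enable rx/tx for the PF pool and (re)initialize each VF.
 ************************************************************************/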
void
ixgbe_initialize_iov(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (sc->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (sc->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < sc->num_vfs; i++)
		ixgbe_init_vf(sc, &sc->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(sc, vf->maximum_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

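/************************************************************************
 * ixgbe_if_iov_vf_add
 *
 *   iflib method called when a VF is created: set up the VF's pool,
 *   RAR index, frame size and MAC address from the supplied config.
 ************************************************************************/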
int
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	struct ixgbe_vf *vf;
	const void *mac;

	sc = iflib_get_softc(ctx);

	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
	    vfnum, sc->num_vfs));

	vf = &sc->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->maximum_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(sc, vf);

	return (0);
} /* ixgbe_if_iov_vf_add */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */

#endif