/* xref: /freebsd/sys/dev/ixgbe/if_sriov.c (revision 718519f4efc71096422fc71dab90b2a3369871ff) */
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

#include <sys/ktr.h>

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, true);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, false);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, false);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
{
	int i;
	int index;

	for (i = 0; i < sc->num_rx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->rx_queues[i].rxr.me = index;
	}

	for (i = 0; i < sc->num_tx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->tx_queues[i].txr.me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_success(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}

static inline void
ixgbe_send_vf_failure(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}

static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_failure(sc, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct ixgbe_softc *sc, int max_frame)
{
	if (sc->max_frame_size < max_frame)
		sc->max_frame_size = max_frame;
}

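/* Map the SR-IOV pool mode to the matching VMDq/RSS setting for MRQC. */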
inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


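/* Map the SR-IOV pool mode to the matching VF/VT setting for MTQC. */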
inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

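/* Send a control ("ping") message to every active VF. */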
void
ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


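/*
 * Program the default VLAN for a VF pool: with a non-zero tag all of the
 * VF's traffic is tagged and untagged receive is rejected; with tag 0,
 * untagged receive is accepted and no default tag is inserted.
 */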
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &sc->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

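/* Zero the PF-side mailbox memory used by this VF. */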
static void
ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */

static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (sc->hw.mac.type != ixgbe_mac_82599EB)
		return (true);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support
		 * jumbo frames on either the PF or the VF.
		 */
		if (sc->max_frame_size > ETHER_MAX_LEN ||
		    vf->maximum_frame_size > ETHER_MAX_LEN)
			return (false);

		return (true);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
			return (true);

		/*
		 * Jumbo frames only work with VFs if the PF is also using
		 * jumbo frames.
		 */
		if (sc->max_frame_size <= ETHER_MAX_LEN)
			return (true);

		return (false);
	}
} /* ixgbe_vf_frame_size_compatible */


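/*
 * Reset VF state: restore the default VLAN, clear the VF's receive
 * address filter and mailbox memory, and forget any negotiated mailbox
 * API version.
 */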
static void
ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&sc->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(sc, vf);
	ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(sc, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


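/*
 * Handle a VF_RESET mailbox message: reset the VF, re-program its MAC
 * filter if a valid address is known, re-enable Rx/Tx, and reply with the
 * permanent MAC address and multicast filter type.
 */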
364 ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
365 {
366 	struct ixgbe_hw *hw;
367 	uint32_t ack;
368 	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
369 
370 	hw = &sc->hw;
371 
372 	ixgbe_process_vf_reset(sc, vf);
373 
374 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
375 		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
376 		    vf->pool, true);
377 		ack = IXGBE_VT_MSGTYPE_SUCCESS;
378 	} else
379 		ack = IXGBE_VT_MSGTYPE_FAILURE;
380 
381 	ixgbe_vf_enable_transmit(sc, vf);
382 	ixgbe_vf_enable_receive(sc, vf);
383 
384 	vf->flags |= IXGBE_VF_CTS;
385 
386 	resp[0] = IXGBE_VF_RESET | ack;
387 	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
388 	resp[3] = hw->mac.mc_filter_type;
389 	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
390 } /* ixgbe_vf_reset_msg */
391 
392 
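/* Handle a request from the VF to set its MAC address. */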
static void
ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    true);

	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by setting the appropriate bit in
 * one of 128 32-bit MTA registers (4096 possible hash values).
 */
static void
ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


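/* Handle a request from the VF to add or remove a VLAN filter. */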
static void
ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &sc->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


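/*
 * Handle a request from the VF to set its maximum frame size (LPE).
 * Invalid sizes are ACKed but otherwise ignored; valid sizes may grow the
 * PF's MHADD maximum frame setting.
 */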
static void
ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &sc->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	vf->maximum_frame_size = vf_max_size;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(sc, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < sc->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_failure(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


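/* Handle mailbox API version negotiation from the VF. */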
static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_success(sc, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


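/* Report the VF's queue configuration (GET_QUEUES, mailbox API 1.1+). */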
static void
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &sc->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(sc->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


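/* Read one mailbox message from a VF and dispatch it to the appropriate handler. */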
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
#ifdef KTR
	if_t ifp = iflib_get_ifp(ctx);
#endif
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &sc->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", if_name(ifp),
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(sc, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(sc, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(sc, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(sc, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(sc, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(sc, vf, msg);
		break;
	default:
		ixgbe_send_vf_failure(sc, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */

/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	if_ctx_t ctx = context;
	struct ixgbe_softc *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &sc->hw;

	for (i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops[vf->pool].check_for_rst(hw,
			    vf->pool) == 0)
				ixgbe_process_vf_reset(sc, vf);

			if (hw->mbx.ops[vf->pool].check_for_msg(hw,
			    vf->pool) == 0)
				ixgbe_process_vf_msg(ctx, vf);

			if (hw->mbx.ops[vf->pool].check_for_ack(hw,
			    vf->pool) == 0)
				ixgbe_process_vf_ack(sc, vf);
		}
	}
} /* ixgbe_handle_mbx */

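/*
 * iflib entry point: enable SR-IOV with the requested number of VFs,
 * selecting 32- or 64-pool mode and allocating per-VF state.
 */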
int
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	int retval = 0;

	sc = iflib_get_softc(ctx);
	sc->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		sc->iov_mode = IXGBE_64_VM;
	else
		sc->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	sc->pool = sc->iov_mode - 1;

	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (sc->vfs == NULL) {
		retval = ENOMEM;
		goto err_init_iov;
	}

	sc->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&sc->hw);

	sc->feat_en |= IXGBE_FEATURE_SRIOV;
	ixgbe_if_init(sc->ctx);

	return (retval);

err_init_iov:
	sc->num_vfs = 0;
	sc->pool = 0;
	sc->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_if_iov_init */

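/* iflib entry point: tear down SR-IOV and return the queues to the PF. */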
void
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
	struct ixgbe_hw *hw;
	struct ixgbe_softc *sc;
	uint32_t pf_reg, vf_reg;

	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(sc->vfs, M_IXGBE_SRIOV);
	sc->vfs = NULL;
	sc->num_vfs = 0;
	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
} /* ixgbe_if_iov_uninit */

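/*
 * (Re)program hardware state for one active VF: mailbox interrupt, default
 * VLAN, MAC filter, and Rx/Tx enables, then notify the VF with a control
 * message.
 */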
static void
ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &sc->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, true);
	}

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

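/* Program the global virtualization registers for the current IOV mode. */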
void
ixgbe_initialize_iov(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (sc->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc = IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (sc->iov_mode) {
	case IXGBE_64_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < sc->num_vfs; i++)
		ixgbe_init_vf(sc, &sc->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(sc, vf->maximum_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

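/* iflib entry point: configure a single VF from its nvlist config. */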
int
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	struct ixgbe_vf *vf;
	const void *mac;

	sc = iflib_get_softc(ctx);

	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
	    vfnum, sc->num_vfs));

	vf = &sc->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->maximum_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(sc, vf);

	return (0);
} /* ixgbe_if_iov_vf_add */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */

#endif