/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

#include <sys/ktr.h>

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, true);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, false);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, false);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
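
/*
 * The per-VF properties defined above ("mac-addr", "mac-anti-spoof",
 * "allow-set-mac", "allow-promisc") are supplied by the administrator
 * through iovctl(8).  A rough sketch of a configuration follows; the
 * exact syntax is described in iovctl.conf(5), and the device name and
 * values here are examples only:
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 4;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *	}
 */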

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
{
	int i;
	int index;

	for (i = 0; i < sc->num_rx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->rx_queues[i].rxr.me = index;
	}

	for (i = 0; i < sc->num_tx_queues; i++) {
		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
		sc->tx_queues[i].txr.me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	ixgbe_write_mbx(hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_success(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}

static inline void
ixgbe_send_vf_failure(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}

static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_failure(sc, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct ixgbe_softc *sc, int max_frame)
{
	if (sc->max_frame_size < max_frame)
		sc->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


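/*
 * Program the default VLAN for a VF: set up VMOLR receive filtering for
 * the VF's pool and, when a non-zero tag is given, have the hardware
 * insert that tag on all traffic sent by the VF (VMVIR).
 */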
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &sc->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */

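/* Zero the PF-side mailbox memory associated with this VF's pool. */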
static void
ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */

static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (sc->hw.mac.type != ixgbe_mac_82599EB)
		return (true);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (sc->max_frame_size > ETHER_MAX_LEN ||
		    vf->maximum_frame_size > ETHER_MAX_LEN)
			return (false);

		return (true);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
			return (true);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (sc->max_frame_size <= ETHER_MAX_LEN)
			return (true);

		return (false);
	}
} /* ixgbe_vf_frame_size_compatible */


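/*
 * Reset VF state after a VF-initiated reset: restore the default VLAN,
 * clear the VF's receive address (RAR) and mailbox memory, toggle TXDCTL
 * for the VF's queues and forget the negotiated mailbox API version.
 */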
static void
ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&sc->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(sc, vf);
	ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


static void
ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &sc->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(sc, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


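/*
 * Handle a VF_RESET mailbox message: reset the VF's state, reprogram its
 * receive address if a valid MAC is known, re-enable TX/RX for the pool
 * and reply with the permanent MAC address and multicast filter type.
 */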
static void
ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &sc->hw;

	ixgbe_process_vf_reset(sc, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, true);
		ack = IXGBE_VT_MSGTYPE_SUCCESS;
	} else
		ack = IXGBE_VT_MSGTYPE_FAILURE;

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


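/*
 * Handle a SET_MAC_ADDR request.  The new address is accepted only if it
 * is valid and the VF is permitted to change its MAC (IXGBE_VF_CAP_MAC).
 */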
static void
ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t*)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    true);

	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by setting the appropriate bit in one
 * of the 128 32-bit entries of the Multicast Table Array (4096 hash
 * bits in total).
 */
static void
ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16*)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


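/*
 * Handle a SET_VLAN request: add or remove the VF's pool from the VLAN
 * filter table entry for the requested tag.
 */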
static void
ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &sc->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


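/*
 * Handle a SET_LPE (large packet enable) request carrying the VF's
 * desired maximum frame size.  Out-of-range requests are intentionally
 * ACKed but otherwise ignored; valid ones may also raise the PF's MHADD
 * maximum frame size.
 */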
static void
ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &sc->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	vf->maximum_frame_size = vf_max_size;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(sc, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < sc->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	// XXX implement this
	ixgbe_send_vf_failure(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


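/* Negotiate the mailbox API version requested by the VF (1.0 or 1.1). */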
static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_success(sc, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


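/*
 * Handle a GET_QUEUES request: report the number of TX/RX queues per
 * pool for the current SR-IOV mode and whether a default VLAN is in use.
 */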
static void
ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &sc->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (msg[0]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_failure(sc, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(sc->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


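/* Read one mailbox message from a VF and dispatch it to its handler. */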
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
#ifdef KTR
	if_t		ifp = iflib_get_ifp(ctx);
#endif
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &sc->hw;

	error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", if_name(ifp),
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(sc, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_success(sc, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(sc, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(sc, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(sc, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(sc, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(sc, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(sc, vf, msg);
		break;
	default:
		ixgbe_send_vf_failure(sc, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	if_ctx_t        ctx = context;
	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &sc->hw;

	for (i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(sc, vf);

			if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(ctx, vf);

			if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(sc, vf);
		}
	}
} /* ixgbe_handle_mbx */

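/*
 * iflib entry point for SR-IOV initialization: choose a VT mode (32 or
 * 64 pools) based on the requested VF count, reserve the last pool for
 * the PF and allocate per-VF state.
 */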
int
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	int retval = 0;

	sc = iflib_get_softc(ctx);
	sc->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		sc->iov_mode = IXGBE_64_VM;
	else
		sc->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	sc->pool = sc->iov_mode - 1;

	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (sc->vfs == NULL) {
		retval = ENOMEM;
		goto err_init_iov;
	}

	sc->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&sc->hw);

	sc->feat_en |= IXGBE_FEATURE_SRIOV;
	ixgbe_if_init(sc->ctx);

	return (retval);

err_init_iov:
	sc->num_vfs = 0;
	sc->pool = 0;
	sc->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_if_iov_init */

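/*
 * iflib entry point for SR-IOV teardown: leave TX/RX enabled only for
 * the PF's pool, disable virtualization (VT_CTL) and free per-VF state.
 */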
void
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
	struct ixgbe_hw *hw;
	struct ixgbe_softc *sc;
	uint32_t pf_reg, vf_reg;

	sc = iflib_get_softc(ctx);
	hw = &sc->hw;

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(sc->vfs, M_IXGBE_SRIOV);
	sc->vfs = NULL;
	sc->num_vfs = 0;
	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
} /* ixgbe_if_iov_uninit */

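/*
 * Bring an active VF to a working state: unmask its mailbox interrupt,
 * restore its default VLAN and MAC filter, re-enable TX/RX for its pool
 * and ping it so it knows the PF has (re)initialized.
 */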
static void
ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &sc->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&sc->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, true);
	}

	ixgbe_vf_enable_transmit(sc, vf);
	ixgbe_vf_enable_receive(sc, vf);

	ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

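/*
 * Program the global SR-IOV/VMDq registers (MRQC, MTQC, GCR_EXT, GPIE,
 * VT_CTL) for the selected mode, then initialize each allocated VF.
 */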
void
ixgbe_initialize_iov(struct ixgbe_softc *sc)
{
	struct ixgbe_hw *hw = &sc->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (sc->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (sc->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(sc->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < sc->num_vfs; i++)
		ixgbe_init_vf(sc, &sc->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < sc->num_vfs; i++) {
		vf = &sc->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(sc, vf->maximum_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

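/*
 * iflib entry point called for each VF being created: record the VF's
 * pool and RAR index, apply the administrator-supplied configuration
 * from the nvlist and initialize the VF.
 */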
int
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	struct ixgbe_vf *vf;
	const void *mac;

	sc = iflib_get_softc(ctx);

	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
	    vfnum, sc->num_vfs));

	vf = &sc->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->maximum_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(sc, vf->maximum_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(sc, vf);

	return (0);
} /* ixgbe_if_iov_vf_add */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */

#endif