xref: /freebsd/sys/dev/ixgbe/if_sriov.c (revision c10511375252e53d6c9fe47f97e254f1380a89c5)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 
34 #include "ixgbe.h"
35 #include "ixgbe_sriov.h"
36 
37 #ifdef PCI_IOV
38 
39 #include <sys/ktr.h>
40 
41 MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
42 
43 /************************************************************************
44  * ixgbe_pci_iov_detach
45  ************************************************************************/
46 int
47 ixgbe_pci_iov_detach(device_t dev)
48 {
49 	return pci_iov_detach(dev);
50 }
51 
52 /************************************************************************
53  * ixgbe_define_iov_schemas
54  ************************************************************************/
55 void
56 ixgbe_define_iov_schemas(device_t dev, int *error)
57 {
58 	nvlist_t *pf_schema, *vf_schema;
59 
60 	pf_schema = pci_iov_schema_alloc_node();
61 	vf_schema = pci_iov_schema_alloc_node();
62 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
63 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
64 	    IOV_SCHEMA_HASDEFAULT, true);
65 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
66 	    IOV_SCHEMA_HASDEFAULT, false);
67 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
68 	    IOV_SCHEMA_HASDEFAULT, false);
69 	*error = pci_iov_attach(dev, pf_schema, vf_schema);
70 	if (*error != 0) {
71 		device_printf(dev,
72 		    "Error %d setting up SR-IOV\n", *error);
73 	}
74 } /* ixgbe_define_iov_schemas */
75 
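/*
 * The per-VF parameters defined above are supplied by the administrator
 * through iovctl(8).  A configuration might look roughly like the
 * following (illustrative sketch only; the device name and MAC address
 * are made up, and iovctl.conf(5) is authoritative for the syntax):
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 4;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:01:02:03:04:05";
 *		allow-set-mac : true;
 *		allow-promisc : false;
 *	}
 */
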
76 /************************************************************************
77  * ixgbe_align_all_queue_indices
78  ************************************************************************/
79 inline void
80 ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
81 {
82 	int i;
83 	int index;
84 
85 	for (i = 0; i < sc->num_rx_queues; i++) {
86 		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
87 		sc->rx_queues[i].rxr.me = index;
88 	}
89 
90 	for (i = 0; i < sc->num_tx_queues; i++) {
91 		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
92 		sc->tx_queues[i].txr.me = index;
93 	}
94 }
95 
96 /* Support functions for SR-IOV/VF management */
97 static inline void
98 ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
99 {
100 	if (vf->flags & IXGBE_VF_CTS)
101 		msg |= IXGBE_VT_MSGTYPE_CTS;
102 
103 	hw->mbx.ops[vf->pool].write(hw, &msg, 1, vf->pool);
104 }
105 
106 static inline void
107 ixgbe_send_vf_success(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
108 {
109 	msg &= IXGBE_VT_MSG_MASK;
110 	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
111 }
112 
113 static inline void
114 ixgbe_send_vf_failure(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
115 {
116 	msg &= IXGBE_VT_MSG_MASK;
117 	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
118 }
119 
120 static inline void
121 ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
122 {
123 	if (!(vf->flags & IXGBE_VF_CTS))
124 		ixgbe_send_vf_failure(sc, vf, 0);
125 }
126 
127 static inline boolean_t
128 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
129 {
130 	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
131 }
132 
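/* Number of queues in each VF pool for the given SR-IOV mode. */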
133 static inline int
134 ixgbe_vf_queues(int mode)
135 {
136 	switch (mode) {
137 	case IXGBE_64_VM:
138 		return (2);
139 	case IXGBE_32_VM:
140 		return (4);
141 	case IXGBE_NO_VM:
142 	default:
143 		return (0);
144 	}
145 }
146 
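/*
 * Map a VF-relative queue number to an absolute queue index.  For
 * example, in IXGBE_32_VM mode (4 queues per pool) queue 2 of pool 3
 * is absolute queue 3 * 4 + 2 = 14.
 */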
147 inline int
148 ixgbe_vf_que_index(int mode, int vfnum, int num)
149 {
150 	return ((vfnum * ixgbe_vf_queues(mode)) + num);
151 }
152 
153 static inline void
154 ixgbe_update_max_frame(struct ixgbe_softc * sc, int max_frame)
155 {
156 	if (sc->max_frame_size < max_frame)
157 		sc->max_frame_size = max_frame;
158 }
159 
160 inline u32
161 ixgbe_get_mrqc(int iov_mode)
162 {
163 	u32 mrqc;
164 
165 	switch (iov_mode) {
166 	case IXGBE_64_VM:
167 		mrqc = IXGBE_MRQC_VMDQRSS64EN;
168 		break;
169 	case IXGBE_32_VM:
170 		mrqc = IXGBE_MRQC_VMDQRSS32EN;
171 		break;
172 	case IXGBE_NO_VM:
173 		mrqc = 0;
174 		break;
175 	default:
176 		panic("Unexpected SR-IOV mode %d", iov_mode);
177 	}
178 
179 	return mrqc;
180 }
181 
182 
183 inline u32
184 ixgbe_get_mtqc(int iov_mode)
185 {
186 	uint32_t mtqc;
187 
188 	switch (iov_mode) {
189 	case IXGBE_64_VM:
190 		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
191 		break;
192 	case IXGBE_32_VM:
193 		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
194 		break;
195 	case IXGBE_NO_VM:
196 		mtqc = IXGBE_MTQC_64Q_1PB;
197 		break;
198 	default:
199 		panic("Unexpected SR-IOV mode %d", iov_mode);
200 	}
201 
202 	return mtqc;
203 }
204 
205 void
206 ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
207 {
208 	struct ixgbe_vf *vf;
209 
210 	for (int i = 0; i < sc->num_vfs; i++) {
211 		vf = &sc->vfs[i];
212 		if (vf->flags & IXGBE_VF_ACTIVE)
213 			ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
214 	}
215 } /* ixgbe_ping_all_vfs */
216 
217 
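/*
 * Program the default (port) VLAN for a VF pool: tighten the VMOLR
 * receive filters and, if a nonzero tag is given, force all of the
 * VF's traffic onto that VLAN via VMVIR.
 */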
218 static void
219 ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
220                           uint16_t tag)
221 {
222 	struct ixgbe_hw *hw;
223 	uint32_t vmolr, vmvir;
224 
225 	hw = &sc->hw;
226 
227 	vf->vlan_tag = tag;
228 
229 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
230 
231 	/* Do not receive packets that pass inexact filters. */
232 	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
233 
234 	/* Disable Multicast Promiscuous Mode. */
235 	vmolr &= ~IXGBE_VMOLR_MPE;
236 
237 	/* Accept broadcasts. */
238 	vmolr |= IXGBE_VMOLR_BAM;
239 
240 	if (tag == 0) {
241 		/* Accept non-vlan tagged traffic. */
242 		vmolr |= IXGBE_VMOLR_AUPE;
243 
244 		/* Allow VM to tag outgoing traffic; no default tag. */
245 		vmvir = 0;
246 	} else {
247 		/* Require vlan-tagged traffic. */
248 		vmolr &= ~IXGBE_VMOLR_AUPE;
249 
250 		/* Tag all traffic with provided vlan tag. */
251 		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
252 	}
253 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
254 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
255 } /* ixgbe_vf_set_default_vlan */
256 
257 static void
258 ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
259 {
260 	struct ixgbe_hw *hw = &sc->hw;
261 	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
262 	uint16_t mbx_size = hw->mbx.size;
263 	uint16_t i;
264 
265 	for (i = 0; i < mbx_size; ++i)
266 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
267 } /* ixgbe_clear_vfmbmem */
268 
269 static boolean_t
270 ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
271 {
272 
273 	/*
274 	 * Frame size compatibility between PF and VF is only a problem on
275 	 * 82599-based cards.  X540 and later support any combination of jumbo
276 	 * frames on PFs and VFs.
277 	 */
278 	if (sc->hw.mac.type != ixgbe_mac_82599EB)
279 		return (true);
280 
281 	switch (vf->api_ver) {
282 	case IXGBE_API_VER_1_0:
283 	case IXGBE_API_VER_UNKNOWN:
284 		/*
285 		 * On legacy (1.0 and older) VF versions, we don't support jumbo
286 		 * frames on either the PF or the VF.
287 		 */
288 		if (sc->max_frame_size > ETHER_MAX_LEN ||
289 		    vf->maximum_frame_size > ETHER_MAX_LEN)
290 			return (false);
291 
292 		return (true);
293 
295 	case IXGBE_API_VER_1_1:
296 	default:
297 		/*
298 		 * 1.1 or later VF versions always work if they aren't using
299 		 * jumbo frames.
300 		 */
301 		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
302 			return (true);
303 
304 		/*
305 		 * Jumbo frames only work with VFs if the PF is also using jumbo
306 		 * frames.
307 		 */
308 		if (sc->max_frame_size <= ETHER_MAX_LEN)
309 			return (true);
310 
311 		return (false);
312 	}
313 } /* ixgbe_vf_frame_size_compatible */
314 
315 
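/*
 * Reset per-VF state: restore the default VLAN, clear the VF's RAR
 * entry and mailbox memory, toggle its TXDCTL, and forget the
 * negotiated mailbox API version.
 */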
316 static void
317 ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
318 {
319 	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);
320 
321 	// XXX clear multicast addresses
322 
323 	ixgbe_clear_rar(&sc->hw, vf->rar_index);
324 	ixgbe_clear_vfmbmem(sc, vf);
325 	ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));
326 
327 	vf->api_ver = IXGBE_API_VER_UNKNOWN;
328 } /* ixgbe_process_vf_reset */
329 
330 
331 static void
332 ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
333 {
334 	struct ixgbe_hw *hw;
335 	uint32_t vf_index, vfte;
336 
337 	hw = &sc->hw;
338 
339 	vf_index = IXGBE_VF_INDEX(vf->pool);
340 	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
341 	vfte |= IXGBE_VF_BIT(vf->pool);
342 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
343 } /* ixgbe_vf_enable_transmit */
344 
345 
346 static void
347 ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
348 {
349 	struct ixgbe_hw *hw;
350 	uint32_t vf_index, vfre;
351 
352 	hw = &sc->hw;
353 
354 	vf_index = IXGBE_VF_INDEX(vf->pool);
355 	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
356 	if (ixgbe_vf_frame_size_compatible(sc, vf))
357 		vfre |= IXGBE_VF_BIT(vf->pool);
358 	else
359 		vfre &= ~IXGBE_VF_BIT(vf->pool);
360 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
361 } /* ixgbe_vf_enable_receive */
362 
363 
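/*
 * Handle a VF_RESET mailbox message: reset the VF's state, reprogram
 * its station address (if a valid one is set), re-enable its rx/tx,
 * and reply with the permanent MAC address and multicast filter type.
 */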
364 static void
365 ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
366 {
367 	struct ixgbe_hw *hw;
368 	uint32_t ack;
369 	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
370 
371 	hw = &sc->hw;
372 
373 	ixgbe_process_vf_reset(sc, vf);
374 
375 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
376 		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
377 		    vf->pool, true);
378 		ack = IXGBE_VT_MSGTYPE_SUCCESS;
379 	} else
380 		ack = IXGBE_VT_MSGTYPE_FAILURE;
381 
382 	ixgbe_vf_enable_transmit(sc, vf);
383 	ixgbe_vf_enable_receive(sc, vf);
384 
385 	vf->flags |= IXGBE_VF_CTS;
386 
387 	resp[0] = IXGBE_VF_RESET | ack;
388 	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
389 	resp[3] = hw->mac.mc_filter_type;
390 	ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
391 } /* ixgbe_vf_reset_msg */
392 
393 
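/*
 * Handle a SET_MAC_ADDR request.  The new address is accepted only if
 * it validates and the VF is allowed to change its MAC (or the address
 * is unchanged).
 */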
394 static void
395 ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
396 {
397 	uint8_t *mac;
398 
399 	mac = (uint8_t*)&msg[1];
400 
401 	/* Check that the VF has permission to change the MAC address. */
402 	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
403 		ixgbe_send_vf_failure(sc, vf, msg[0]);
404 		return;
405 	}
406 
407 	if (ixgbe_validate_mac_addr(mac) != 0) {
408 		ixgbe_send_vf_failure(sc, vf, msg[0]);
409 		return;
410 	}
411 
412 	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
413 
414 	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
415 	    true);
416 
417 	ixgbe_send_vf_success(sc, vf, msg[0]);
418 } /* ixgbe_vf_set_mac */
419 
420 
421 /*
422  * A VF's multicast addresses are set by turning on the appropriate
423  * bit in one of the 128 32-bit MTA registers (4096 hash bits total).
424  */
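/*
 * For example, an entry value of 0x876 selects MTA register
 * (0x876 >> 5) & 0x7F = 0x43 and bit 0x876 & 0x1F = 22 within it.
 */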
425 static void
426 ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
427 {
428 	u16	*list = (u16*)&msg[1];
429 	int	entries;
430 	u32	vmolr, vec_bit, vec_reg, mta_reg;
431 
432 	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
433 	entries = min(entries, IXGBE_MAX_VF_MC);
434 
435 	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));
436 
437 	vf->num_mc_hashes = entries;
438 
439 	/* Set the appropriate MTA bit */
440 	for (int i = 0; i < entries; i++) {
441 		vf->mc_hash[i] = list[i];
442 		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
443 		vec_bit = vf->mc_hash[i] & 0x1F;
444 		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
445 		mta_reg |= (1 << vec_bit);
446 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
447 	}
448 
449 	vmolr |= IXGBE_VMOLR_ROMPE;
450 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
451 	ixgbe_send_vf_success(sc, vf, msg[0]);
452 } /* ixgbe_vf_set_mc_addr */
453 
454 
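/*
 * Handle a SET_VLAN request: add or remove a VLAN filter table entry
 * for this VF's pool.  The VF must have the VLAN capability, and
 * enabling VLAN 0 is rejected.
 */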
455 static void
456 ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
457 {
458 	struct ixgbe_hw *hw;
459 	int enable;
460 	uint16_t tag;
461 
462 	hw = &sc->hw;
463 	enable = IXGBE_VT_MSGINFO(msg[0]);
464 	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
465 
466 	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
467 		ixgbe_send_vf_failure(sc, vf, msg[0]);
468 		return;
469 	}
470 
471 	/* It is illegal to enable vlan tag 0. */
472 	if (tag == 0 && enable != 0) {
473 		ixgbe_send_vf_failure(sc, vf, msg[0]);
474 		return;
475 	}
476 
477 	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
478 	ixgbe_send_vf_success(sc, vf, msg[0]);
479 } /* ixgbe_vf_set_vlan */
480 
481 
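/*
 * Handle a SET_LPE (max frame size) request: record the VF's requested
 * frame size, fold it into the PF-wide maximum and MHADD.MFS if
 * necessary, and re-evaluate whether the VF may receive.  Out-of-range
 * requests are ACKed but otherwise ignored.
 */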
482 static void
483 ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
484 {
485 	struct ixgbe_hw *hw;
486 	uint32_t vf_max_size, pf_max_size, mhadd;
487 
488 	hw = &sc->hw;
489 	vf_max_size = msg[1];
490 
491 	if (vf_max_size < ETHER_CRC_LEN) {
492 		/* We intentionally ACK invalid LPE requests. */
493 		ixgbe_send_vf_success(sc, vf, msg[0]);
494 		return;
495 	}
496 
497 	vf_max_size -= ETHER_CRC_LEN;
498 
499 	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
500 		/* We intentionally ACK invalid LPE requests. */
501 		ixgbe_send_vf_success(sc, vf, msg[0]);
502 		return;
503 	}
504 
505 	vf->maximum_frame_size = vf_max_size;
506 	ixgbe_update_max_frame(sc, vf->maximum_frame_size);
507 
508 	/*
509 	 * We might have to disable reception to this VF if the frame size is
510 	 * not compatible with the config on the PF.
511 	 */
512 	ixgbe_vf_enable_receive(sc, vf);
513 
514 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
515 	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
516 
517 	if (pf_max_size < sc->max_frame_size) {
518 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
519 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
520 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
521 	}
522 
523 	ixgbe_send_vf_success(sc, vf, msg[0]);
524 } /* ixgbe_vf_set_lpe */
525 
526 
527 static void
528 ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
529                      uint32_t *msg)
530 {
531 	//XXX implement this
532 	ixgbe_send_vf_failure(sc, vf, msg[0]);
533 } /* ixgbe_vf_set_macvlan */
534 
535 
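/* Negotiate the mailbox API version; only 1.0 and 1.1 are accepted. */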
536 static void
537 ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
538     uint32_t *msg)
539 {
540 
541 	switch (msg[1]) {
542 	case IXGBE_API_VER_1_0:
543 	case IXGBE_API_VER_1_1:
544 		vf->api_ver = msg[1];
545 		ixgbe_send_vf_success(sc, vf, msg[0]);
546 		break;
547 	default:
548 		vf->api_ver = IXGBE_API_VER_UNKNOWN;
549 		ixgbe_send_vf_failure(sc, vf, msg[0]);
550 		break;
551 	}
552 } /* ixgbe_vf_api_negotiate */
553 
554 
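/*
 * Handle a GET_QUEUES request (mailbox API 1.1 and later): report the
 * number of rx/tx queues in the VF's pool, whether a default VLAN is
 * in force, and the default queue.
 */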
555 static void
556 ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
557 {
558 	struct ixgbe_hw *hw;
559 	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
560 	int num_queues;
561 
562 	hw = &sc->hw;
563 
564 	/* GET_QUEUES is not supported on pre-1.1 APIs. */
565 	switch (msg[0]) {
566 	case IXGBE_API_VER_1_0:
567 	case IXGBE_API_VER_UNKNOWN:
568 		ixgbe_send_vf_failure(sc, vf, msg[0]);
569 		return;
570 	}
571 
572 	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
573 	    IXGBE_VT_MSGTYPE_CTS;
574 
575 	num_queues = ixgbe_vf_queues(sc->iov_mode);
576 	resp[IXGBE_VF_TX_QUEUES] = num_queues;
577 	resp[IXGBE_VF_RX_QUEUES] = num_queues;
578 	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
579 	resp[IXGBE_VF_DEF_QUEUE] = 0;
580 
581 	ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
582 } /* ixgbe_vf_get_queues */
583 
584 
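/*
 * Read one message from a VF's mailbox and dispatch it.  A RESET is
 * always honored; any other request from a VF that has not completed
 * the reset handshake (CTS) is acknowledged but otherwise ignored.
 */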
585 static void
586 ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
587 {
588 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
589 #ifdef KTR
590 	if_t		ifp = iflib_get_ifp(ctx);
591 #endif
592 	struct ixgbe_hw *hw;
593 	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
594 	int error;
595 
596 	hw = &sc->hw;
597 
598 	error = hw->mbx.ops[vf->pool].read(hw, msg, IXGBE_VFMAILBOX_SIZE,
599 	    vf->pool);
600 
601 	if (error != 0)
602 		return;
603 
604 	CTR3(KTR_MALLOC, "%s: received msg %x from %d", if_name(ifp),
605 	    msg[0], vf->pool);
606 	if (msg[0] == IXGBE_VF_RESET) {
607 		ixgbe_vf_reset_msg(sc, vf, msg);
608 		return;
609 	}
610 
611 	if (!(vf->flags & IXGBE_VF_CTS)) {
612 		ixgbe_send_vf_success(sc, vf, msg[0]);
613 		return;
614 	}
615 
616 	switch (msg[0] & IXGBE_VT_MSG_MASK) {
617 	case IXGBE_VF_SET_MAC_ADDR:
618 		ixgbe_vf_set_mac(sc, vf, msg);
619 		break;
620 	case IXGBE_VF_SET_MULTICAST:
621 		ixgbe_vf_set_mc_addr(sc, vf, msg);
622 		break;
623 	case IXGBE_VF_SET_VLAN:
624 		ixgbe_vf_set_vlan(sc, vf, msg);
625 		break;
626 	case IXGBE_VF_SET_LPE:
627 		ixgbe_vf_set_lpe(sc, vf, msg);
628 		break;
629 	case IXGBE_VF_SET_MACVLAN:
630 		ixgbe_vf_set_macvlan(sc, vf, msg);
631 		break;
632 	case IXGBE_VF_API_NEGOTIATE:
633 		ixgbe_vf_api_negotiate(sc, vf, msg);
634 		break;
635 	case IXGBE_VF_GET_QUEUES:
636 		ixgbe_vf_get_queues(sc, vf, msg);
637 		break;
638 	default:
639 		ixgbe_send_vf_failure(sc, vf, msg[0]);
640 	}
641 } /* ixgbe_process_vf_msg */
642 
643 
644 /* Tasklet for handling VF -> PF mailbox messages */
645 void
646 ixgbe_handle_mbx(void *context)
647 {
648 	if_ctx_t        ctx = context;
649 	struct ixgbe_softc  *sc = iflib_get_softc(ctx);
650 	struct ixgbe_hw *hw;
651 	struct ixgbe_vf *vf;
652 	int i;
653 
654 	hw = &sc->hw;
655 
656 	for (i = 0; i < sc->num_vfs; i++) {
657 		vf = &sc->vfs[i];
658 
659 		if (vf->flags & IXGBE_VF_ACTIVE) {
660 			if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
661 				ixgbe_process_vf_reset(sc, vf);
662 
663 			if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
664 				ixgbe_process_vf_msg(ctx, vf);
665 
666 			if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
667 				ixgbe_process_vf_ack(sc, vf);
668 		}
669 	}
670 } /* ixgbe_handle_mbx */
671 
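/************************************************************************
 * ixgbe_if_iov_init
 *
 *   iflib SR-IOV init handler: pick 32- or 64-pool VMDq mode based on
 *   the requested VF count, allocate per-VF state, set up the PF
 *   mailbox parameters, and reinitialize the interface with SR-IOV
 *   enabled.
 ************************************************************************/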
672 int
673 ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
674 {
675 	struct ixgbe_softc *sc;
676 	int retval = 0;
677 
678 	sc = iflib_get_softc(ctx);
679 	sc->iov_mode = IXGBE_NO_VM;
680 
681 	if (num_vfs == 0) {
682 		/* Would we ever get num_vfs = 0? */
683 		retval = EINVAL;
684 		goto err_init_iov;
685 	}
686 
687 	/*
688 	 * We've got to reserve a VM's worth of queues for the PF,
689 	 * thus we go into "64 VF mode" if 32+ VFs are requested.
690 	 * With 64 VFs, you can only have two queues per VF.
691 	 * With 32 VFs, you can have up to four queues per VF.
692 	 */
693 	if (num_vfs >= IXGBE_32_VM)
694 		sc->iov_mode = IXGBE_64_VM;
695 	else
696 		sc->iov_mode = IXGBE_32_VM;
697 
698 	/* Again, reserving 1 VM's worth of queues for the PF */
699 	sc->pool = sc->iov_mode - 1;
700 
701 	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
702 		retval = ENOSPC;
703 		goto err_init_iov;
704 	}
705 
706 	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
707 	    M_NOWAIT | M_ZERO);
708 
709 	if (sc->vfs == NULL) {
710 		retval = ENOMEM;
711 		goto err_init_iov;
712 	}
713 
714 	sc->num_vfs = num_vfs;
715 	ixgbe_init_mbx_params_pf(&sc->hw);
716 
717 	sc->feat_en |= IXGBE_FEATURE_SRIOV;
718 	ixgbe_if_init(sc->ctx);
719 
720 	return (retval);
721 
722 err_init_iov:
723 	sc->num_vfs = 0;
724 	sc->pool = 0;
725 	sc->iov_mode = IXGBE_NO_VM;
726 
727 	return (retval);
728 } /* ixgbe_if_iov_init */
729 
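/************************************************************************
 * ixgbe_if_iov_uninit
 *
 *   iflib SR-IOV teardown handler: leave rx/tx enabled only for the PF
 *   pool, disable virtualization (VT_CTL), and release the per-VF
 *   state.
 ************************************************************************/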
730 void
731 ixgbe_if_iov_uninit(if_ctx_t ctx)
732 {
733 	struct ixgbe_hw *hw;
734 	struct ixgbe_softc *sc;
735 	uint32_t pf_reg, vf_reg;
736 
737 	sc = iflib_get_softc(ctx);
738 	hw = &sc->hw;
739 
740 	/* Enable rx/tx for the PF and disable it for all VFs. */
741 	pf_reg = IXGBE_VF_INDEX(sc->pool);
742 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
743 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));
744 
745 	if (pf_reg == 0)
746 		vf_reg = 1;
747 	else
748 		vf_reg = 0;
749 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
750 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
751 
752 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
753 
754 	free(sc->vfs, M_IXGBE_SRIOV);
755 	sc->vfs = NULL;
756 	sc->num_vfs = 0;
757 	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
758 } /* ixgbe_if_iov_uninit */
759 
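/*
 * (Re)initialize a single active VF: unmask its mailbox interrupt,
 * restore its default VLAN and MAC filter, enable its rx/tx, and
 * notify it with a PF control message.
 */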
760 static void
761 ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
762 {
763 	struct ixgbe_hw *hw;
764 	uint32_t vf_index, pfmbimr;
765 
766 	hw = &sc->hw;
767 
768 	if (!(vf->flags & IXGBE_VF_ACTIVE))
769 		return;
770 
771 	vf_index = IXGBE_VF_INDEX(vf->pool);
772 	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
773 	pfmbimr |= IXGBE_VF_BIT(vf->pool);
774 	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
775 
776 	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);
777 
778 	// XXX multicast addresses
779 
780 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
781 		ixgbe_set_rar(&sc->hw, vf->rar_index,
782 		    vf->ether_addr, vf->pool, true);
783 	}
784 
785 	ixgbe_vf_enable_transmit(sc, vf);
786 	ixgbe_vf_enable_receive(sc, vf);
787 
788 	ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
789 } /* ixgbe_init_vf */
790 
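/************************************************************************
 * ixgbe_initialize_iov
 *
 *   Program the global VMDq/SR-IOV registers (MRQC, MTQC, GCR_EXT,
 *   GPIE, VT_CTL) for the selected mode, enable rx/tx for the PF pool
 *   and VM-to-VM loopback, then initialize each configured VF.
 ************************************************************************/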
791 void
792 ixgbe_initialize_iov(struct ixgbe_softc *sc)
793 {
794 	struct ixgbe_hw *hw = &sc->hw;
795 	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
796 	int i;
797 
798 	if (sc->iov_mode == IXGBE_NO_VM)
799 		return;
800 
801 	/* RMW appropriate registers based on IOV mode */
802 	/* Read... */
803 	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
804 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
805 	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
806 	/* Modify... */
807 	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
808 	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
809 	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
810 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
811 	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
812 	switch (sc->iov_mode) {
813 	case IXGBE_64_VM:
814 		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
815 		mtqc    |= IXGBE_MTQC_64VF;
816 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
817 		gpie    |= IXGBE_GPIE_VTMODE_64;
818 		break;
819 	case IXGBE_32_VM:
820 		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
821 		mtqc    |= IXGBE_MTQC_32VF;
822 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
823 		gpie    |= IXGBE_GPIE_VTMODE_32;
824 		break;
825 	default:
826 		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
827 	}
828 	/* Write... */
829 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
830 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
831 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
832 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
833 
834 	/* Enable rx/tx for the PF. */
835 	vf_reg = IXGBE_VF_INDEX(sc->pool);
836 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
837 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));
838 
839 	/* Allow VM-to-VM communication. */
840 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
841 
842 	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
843 	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
844 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
845 
846 	for (i = 0; i < sc->num_vfs; i++)
847 		ixgbe_init_vf(sc, &sc->vfs[i]);
848 } /* ixgbe_initialize_iov */
849 
850 
851 /* Check the max frame setting of all active VFs */
852 void
853 ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
854 {
855 	struct ixgbe_vf *vf;
856 
857 	for (int i = 0; i < sc->num_vfs; i++) {
858 		vf = &sc->vfs[i];
859 		if (vf->flags & IXGBE_VF_ACTIVE)
860 			ixgbe_update_max_frame(sc, vf->maximum_frame_size);
861 	}
862 } /* ixgbe_recalculate_max_frame */
863 
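/************************************************************************
 * ixgbe_if_iov_vf_add
 *
 *   iflib per-VF add handler: assign the VF's pool and RAR index,
 *   apply the administrator-supplied MAC address and allow-set-mac
 *   policy from the nvlist config, and initialize the VF.
 ************************************************************************/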
864 int
865 ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
866 {
867 	struct ixgbe_softc *sc;
868 	struct ixgbe_vf *vf;
869 	const void *mac;
870 
871 	sc = iflib_get_softc(ctx);
872 
873 	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
874 	    vfnum, sc->num_vfs));
875 
876 	vf = &sc->vfs[vfnum];
877 	vf->pool = vfnum;
878 
879 	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
880 	vf->rar_index = vfnum + 1;
881 	vf->default_vlan = 0;
882 	vf->maximum_frame_size = ETHER_MAX_LEN;
883 	ixgbe_update_max_frame(sc, vf->maximum_frame_size);
884 
885 	if (nvlist_exists_binary(config, "mac-addr")) {
886 		mac = nvlist_get_binary(config, "mac-addr", NULL);
887 		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
888 		if (nvlist_get_bool(config, "allow-set-mac"))
889 			vf->flags |= IXGBE_VF_CAP_MAC;
890 	} else
891 		/*
892 		 * If the administrator has not specified a MAC address then
893 		 * we must allow the VF to choose one.
894 		 */
895 		vf->flags |= IXGBE_VF_CAP_MAC;
896 
897 	vf->flags |= IXGBE_VF_ACTIVE;
898 
899 	ixgbe_init_vf(sc, vf);
900 
901 	return (0);
902 } /* ixgbe_if_iov_vf_add */
903 
904 #else
905 
906 void
907 ixgbe_handle_mbx(void *context)
908 {
909 	UNREFERENCED_PARAMETER(context);
910 } /* ixgbe_handle_mbx */
911 
912 #endif
913