/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

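/* Mailbox handler of last resort: log the unrecognized message ID and the
 * offending VF, then let the common TLV error handler reject the message.
 */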
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

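/* Message handlers registered on each VF mailbox. Test, MSI-X, MAC/VLAN and
 * logical port state requests are dispatched to the PF implementations; any
 * other message ID falls through to fm10k_iov_msg_error() above.
 */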
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

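/* Handle interrupt-driven SR-IOV events for the PF.
 *
 * If EICR reports a VF LR event, PFVFLRE(1):PFVFLRE(0) are combined into a
 * 64-bit mask with one bit per VF (PFVFLRE(0) is read twice and the results
 * OR'd together); any VF whose bit is set has its resources reset and its
 * mailbox reconnected. The walk shifts the mask so the highest VF of
 * interest sits in the sign bit, tests the sign, then doubles the mask to
 * step down one VF at a time.
 *
 * MBICR(1):MBICR(0) are walked the same way to find VFs with pending
 * mailbox work. If the switch manager mailbox runs out of room, the walk
 * stops and next_vf_mbx records where the next pass should resume.
 */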
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 mbicr, vflre;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto process_mbx;

	/* read VFLRE to determine if any VFs have been reset */
	do {
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		vflre = (vflre << 32) | (vflre >> 32);
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	} while (i != iov_data->num_vfs);

process_mbx:
	/* read MBICR to determine which VFs require attention */
	mbicr = fm10k_read_reg(hw, FM10K_MBICR(1));
	mbicr <<= 32;
	mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0));

	i = iov_data->next_vf_mbx ? : iov_data->num_vfs;

	for (mbicr <<= 64 - i; i--; mbicr += mbicr) {
		struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx;

		if (mbicr >= 0)
			continue;

		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
			break;

		mbx->ops.process(hw, mbx);
	}

	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}
read_unlock:
	rcu_read_unlock();

	return 0;
}

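/* Service the VF mailboxes outside of interrupt context.
 *
 * With the mailbox lock held, walk the VF mailboxes, resuming at next_vf_mbx
 * if a previous pass was cut short. VFs whose GLORT mapping is no longer
 * valid have their logical port reset, and VFs whose mailbox has timed out
 * have their resources reset and the mailbox reconnected. Pending messages
 * are processed as long as the switch manager mailbox has room for another
 * frame; otherwise the position is saved and the pass ends early. When a
 * full pass completes with a resume point still set, the walk restarts from
 * the top to cover the VFs skipped earlier.
 */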
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* verify the port mapping is valid; if not, reset the port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset VFs whose mailbox has timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* if no work is pending, just continue */
		if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx))
			continue;

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
			break;

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

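/* Suspend SR-IOV: tear down the VF RSS DGLORT mapping and reset the
 * resources and logical port of every active VF. Safe to call with no
 * iov_data allocated, in which case only the DGLORT mapping is cleared.
 */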
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
	}
}

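/* Resume SR-IOV after a suspend or reset: reassign hardware resources to the
 * VFs, rebuild the VF RSS DGLORT mapping, and for each VF restore its GLORT,
 * default VLAN and MAC before reconnecting its mailbox. Returns -ENOMEM if
 * no iov_data has been allocated.
 */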
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* assign our default vid to the VF following reset */
		vf_info->sw_vid = hw->mac.default_vid;

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

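/* Handle a port VLAN update for a GLORT that may belong to one of our VFs.
 * Returns FM10K_ERR_PARAM if SR-IOV is inactive or the GLORT maps outside
 * our VF range; otherwise, if the pVID changed, record it and push the
 * updated default MAC/VLAN to the VF.
 */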
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

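/* Release SR-IOV state: reclaim hardware resources via fm10k_iov_suspend()
 * and free iov_data once any RCU readers are done with it.
 */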
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

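/* Allocate and initialize iov_data for num_vfs VFs: record each VF's VSI and
 * index, initialize its PF/VF mailbox with the handlers in iov_mbx_data, and
 * finish by bringing the VFs up through fm10k_iov_resume(). Returns -EBUSY
 * if iov_data already exists and -ENODEV if the hardware cannot assign
 * resources.
 */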
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

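/* Disable SR-IOV on the device and release iov_data. VFs that are still
 * assigned to a guest keep SR-IOV enabled, but the PF-side state is torn
 * down either way.
 */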
void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

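/* Mask the completer abort error in the AER uncorrectable severity register
 * so that VF accesses to queues they do not own are not escalated to fatal
 * errors.
 */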
static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_sev;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}

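/* Reconfigure the number of SR-IOV VFs. Existing VFs are torn down first
 * unless they are assigned to a guest; iov_data is then reallocated,
 * completer abort reporting is relaxed, and SR-IOV is enabled for the
 * requested count. Returns the VF count in use or a negative errno.
 */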
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

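/* Administratively set a VF's MAC address. The address is recorded in
 * vf_info and pushed to the VF over the mailbox, hence the mailbox lock
 * around the update.
 */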
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	/* assigning the MAC will send a mailbox message so lock is needed */
	fm10k_mbx_lock(interface);

	/* assign MAC address to VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	fm10k_mbx_unlock(interface);

	return 0;
}

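/* Administratively set a VF's port VLAN. QoS is not supported and VLAN IDs
 * above 4094 are rejected. A changed pVID clears the VF's VLAN table and
 * reassigns the default MAC/VLAN, which resets the VF.
 */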
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QoS is unsupported and accepted VLAN IDs are in the range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* assigning the VLAN will send a mailbox message so lock is needed */
	fm10k_mbx_lock(interface);

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	/* Update VF assignment and trigger reset */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	fm10k_mbx_unlock(interface);

	return 0;
}

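/* Set a VF's maximum Tx bandwidth. Non-zero rates must fall between
 * FM10K_VF_TC_MIN and FM10K_VF_TC_MAX; the accepted value is stored in
 * vf_info and programmed into the VF's traffic class.
 */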
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused,
			int rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mb/s or greater than link speed */
	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, rate);

	return 0;
}

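/* Fill in ifla_vf_info with the VF's recorded MAC, port VLAN and Tx rate. */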
int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}