/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <net/dcbnl.h>
#include <linux/bnxt/hsi.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
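/* Forward an asynchronous event completion to one VF, or to all VFs
 * when @vf is NULL, by encapsulating it in a HWRM_FWD_ASYNC_EVENT_CMPL
 * request carrying the given @event_id.
 */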
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input *req;
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
	if (rc)
		goto exit;

	if (vf)
		req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req->encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl =
		(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_req_send(bp, req);
exit:
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}

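/* Common validation for the VF ndo callbacks: SR-IOV must be active and
 * @vf_id must be within the range of active VFs.
 */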
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "VF ndo called although SRIOV is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	bool old_setting = false;
	struct bnxt_vf_info *vf;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->flags = cpu_to_le32(func_flags);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			if (setting)
				vf->flags |= BNXT_VF_SPOOFCHK;
			else
				vf->flags &= ~BNXT_VF_SPOOFCHK;
		}
	}
	return rc;
}

static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);
	return rc;
}

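/* With older firmware that lacks the trusted-VF capability, trust is
 * tracked purely by the PF-side BNXT_VF_TRUST flag; otherwise the
 * trusted bit is queried from the function's firmware configuration.
 */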
bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_req_send(bp, req);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan & VLAN_VID_MASK;
	ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject broadcast or multicast mac addr; a zero mac addr means
	 * the VF is allowed to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	memcpy(vf->mac_addr, mac, ETH_ALEN);

	req->fid = cpu_to_le16(vf->fw_fid);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_req_send(bp, req);
}

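/* Set the VF's default VLAN. 802.1Q is always accepted; 802.1ad (QinQ)
 * is accepted only when the firmware advertises the default VLAN
 * TPID/PCP capability.
 */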
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q) &&
	    (vlan_proto != htons(ETH_P_8021AD) ||
	     !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
	    (!vlan_id && qos))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
	if (vlan_tag == vf->vlan)
		return 0;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->dflt_vlan = cpu_to_le16(vlan_tag);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
			req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
			req->tpid = vlan_proto;
		}
		rc = hwrm_req_send(bp, req);
		if (!rc)
			vf->vlan = vlan_tag;
	}
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					   FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(max_tx_rate);
		req->min_bw = cpu_to_le32(min_tx_rate);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			vf->min_tx_rate = min_tx_rate;
			vf->max_tx_rate = max_tx_rate;
		}
	}
	return rc;
}

static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
		return 0;

	vf = &bp->pf.vf[vf_id];

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(vf->fw_fid);
	switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) {
	case BNXT_VF_LINK_FORCED:
		req->options =
			FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN;
		break;
	case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP):
		req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;
		break;
	default:
		req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
		break;
	}
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
	return hwrm_req_send(bp, req);
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		return -EINVAL;
	}
	if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)
		rc = bnxt_set_vf_link_admin_state(bp, vf_id);
	else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resc_free_input *req;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req->vf_id = cpu_to_le16(i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

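/* Allocate the per-VF state array, the DMA-coherent pages that back each
 * VF's forwarded HWRM command buffer, and the bitmap used to track
 * pending VF events.
 */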
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kzalloc_objs(struct bnxt_vf_info, num_vfs);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

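/* Register the PF's VF command buffer pages with the firmware so that
 * HWRM requests issued by VFs can be placed there for the PF to inspect
 * and forward.
 */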
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
	if (rc)
		return rc;

	req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_req_send(bp, req);
}

static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	req->fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req->dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					    FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(vf->max_tx_rate);
		req->min_bw = cpu_to_le32(vf->min_tx_rate);
	}
	if (vf->flags & BNXT_VF_TRUST)
		req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	return hwrm_req_send(bp, req);
}

static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_cfg_input *cfg_req;
	struct hwrm_func_qcaps_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
	if (rc)
		return;

	req->fid = cpu_to_le16(0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (rc)
		goto err;

	rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
	if (rc)
		goto err;

	/* In case of VF Dynamic resource allocation, driver will provision
	 * maximum resources to all the VFs. FW will dynamically allocate
	 * resources to VFs on the fly, so always divide the resources by 1.
	 */
	if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
		num_vfs = 1;

	cfg_req->fid = cpu_to_le16(0xffff);
	cfg_req->enables2 =
		cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
	cfg_req->roce_max_av_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
	cfg_req->roce_max_cq_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
	cfg_req->roce_max_mrw_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
	cfg_req->roce_max_qp_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
	cfg_req->roce_max_srq_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
	cfg_req->roce_max_gid_per_vf =
		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);

	rc = hwrm_req_send(bp, cfg_req);

err:
	hwrm_req_drop(bp, req);
	if (rc)
		netdev_err(bp->dev, "RoCE sriov configuration failed\n");
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input *req;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req->min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req->min_cmpl_rings = cpu_to_le16(min);
		req->min_tx_rings = cpu_to_le16(min);
		req->min_rx_rings = cpu_to_le16(min);
		req->min_l2_ctxs = cpu_to_le16(min);
		req->min_vnics = cpu_to_le16(min);
		req->min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			req->min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
		    vf_vnics >= pf->max_vfs) {
			/* Take into account that FW has pre-reserved 1 VNIC
			 * for each pf->max_vfs.
			 */
			vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
		} else {
			vf_vnics /= num_vfs;
		}
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
		req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req->min_tx_rings = cpu_to_le16(vf_tx_rings);
		req->min_rx_rings = cpu_to_le16(vf_rx_rings);
		req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req->min_vnics = cpu_to_le16(vf_vnics);
		req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req->min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->max_tx_rings = cpu_to_le16(vf_tx_rings);
	req->max_rx_rings = cpu_to_le16(vf_rx_rings);
	req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->max_vnics = cpu_to_le16(vf_vnics);
	req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		req->max_msix = cpu_to_le16(vf_msix / num_vfs);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		struct bnxt_vf_info *vf = &pf->vf[i];

		vf->fw_fid = pf->first_vf_id + i;
		rc = bnxt_set_vf_link_admin_state(bp, i);
		if (rc)
			break;

		if (reset)
			__bnxt_set_vf_params(bp, i);

		req->vf_id = cpu_to_le16(vf->fw_fid);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
	}

	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -=
			le16_to_cpu(req->min_hw_ring_grps) * n;
		hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -=
			le16_to_cpu(req->min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			hw_resc->max_nqs -= vf_msix;

		rc = pf->active_vfs;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_cfg_input *req;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;
	u32 mtu, i;
	int rc;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	/* Remaining rings are distributed equally amongst VFs for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
				   FUNC_CFG_REQ_ENABLES_MRU |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) {
		req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
		req->enables |=
			cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
	}

	mtu = bp->dev->mtu + VLAN_ETH_HLEN;
	req->mru = cpu_to_le16(mtu);
	req->admin_mtu = cpu_to_le16(mtu);

	req->num_rsscos_ctxs = cpu_to_le16(1);
	req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->num_tx_rings = cpu_to_le16(vf_tx_rings);
	req->num_rx_rings = cpu_to_le16(vf_rx_rings);
	req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->num_l2_ctxs = cpu_to_le16(4);

	req->num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req->fid = cpu_to_le16(pf->first_vf_id + i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req->fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	hwrm_req_drop(bp, req);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
		bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);

	return 0;
}

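/* Enable SR-IOV: step the requested VF count down until the PF's spare
 * rings, VNICs and RSS contexts can provide at least one RX ring, one
 * TX ring and one RSS context per VF, then allocate and reserve the
 * resources and enable the VFs in PCI config space.
 */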
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum config,
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	/* Create representors for VFs in switchdev mode */
	devl_lock(bp->dl);
	rc = bnxt_vf_reps_create(bp);
	devl_unlock(bp->dl);
	if (rc) {
		netdev_info(bp->dev, "Cannot enable VFs as representors cannot be created\n");
		goto err_out3;
	}

	return 0;

err_out3:
	/* Disable SR-IOV */
	pci_disable_sriov(bp->pdev);

err_out2:
	/* Free the resources reserved for various VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

	/* Restore the max resources */
	bnxt_hwrm_func_qcaps(bp);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void __bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	devl_lock(bp->dl);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	devl_unlock(bp->dl);

	bnxt_free_vf_resources(bp);
}

static void bnxt_sriov_disable(struct bnxt *bp)
{
	if (!pci_num_vf(bp->pdev))
		return;

	__bnxt_sriov_disable(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	netdev_lock(bp->dev);
	bnxt_restore_pf_fw_resources(bp);
	netdev_unlock(bp->dev);
	rtnl_unlock();
}

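/* PCI sriov_configure hook, invoked when a VF count is written to the
 * device's sriov_numvfs sysfs attribute. Any existing VFs are torn down
 * before the newly requested number is enabled.
 */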
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	rtnl_lock();
	netdev_lock(dev);
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since the interface is down!\n");
		netdev_unlock(dev);
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		netdev_unlock(dev);
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	netdev_unlock(dev);
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	struct hwrm_fwd_resp_input *req;
	int rc;

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
		netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
				 msg_size);
		return -EINVAL;
	}

	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_len = cpu_to_le16(msg_size);
		req->encap_resp_addr = encap_resp_addr;
		req->encap_resp_cmpl_ring = encap_resp_cpr;
		memcpy(req->encap_resp, encap_resp, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	struct hwrm_reject_fwd_resp_input *req;
	int rc;

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	struct hwrm_exec_fwd_resp_input *req;
	int rc;

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
	return rc;
}

static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, the VF MAC address is not
		 *    forwarded to the PF and so it doesn't have to match
		 * 2. Allow VF to modify its own MAC when PF has not assigned
		 *    a valid MAC address and firmware spec >= 0x10202
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

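/* Handle a VF's PORT_PHY_QCFG query. If the VF's link state is not
 * forced, forward the query to firmware; otherwise synthesize a PHY
 * qcfg response reflecting the forced link state.
 */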
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->link_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->link_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		/* New SPEEDS2 fields are beyond the legacy structure, so
		 * clear the SPEEDS2_SUPPORTED flag.
		 */
		phy_qcfg_resp.option_flags &=
			~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

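/* Drain pending forwarded VF commands: scan the VF event bitmap and
 * validate, execute or reject each flagged VF's queued HWRM request.
 */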
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input *req;
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
	if (rc)
		goto mac_done;

	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	if (!strict)
		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	rc = hwrm_req_send(bp, req);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	bool inform_pf = false;

	if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
		return;

	req->fid = cpu_to_le16(0xffff);

	resp = hwrm_req_hold(bp, req);
	if (hwrm_req_send(bp, req))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware. There are 2 cases:
	 * 1. MAC address is valid. It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero. The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
		/* This means we are now using our own MAC address, let
		 * the PF know about this MAC address.
		 */
		if (!is_valid_ether_addr(bp->vf.mac_addr))
			inform_pf = true;
	}

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
	hwrm_req_drop(bp, req);
	if (inform_pf)
		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void __bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	return 0;
}
#endif